Use backticks not vertical bars to denote variables in comments for /video
Bug: webrtc:12338
Change-Id: I47958800407482894ff6f17c1887dce907fdf35a
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227030
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34585}
diff --git a/video/adaptation/overuse_frame_detector_unittest.cc b/video/adaptation/overuse_frame_detector_unittest.cc
index 37ad974..0cbacd7 100644
--- a/video/adaptation/overuse_frame_detector_unittest.cc
+++ b/video/adaptation/overuse_frame_detector_unittest.cc
@@ -435,8 +435,8 @@
RTC_FROM_HERE);
rtc::Event event;
- // Expect NormalUsage(). When called, stop the |overuse_detector_| and then
- // set |event| to end the test.
+ // Expect NormalUsage(). When called, stop the `overuse_detector_` and then
+ // set `event` to end the test.
EXPECT_CALL(mock_observer_, AdaptUp())
.WillOnce(InvokeWithoutArgs([this, &event] {
overuse_detector_->StopCheckForOveruse();
@@ -920,8 +920,8 @@
RTC_FROM_HERE);
rtc::Event event;
- // Expect NormalUsage(). When called, stop the |overuse_detector_| and then
- // set |event| to end the test.
+ // Expect NormalUsage(). When called, stop the `overuse_detector_` and then
+ // set `event` to end the test.
EXPECT_CALL(mock_observer_, AdaptUp())
.WillOnce(InvokeWithoutArgs([this, &event] {
overuse_detector_->StopCheckForOveruse();
diff --git a/video/adaptation/pixel_limit_resource.cc b/video/adaptation/pixel_limit_resource.cc
index 789dac2..36c93cd 100644
--- a/video/adaptation/pixel_limit_resource.cc
+++ b/video/adaptation/pixel_limit_resource.cc
@@ -78,9 +78,9 @@
int current_pixels = frame_size_pixels.value();
int target_pixel_upper_bounds = max_pixels_.value();
// To avoid toggling, we allow any resolutions between
- // |target_pixel_upper_bounds| and video_stream_adapter.h's
+ // `target_pixel_upper_bounds` and video_stream_adapter.h's
// GetLowerResolutionThan(). This is the pixels we end up if we adapt down
- // from |target_pixel_upper_bounds|.
+ // from `target_pixel_upper_bounds`.
int target_pixels_lower_bounds =
GetLowerResolutionThan(target_pixel_upper_bounds);
if (current_pixels > target_pixel_upper_bounds) {
diff --git a/video/adaptation/video_stream_encoder_resource_manager.cc b/video/adaptation/video_stream_encoder_resource_manager.cc
index 2705bf9..84981b3 100644
--- a/video/adaptation/video_stream_encoder_resource_manager.cc
+++ b/video/adaptation/video_stream_encoder_resource_manager.cc
@@ -455,7 +455,7 @@
int64_t time_sent_in_us,
absl::optional<int> encode_duration_us) {
RTC_DCHECK_RUN_ON(encoder_queue_);
- // Inform |encode_usage_resource_| of the encode completed event.
+ // Inform `encode_usage_resource_` of the encode completed event.
uint32_t timestamp = encoded_image.Timestamp();
int64_t capture_time_us =
encoded_image.capture_time_ms_ * rtc::kNumMicrosecsPerMillisec;
diff --git a/video/adaptation/video_stream_encoder_resource_manager.h b/video/adaptation/video_stream_encoder_resource_manager.h
index e7174d2..2f5dfcd 100644
--- a/video/adaptation/video_stream_encoder_resource_manager.h
+++ b/video/adaptation/video_stream_encoder_resource_manager.h
@@ -133,7 +133,7 @@
absl::optional<uint32_t> UseBandwidthAllocationBps() const;
// VideoSourceRestrictionsListener implementation.
- // Updates |video_source_restrictions_|.
+ // Updates `video_source_restrictions_`.
void OnVideoSourceRestrictionsUpdated(
VideoSourceRestrictions restrictions,
const VideoAdaptationCounters& adaptation_counters,
@@ -159,7 +159,7 @@
int LastFrameSizeOrDefault() const;
// Calculates an up-to-date value of the target frame rate and informs the
- // |encode_usage_resource_| of the new value.
+ // `encode_usage_resource_` of the new value.
void MaybeUpdateTargetFrameRate();
// Use nullopt to disable quality scaling.
diff --git a/video/alignment_adjuster.cc b/video/alignment_adjuster.cc
index 6b1db92..1762bec 100644
--- a/video/alignment_adjuster.cc
+++ b/video/alignment_adjuster.cc
@@ -19,8 +19,8 @@
namespace webrtc {
namespace {
// Round each scale factor to the closest rational in form alignment/i where i
-// is a multiple of |requested_alignment|. Each resolution divisible by
-// |alignment| will be divisible by |requested_alignment| after the scale factor
+// is a multiple of `requested_alignment`. Each resolution divisible by
+// `alignment` will be divisible by `requested_alignment` after the scale factor
// is applied.
double RoundToMultiple(int alignment,
int requested_alignment,
@@ -56,7 +56,7 @@
// Output:
// If B is false, returns K and does not adjust scaling factors.
// Otherwise, returns adjusted alignment (A), adjusted scaling factors (S'[i])
-// are written in |config| such that:
+// are written in `config` such that:
//
// A / S'[i] are integers divisible by K
// sum abs(S'[i] - S[i]) -> min
@@ -94,7 +94,7 @@
}
// Get alignment for downscaled layers.
- // Adjust |scale_resolution_down_by| to a common multiple to limit the
+ // Adjust `scale_resolution_down_by` to a common multiple to limit the
// alignment value (to avoid largely cropped frames and possibly with an
// aspect ratio far from the original).
const int kMaxAlignment = 16;
diff --git a/video/alignment_adjuster.h b/video/alignment_adjuster.h
index 4b72623..4c4e155 100644
--- a/video/alignment_adjuster.h
+++ b/video/alignment_adjuster.h
@@ -24,12 +24,12 @@
//
// If |EncoderInfo::apply_alignment_to_all_simulcast_layers| is enabled, the
// alignment will be adjusted to ensure that each simulcast layer also is
- // divisible by |requested_resolution_alignment|. The configured scale factors
- // |scale_resolution_down_by| may be adjusted to a common multiple to limit
+ // divisible by `requested_resolution_alignment`. The configured scale factors
+ // `scale_resolution_down_by` may be adjusted to a common multiple to limit
// the alignment value to avoid largely cropped frames and possibly with an
// aspect ratio far from the original.
- // Note: |max_layers| currently only taken into account when using default
+ // Note: `max_layers` is currently only taken into account when using default
// scale factors.
static int GetAlignmentAndMaybeAdjustScaleFactors(
const VideoEncoder::EncoderInfo& info,
diff --git a/video/call_stats.cc b/video/call_stats.cc
index d575e11..9fd6802 100644
--- a/video/call_stats.cc
+++ b/video/call_stats.cc
@@ -64,7 +64,7 @@
// This class is used to de-register a Module from a ProcessThread to satisfy
// threading requirements of the Module (CallStats).
// The guarantee offered by TemporaryDeregistration is that while its in scope,
-// no calls to |TimeUntilNextProcess| or |Process()| will occur and therefore
+// no calls to `TimeUntilNextProcess` or `Process()` will occur and therefore
// synchronization with those methods, is not necessary.
class TemporaryDeregistration {
public:
@@ -122,7 +122,7 @@
int64_t now = clock_->TimeInMilliseconds();
last_process_time_ = now;
- // |avg_rtt_ms_| is allowed to be read on the process thread since that's the
+ // `avg_rtt_ms_` is allowed to be read on the process thread since that's the
// only thread that modifies the value.
int64_t avg_rtt_ms = avg_rtt_ms_;
RemoveOldReports(now, &reports_);
@@ -150,7 +150,7 @@
process_thread_running_ = process_thread != nullptr;
// Whether we just got attached or detached, we clear the
- // |process_thread_checker_| so that it can be used to protect variables
+ // `process_thread_checker_` so that it can be used to protect variables
// in either the process thread when it starts again, or UpdateHistograms()
// (mutually exclusive).
process_thread_checker_.Detach();
diff --git a/video/call_stats.h b/video/call_stats.h
index 5dc8fa0..0c8e267 100644
--- a/video/call_stats.h
+++ b/video/call_stats.h
@@ -40,7 +40,7 @@
void RegisterStatsObserver(CallStatsObserver* observer);
void DeregisterStatsObserver(CallStatsObserver* observer);
- // Expose |LastProcessedRtt()| from RtcpRttStats to the public interface, as
+ // Expose `LastProcessedRtt()` from RtcpRttStats to the public interface, as
// it is the part of the API that is needed by direct users of CallStats.
// TODO(tommi): Threading or lifetime guarantees are not explicit in how
// CallStats is used as RtcpRttStats or how pointers are cached in a
@@ -84,15 +84,15 @@
int64_t max_rtt_ms_ RTC_GUARDED_BY(process_thread_checker_);
// Accessed from random threads (seemingly). Consider atomic.
- // |avg_rtt_ms_| is allowed to be read on the process thread without a lock.
- // |avg_rtt_ms_lock_| must be held elsewhere for reading.
- // |avg_rtt_ms_lock_| must be held on the process thread for writing.
+ // `avg_rtt_ms_` is allowed to be read on the process thread without a lock.
+ // `avg_rtt_ms_lock_` must be held elsewhere for reading.
+ // `avg_rtt_ms_lock_` must be held on the process thread for writing.
int64_t avg_rtt_ms_;
- // Protects |avg_rtt_ms_|.
+ // Protects `avg_rtt_ms_`.
mutable Mutex avg_rtt_ms_lock_;
- // |sum_avg_rtt_ms_|, |num_avg_rtt_| and |time_of_first_rtt_ms_| are only used
+ // `sum_avg_rtt_ms_`, `num_avg_rtt_` and `time_of_first_rtt_ms_` are only used
// on the ProcessThread when running. When the Process Thread is not running,
// (and only then) they can be used in UpdateHistograms(), usually called from
// the dtor.
diff --git a/video/call_stats2.h b/video/call_stats2.h
index 35a7935..878a43f 100644
--- a/video/call_stats2.h
+++ b/video/call_stats2.h
@@ -50,7 +50,7 @@
void RegisterStatsObserver(CallStatsObserver* observer);
void DeregisterStatsObserver(CallStatsObserver* observer);
- // Expose |LastProcessedRtt()| from RtcpRttStats to the public interface, as
+ // Expose `LastProcessedRtt()` from RtcpRttStats to the public interface, as
// it is the part of the API that is needed by direct users of CallStats.
int64_t LastProcessedRtt() const;
diff --git a/video/cpu_scaling_tests.cc b/video/cpu_scaling_tests.cc
index c6a8975..108c118 100644
--- a/video/cpu_scaling_tests.cc
+++ b/video/cpu_scaling_tests.cc
@@ -87,7 +87,7 @@
case DegradationPreference::BALANCED:
if (wants.max_pixel_count == std::numeric_limits<int>::max() &&
wants.max_framerate_fps == std::numeric_limits<int>::max()) {
- // |adapt_counters_| map in VideoStreamEncoder is reset when
+ // `adapt_counters_` map in VideoStreamEncoder is reset when
// balanced mode is set.
break;
}
diff --git a/video/encoder_overshoot_detector.cc b/video/encoder_overshoot_detector.cc
index 0b9926e..80b2ec1 100644
--- a/video/encoder_overshoot_detector.cc
+++ b/video/encoder_overshoot_detector.cc
@@ -16,7 +16,7 @@
namespace {
// The buffer level for media-rate utilization is allowed to go below zero,
// down to
-// -(|kMaxMediaUnderrunFrames| / |target_framerate_fps_|) * |target_bitrate_|.
+// -(`kMaxMediaUnderrunFrames` / `target_framerate_fps_`) * `target_bitrate_`.
static constexpr double kMaxMediaUnderrunFrames = 5.0;
} // namespace
@@ -173,7 +173,7 @@
network_buffer_level_bits_ =
std::max<int64_t>(0, network_buffer_level_bits_ - leaked_bits);
- // Media buffer my go down to minus |kMaxMediaUnderrunFrames| frames worth
+ // Media buffer may go down to minus `kMaxMediaUnderrunFrames` frames worth
// of data.
const double max_underrun_seconds =
std::min(kMaxMediaUnderrunFrames, target_framerate_fps_) /
diff --git a/video/encoder_overshoot_detector.h b/video/encoder_overshoot_detector.h
index f7ef990..1f8908e 100644
--- a/video/encoder_overshoot_detector.h
+++ b/video/encoder_overshoot_detector.h
@@ -26,15 +26,15 @@
void SetTargetRate(DataRate target_bitrate,
double target_framerate_fps,
int64_t time_ms);
- // A frame has been encoded or dropped. |bytes| == 0 indicates a drop.
+ // A frame has been encoded or dropped. `bytes` == 0 indicates a drop.
void OnEncodedFrame(size_t bytes, int64_t time_ms);
// This utilization factor reaches 1.0 only if the encoder produces encoded
// frame in such a way that they can be sent onto the network at
- // |target_bitrate| without building growing queues.
+ // `target_bitrate` without building growing queues.
absl::optional<double> GetNetworkRateUtilizationFactor(int64_t time_ms);
// This utilization factor is based just on actual encoded frame sizes in
// relation to ideal sizes. An undershoot may be compensated by an
- // overshoot so that the average over time is close to |target_bitrate|.
+ // overshoot so that the average over time is close to `target_bitrate`.
absl::optional<double> GetMediaRateUtilizationFactor(int64_t time_ms);
void Reset();
diff --git a/video/end_to_end_tests/bandwidth_tests.cc b/video/end_to_end_tests/bandwidth_tests.cc
index 7217383..3d1e61b 100644
--- a/video/end_to_end_tests/bandwidth_tests.cc
+++ b/video/end_to_end_tests/bandwidth_tests.cc
@@ -94,7 +94,7 @@
~BandwidthStatsTest() override {
// Block until all already posted tasks run to avoid races when such task
- // accesses |this|.
+ // accesses `this`.
SendTask(RTC_FROM_HERE, task_queue_, [] {});
}
@@ -206,7 +206,7 @@
~BweObserver() override {
// Block until all already posted tasks run to avoid races when such task
- // accesses |this|. Also make sure we free |rtp_rtcp_| on the correct
+ // accesses `this`. Also make sure we free `rtp_rtcp_` on the correct
// thread/task queue.
SendTask(RTC_FROM_HERE, task_queue_, [this]() { rtp_rtcp_ = nullptr; });
}
diff --git a/video/end_to_end_tests/fec_tests.cc b/video/end_to_end_tests/fec_tests.cc
index 77ad9eb..d63924f 100644
--- a/video/end_to_end_tests/fec_tests.cc
+++ b/video/end_to_end_tests/fec_tests.cc
@@ -412,7 +412,7 @@
return DROP_PACKET;
// Pass one media packet after dropped packet after last FEC,
// otherwise receiver might never see a seq_no after
- // |ulpfec_sequence_number_|
+ // `ulpfec_sequence_number_`
state_ = kVerifyUlpfecPacketNotInNackList;
break;
case kVerifyUlpfecPacketNotInNackList:
diff --git a/video/end_to_end_tests/histogram_tests.cc b/video/end_to_end_tests/histogram_tests.cc
index fa71c15..7a3de1b 100644
--- a/video/end_to_end_tests/histogram_tests.cc
+++ b/video/end_to_end_tests/histogram_tests.cc
@@ -54,9 +54,9 @@
private:
void OnFrame(const VideoFrame& video_frame) override {
- // The RTT is needed to estimate |ntp_time_ms| which is used by
+ // The RTT is needed to estimate `ntp_time_ms` which is used by
// end-to-end delay stats. Therefore, start counting received frames once
- // |ntp_time_ms| is valid.
+ // `ntp_time_ms` is valid.
if (video_frame.ntp_time_ms() > 0 &&
Clock::GetRealTimeClock()->CurrentNtpInMilliseconds() >=
video_frame.ntp_time_ms()) {
diff --git a/video/end_to_end_tests/stats_tests.cc b/video/end_to_end_tests/stats_tests.cc
index 54e7bcf..9e8a9b6 100644
--- a/video/end_to_end_tests/stats_tests.cc
+++ b/video/end_to_end_tests/stats_tests.cc
@@ -474,9 +474,9 @@
bool ShouldCreateReceivers() const override { return true; }
void OnFrame(const VideoFrame& video_frame) override {
- // The RTT is needed to estimate |ntp_time_ms| which is used by
+ // The RTT is needed to estimate `ntp_time_ms` which is used by
// end-to-end delay stats. Therefore, start counting received frames once
- // |ntp_time_ms| is valid.
+ // `ntp_time_ms` is valid.
if (video_frame.ntp_time_ms() > 0 &&
Clock::GetRealTimeClock()->CurrentNtpInMilliseconds() >=
video_frame.ntp_time_ms()) {
diff --git a/video/frame_encode_metadata_writer.cc b/video/frame_encode_metadata_writer.cc
index 8a0f3b3..b5eb5cd 100644
--- a/video/frame_encode_metadata_writer.cc
+++ b/video/frame_encode_metadata_writer.cc
@@ -135,7 +135,7 @@
int64_t encode_done_ms = rtc::TimeMillis();
// Encoders with internal sources do not call OnEncodeStarted
- // |timing_frames_info_| may be not filled here.
+ // `timing_frames_info_` may not be filled here.
if (!internal_source_) {
encode_start_ms =
ExtractEncodeStartTimeAndFillMetadata(simulcast_svc_idx, encoded_image);
@@ -174,7 +174,7 @@
}
// Workaround for chromoting encoder: it passes encode start and finished
- // timestamps in |timing_| field, but they (together with capture timestamp)
+ // timestamps in `timing_` field, but they (together with capture timestamp)
// are not in the WebRTC clock.
if (internal_source_ && encoded_image->timing_.encode_finish_ms > 0 &&
encoded_image->timing_.encode_start_ms > 0) {
diff --git a/video/frame_encode_metadata_writer_unittest.cc b/video/frame_encode_metadata_writer_unittest.cc
index da54c33..631dded 100644
--- a/video/frame_encode_metadata_writer_unittest.cc
+++ b/video/frame_encode_metadata_writer_unittest.cc
@@ -61,9 +61,9 @@
image.timing_.flags != VideoSendTiming::kNotTriggered;
}
-// Emulates |num_frames| on |num_streams| frames with capture timestamps
+// Emulates `num_frames` frames on `num_streams` streams with capture timestamps
// increased by 1 from 0. Size of each frame is between
-// |min_frame_size| and |max_frame_size|, outliers are counted relatevely to
+// `min_frame_size` and `max_frame_size`, outliers are counted relative to
// |average_frame_sizes[]| for each stream.
std::vector<std::vector<FrameType>> GetTimingFrames(
const int64_t delay_ms,
diff --git a/video/picture_id_tests.cc b/video/picture_id_tests.cc
index 298919c..df7b082 100644
--- a/video/picture_id_tests.cc
+++ b/video/picture_id_tests.cc
@@ -65,7 +65,7 @@
void SetMaxExpectedPictureIdGap(int max_expected_picture_id_gap) {
MutexLock lock(&mutex_);
max_expected_picture_id_gap_ = max_expected_picture_id_gap;
- // Expect smaller gap for |tl0_pic_idx| (running index for temporal_idx 0).
+ // Expect smaller gap for `tl0_pic_idx` (running index for temporal_idx 0).
max_expected_tl0_idx_gap_ = max_expected_picture_id_gap_ / 2;
}
@@ -155,8 +155,8 @@
return;
}
- // New frame with |temporal_idx| 0.
- // |tl0_pic_idx| should be increasing.
+ // New frame with `temporal_idx` 0.
+ // `tl0_pic_idx` should be increasing.
EXPECT_TRUE(AheadOf<uint8_t>(current.tl0_pic_idx, last.tl0_pic_idx));
// Expect continuously increasing idx.
diff --git a/video/quality_limitation_reason_tracker.h b/video/quality_limitation_reason_tracker.h
index 1fbd71f..22816a8 100644
--- a/video/quality_limitation_reason_tracker.h
+++ b/video/quality_limitation_reason_tracker.h
@@ -32,7 +32,7 @@
// https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationresolutionchanges
class QualityLimitationReasonTracker {
public:
- // The caller is responsible for making sure |clock| outlives the tracker.
+ // The caller is responsible for making sure `clock` outlives the tracker.
explicit QualityLimitationReasonTracker(Clock* clock);
// The current reason defaults to QualityLimitationReason::kNone.
@@ -45,9 +45,9 @@
QualityLimitationReason current_reason_;
int64_t current_reason_updated_timestamp_ms_;
// The total amount of time spent in each reason at time
- // |current_reason_updated_timestamp_ms_|. To get the total amount duration
- // so-far, including the time spent in |current_reason_| elapsed since the
- // last time |current_reason_| was updated, see DurationsMs().
+ // `current_reason_updated_timestamp_ms_`. To get the total amount duration
+ // so-far, including the time spent in `current_reason_` elapsed since the
+ // last time `current_reason_` was updated, see DurationsMs().
std::map<QualityLimitationReason, int64_t> durations_ms_;
};
diff --git a/video/quality_limitation_reason_tracker_unittest.cc b/video/quality_limitation_reason_tracker_unittest.cc
index c394535..f550c0d 100644
--- a/video/quality_limitation_reason_tracker_unittest.cc
+++ b/video/quality_limitation_reason_tracker_unittest.cc
@@ -83,19 +83,19 @@
TEST_P(QualityLimitationReasonTrackerTestWithParamReason,
SwitchBetweenReasonsBackAndForth) {
int64_t initial_duration_ms = tracker_.DurationsMs()[reason_];
- // Spend 100 ms in |different_reason_|.
+ // Spend 100 ms in `different_reason_`.
tracker_.SetReason(different_reason_);
fake_clock_.AdvanceTimeMilliseconds(100);
EXPECT_EQ(initial_duration_ms, tracker_.DurationsMs()[reason_]);
- // Spend 50 ms in |reason_|.
+ // Spend 50 ms in `reason_`.
tracker_.SetReason(reason_);
fake_clock_.AdvanceTimeMilliseconds(50);
EXPECT_EQ(initial_duration_ms + 50, tracker_.DurationsMs()[reason_]);
- // Spend another 1000 ms in |different_reason_|.
+ // Spend another 1000 ms in `different_reason_`.
tracker_.SetReason(different_reason_);
fake_clock_.AdvanceTimeMilliseconds(1000);
EXPECT_EQ(initial_duration_ms + 50, tracker_.DurationsMs()[reason_]);
- // Spend another 100 ms in |reason_|.
+ // Spend another 100 ms in `reason_`.
tracker_.SetReason(reason_);
fake_clock_.AdvanceTimeMilliseconds(100);
EXPECT_EQ(initial_duration_ms + 150, tracker_.DurationsMs()[reason_]);
diff --git a/video/receive_statistics_proxy.cc b/video/receive_statistics_proxy.cc
index 7aec685..eb4a37a 100644
--- a/video/receive_statistics_proxy.cc
+++ b/video/receive_statistics_proxy.cc
@@ -25,7 +25,7 @@
namespace webrtc {
namespace {
-// Periodic time interval for processing samples for |freq_offset_counter_|.
+// Periodic time interval for processing samples for `freq_offset_counter_`.
const int64_t kFreqOffsetProcessIntervalMs = 40000;
// Configuration for bad call detection.
@@ -129,7 +129,7 @@
const StreamDataCounters* rtx_stats) {
// Not actually running on the decoder thread, but must be called after
// DecoderThreadStopped, which detaches the thread checker. It is therefore
- // safe to access |qp_counters_|, which were updated on the decode thread
+ // safe to access `qp_counters_`, which were updated on the decode thread
// earlier.
RTC_DCHECK_RUN_ON(&decode_thread_);
diff --git a/video/receive_statistics_proxy.h b/video/receive_statistics_proxy.h
index 57738f2..4efc0f6 100644
--- a/video/receive_statistics_proxy.h
+++ b/video/receive_statistics_proxy.h
@@ -140,7 +140,7 @@
Clock* const clock_;
// Ownership of this object lies with the owner of the ReceiveStatisticsProxy
- // instance. Lifetime is guaranteed to outlive |this|.
+ // instance. Lifetime is guaranteed to outlive `this`.
// TODO(tommi): In practice the config_ reference is only used for accessing
// config_.rtp.ulpfec.ulpfec_payload_type. Instead of holding a pointer back,
// we could just store the value of ulpfec_payload_type and change the
diff --git a/video/receive_statistics_proxy2.cc b/video/receive_statistics_proxy2.cc
index af3cd22..0e700e3 100644
--- a/video/receive_statistics_proxy2.cc
+++ b/video/receive_statistics_proxy2.cc
@@ -29,7 +29,7 @@
namespace webrtc {
namespace internal {
namespace {
-// Periodic time interval for processing samples for |freq_offset_counter_|.
+// Periodic time interval for processing samples for `freq_offset_counter_`.
const int64_t kFreqOffsetProcessIntervalMs = 40000;
// Configuration for bad call detection.
@@ -676,7 +676,7 @@
interframe_delay_max_moving_.Max(*last_decoded_frame_time_ms_)
.value_or(-1);
} else {
- // We're paused. Avoid changing the state of |interframe_delay_max_moving_|.
+ // We're paused. Avoid changing the state of `interframe_delay_max_moving_`.
stats_.interframe_delay_max_ms = -1;
}
@@ -790,8 +790,8 @@
// [main] worker thread.
// So until the sender implementation has been updated, we work around this
// here by posting the update to the expected thread. We make a by value
- // copy of the |task_safety_| to handle the case if the queued task
- // runs after the |ReceiveStatisticsProxy| has been deleted. In such a
+ // copy of the `task_safety_` to handle the case if the queued task
+ // runs after the `ReceiveStatisticsProxy` has been deleted. In such a
// case the packet_counter update won't be recorded.
worker_thread_->PostTask(
ToQueuedTask(task_safety_, [ssrc, packet_counter, this]() {
diff --git a/video/receive_statistics_proxy2_unittest.cc b/video/receive_statistics_proxy2_unittest.cc
index 867a3c3..50f16c5 100644
--- a/video/receive_statistics_proxy2_unittest.cc
+++ b/video/receive_statistics_proxy2_unittest.cc
@@ -1290,7 +1290,7 @@
fake_clock_.AdvanceTimeMilliseconds(kInterFrameDelayMs);
}
- // |kMinRequiredSamples| samples, and thereby intervals, is required. That
+ // `kMinRequiredSamples` samples, and thereby intervals, is required. That
// means we're one frame short of having a valid data set.
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
diff --git a/video/receive_statistics_proxy_unittest.cc b/video/receive_statistics_proxy_unittest.cc
index 626542c..fab8fd3 100644
--- a/video/receive_statistics_proxy_unittest.cc
+++ b/video/receive_statistics_proxy_unittest.cc
@@ -1257,7 +1257,7 @@
fake_clock_.AdvanceTimeMilliseconds(kInterFrameDelayMs);
}
- // |kMinRequiredSamples| samples, and thereby intervals, is required. That
+ // `kMinRequiredSamples` samples, and thereby intervals, is required. That
// means we're one frame short of having a valid data set.
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
diff --git a/video/rtp_streams_synchronizer.h b/video/rtp_streams_synchronizer.h
index 574ccba..b759ad1 100644
--- a/video/rtp_streams_synchronizer.h
+++ b/video/rtp_streams_synchronizer.h
@@ -38,9 +38,9 @@
void Process() override;
// Gets the estimated playout NTP timestamp for the video frame with
- // |rtp_timestamp| and the sync offset between the current played out audio
+ // `rtp_timestamp` and the sync offset between the current played out audio
// frame and the video frame. Returns true on success, false otherwise.
- // The |estimated_freq_khz| is the frequency used in the RTP to NTP timestamp
+ // The `estimated_freq_khz` is the frequency used in the RTP to NTP timestamp
// conversion.
bool GetStreamSyncOffsetInMs(uint32_t rtp_timestamp,
int64_t render_time_ms,
diff --git a/video/rtp_streams_synchronizer2.h b/video/rtp_streams_synchronizer2.h
index 192378a..73fd604 100644
--- a/video/rtp_streams_synchronizer2.h
+++ b/video/rtp_streams_synchronizer2.h
@@ -35,9 +35,9 @@
void ConfigureSync(Syncable* syncable_audio);
// Gets the estimated playout NTP timestamp for the video frame with
- // |rtp_timestamp| and the sync offset between the current played out audio
+ // `rtp_timestamp` and the sync offset between the current played out audio
// frame and the video frame. Returns true on success, false otherwise.
- // The |estimated_freq_khz| is the frequency used in the RTP to NTP timestamp
+ // The `estimated_freq_khz` is the frequency used in the RTP to NTP timestamp
// conversion.
bool GetStreamSyncOffsetInMs(uint32_t rtp_timestamp,
int64_t render_time_ms,
diff --git a/video/rtp_video_stream_receiver.cc b/video/rtp_video_stream_receiver.cc
index a0520cd..53bba59 100644
--- a/video/rtp_video_stream_receiver.cc
+++ b/video/rtp_video_stream_receiver.cc
@@ -268,8 +268,8 @@
config_.rtp.local_ssrc)),
complete_frame_callback_(complete_frame_callback),
keyframe_request_sender_(keyframe_request_sender),
- // TODO(bugs.webrtc.org/10336): Let |rtcp_feedback_buffer_| communicate
- // directly with |rtp_rtcp_|.
+ // TODO(bugs.webrtc.org/10336): Let `rtcp_feedback_buffer_` communicate
+ // directly with `rtp_rtcp_`.
rtcp_feedback_buffer_(this, nack_sender, this),
packet_buffer_(kPacketBufferStartSize, PacketBufferMaxSize()),
reference_finder_(std::make_unique<RtpFrameReferenceFinder>()),
@@ -862,7 +862,7 @@
// In that case, request a key frame ASAP.
if (!has_received_frame_) {
if (frame->FrameType() != VideoFrameType::kVideoFrameKey) {
- // |loss_notification_controller_|, if present, would have already
+ // `loss_notification_controller_`, if present, would have already
// requested a key frame when the first packet for the non-key frame
// had arrived, so no need to replicate the request.
if (!loss_notification_controller_) {
@@ -873,16 +873,16 @@
}
MutexLock lock(&reference_finder_lock_);
- // Reset |reference_finder_| if |frame| is new and the codec have changed.
+ // Reset `reference_finder_` if `frame` is new and the codec has changed.
if (current_codec_) {
bool frame_is_newer =
AheadOf(frame->Timestamp(), last_assembled_frame_rtp_timestamp_);
if (frame->codec_type() != current_codec_) {
if (frame_is_newer) {
- // When we reset the |reference_finder_| we don't want new picture ids
+ // When we reset the `reference_finder_` we don't want new picture ids
// to overlap with old picture ids. To ensure that doesn't happen we
- // start from the |last_completed_picture_id_| and add an offset in
+ // start from the `last_completed_picture_id_` and add an offset in
// case of reordering.
reference_finder_ = std::make_unique<RtpFrameReferenceFinder>(
last_completed_picture_id_ + std::numeric_limits<uint16_t>::max());
diff --git a/video/rtp_video_stream_receiver.h b/video/rtp_video_stream_receiver.h
index b3d62f3..5e63755 100644
--- a/video/rtp_video_stream_receiver.h
+++ b/video/rtp_video_stream_receiver.h
@@ -317,7 +317,7 @@
RTC_RUN_ON(worker_task_checker_);
Clock* const clock_;
- // Ownership of this object lies with VideoReceiveStream, which owns |this|.
+ // Ownership of this object lies with VideoReceiveStream, which owns `this`.
const VideoReceiveStream::Config& config_;
PacketRouter* const packet_router_;
ProcessThread* const process_thread_;
diff --git a/video/rtp_video_stream_receiver2.cc b/video/rtp_video_stream_receiver2.cc
index daddae9..9932cec 100644
--- a/video/rtp_video_stream_receiver2.cc
+++ b/video/rtp_video_stream_receiver2.cc
@@ -242,8 +242,8 @@
config_.rtp.local_ssrc)),
complete_frame_callback_(complete_frame_callback),
keyframe_request_sender_(keyframe_request_sender),
- // TODO(bugs.webrtc.org/10336): Let |rtcp_feedback_buffer_| communicate
- // directly with |rtp_rtcp_|.
+ // TODO(bugs.webrtc.org/10336): Let `rtcp_feedback_buffer_` communicate
+ // directly with `rtp_rtcp_`.
rtcp_feedback_buffer_(this, nack_sender, this),
nack_module_(MaybeConstructNackModule(current_queue,
nack_periodic_processor,
@@ -810,7 +810,7 @@
// In that case, request a key frame ASAP.
if (!has_received_frame_) {
if (frame->FrameType() != VideoFrameType::kVideoFrameKey) {
- // |loss_notification_controller_|, if present, would have already
+ // `loss_notification_controller_`, if present, would have already
// requested a key frame when the first packet for the non-key frame
// had arrived, so no need to replicate the request.
if (!loss_notification_controller_) {
@@ -820,16 +820,16 @@
has_received_frame_ = true;
}
- // Reset |reference_finder_| if |frame| is new and the codec have changed.
+ // Reset `reference_finder_` if `frame` is new and the codec has changed.
if (current_codec_) {
bool frame_is_newer =
AheadOf(frame->Timestamp(), last_assembled_frame_rtp_timestamp_);
if (frame->codec_type() != current_codec_) {
if (frame_is_newer) {
- // When we reset the |reference_finder_| we don't want new picture ids
+ // When we reset the `reference_finder_` we don't want new picture ids
// to overlap with old picture ids. To ensure that doesn't happen we
- // start from the |last_completed_picture_id_| and add an offset in case
+ // start from the `last_completed_picture_id_` and add an offset in case
// of reordering.
reference_finder_ = std::make_unique<RtpFrameReferenceFinder>(
last_completed_picture_id_ + std::numeric_limits<uint16_t>::max());
diff --git a/video/rtp_video_stream_receiver2.h b/video/rtp_video_stream_receiver2.h
index 3b20e1c..45ed655 100644
--- a/video/rtp_video_stream_receiver2.h
+++ b/video/rtp_video_stream_receiver2.h
@@ -284,7 +284,7 @@
RTC_RUN_ON(packet_sequence_checker_);
Clock* const clock_;
- // Ownership of this object lies with VideoReceiveStream, which owns |this|.
+ // Ownership of this object lies with VideoReceiveStream, which owns `this`.
const VideoReceiveStream::Config& config_;
PacketRouter* const packet_router_;
diff --git a/video/rtp_video_stream_receiver2_unittest.cc b/video/rtp_video_stream_receiver2_unittest.cc
index f97ccb6..b5ae816 100644
--- a/video/rtp_video_stream_receiver2_unittest.cc
+++ b/video/rtp_video_stream_receiver2_unittest.cc
@@ -773,8 +773,8 @@
}
TEST_F(RtpVideoStreamReceiver2Test, NonStartedStreamGetsNoRtpCallbacks) {
- // Explicitly showing that the stream is not in the |started| state,
- // regardless of whether streams start out |started| or |stopped|.
+ // Explicitly showing that the stream is not in the `started` state,
+ // regardless of whether streams start out `started` or `stopped`.
rtp_video_stream_receiver_->StopReceive();
MockRtpPacketSink test_sink;
@@ -811,7 +811,7 @@
uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
memcpy(payload, data.data(), data.size());
- // The first byte is the header, so we ignore the first byte of |data|.
+ // The first byte is the header, so we ignore the first byte of `data`.
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
data.size() - 1);
@@ -852,7 +852,7 @@
uint8_t* first_packet_payload = first_packet.SetPayloadSize(data.size());
memcpy(first_packet_payload, data.data(), data.size());
- // The first byte is the header, so we ignore the first byte of |data|.
+ // The first byte is the header, so we ignore the first byte of `data`.
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
data.size() - 1);
@@ -873,7 +873,7 @@
uint8_t* second_packet_payload = second_packet.SetPayloadSize(data.size());
memcpy(second_packet_payload, data.data(), data.size());
- // The first byte is the header, so we ignore the first byte of |data|.
+ // The first byte is the header, so we ignore the first byte of `data`.
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
data.size() - 1);
diff --git a/video/rtp_video_stream_receiver_frame_transformer_delegate.h b/video/rtp_video_stream_receiver_frame_transformer_delegate.h
index ef05d91..e2472fa 100644
--- a/video/rtp_video_stream_receiver_frame_transformer_delegate.h
+++ b/video/rtp_video_stream_receiver_frame_transformer_delegate.h
@@ -32,7 +32,7 @@
};
// Delegates calls to FrameTransformerInterface to transform frames, and to
-// RtpVideoStreamReceiver to manage transformed frames on the |network_thread_|.
+// RtpVideoStreamReceiver to manage transformed frames on the `network_thread_`.
class RtpVideoStreamReceiverFrameTransformerDelegate
: public TransformedFrameCallback {
public:
@@ -49,12 +49,12 @@
void TransformFrame(std::unique_ptr<RtpFrameObject> frame);
// Implements TransformedFrameCallback. Can be called on any thread. Posts
- // the transformed frame to be managed on the |network_thread_|.
+ // the transformed frame to be managed on the `network_thread_`.
void OnTransformedFrame(
std::unique_ptr<TransformableFrameInterface> frame) override;
// Delegates the call to RtpVideoFrameReceiver::ManageFrame on the
- // |network_thread_|.
+ // `network_thread_`.
void ManageFrame(std::unique_ptr<TransformableFrameInterface> frame);
protected:
diff --git a/video/rtp_video_stream_receiver_unittest.cc b/video/rtp_video_stream_receiver_unittest.cc
index 765e1e1..901d693 100644
--- a/video/rtp_video_stream_receiver_unittest.cc
+++ b/video/rtp_video_stream_receiver_unittest.cc
@@ -797,8 +797,8 @@
TEST_F(RtpVideoStreamReceiverTest,
SecondariesOfNonStartedStreamGetNoNotifications) {
- // Explicitly showing that the stream is not in the |started| state,
- // regardless of whether streams start out |started| or |stopped|.
+ // Explicitly showing that the stream is not in the `started` state,
+ // regardless of whether streams start out `started` or `stopped`.
rtp_video_stream_receiver_->StopReceive();
MockRtpPacketSink secondary_sink;
@@ -836,7 +836,7 @@
uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
memcpy(payload, data.data(), data.size());
- // The first byte is the header, so we ignore the first byte of |data|.
+ // The first byte is the header, so we ignore the first byte of `data`.
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
data.size() - 1);
@@ -877,7 +877,7 @@
uint8_t* first_packet_payload = first_packet.SetPayloadSize(data.size());
memcpy(first_packet_payload, data.data(), data.size());
- // The first byte is the header, so we ignore the first byte of |data|.
+ // The first byte is the header, so we ignore the first byte of `data`.
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
data.size() - 1);
@@ -898,7 +898,7 @@
uint8_t* second_packet_payload = second_packet.SetPayloadSize(data.size());
memcpy(second_packet_payload, data.data(), data.size());
- // The first byte is the header, so we ignore the first byte of |data|.
+ // The first byte is the header, so we ignore the first byte of `data`.
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
data.size() - 1);
diff --git a/video/send_statistics_proxy.cc b/video/send_statistics_proxy.cc
index 1b968ef..8a6712b 100644
--- a/video/send_statistics_proxy.cc
+++ b/video/send_statistics_proxy.cc
@@ -871,7 +871,7 @@
return;
}
if (is_active && (pixels > *fallback_max_pixels_)) {
- // Pixels should not be above |fallback_max_pixels_|. If above skip to
+ // Pixels should not be above `fallback_max_pixels_`. If above skip to
// avoid fallbacks due to failure.
fallback_info->is_possible = false;
return;
@@ -882,7 +882,7 @@
if (fallback_info->last_update_ms) {
int64_t diff_ms = now_ms - *(fallback_info->last_update_ms);
- // If the time diff since last update is greater than |max_frame_diff_ms|,
+ // If the time diff since last update is greater than `max_frame_diff_ms`,
// video is considered paused/muted and the change is not included.
if (diff_ms < fallback_info->max_frame_diff_ms) {
uma_container_->fallback_active_counter_.Add(fallback_info->is_active,
@@ -1229,7 +1229,7 @@
}
// Informes observer if an internal encoder scaler has reduced video
-// resolution or not. |is_scaled| is a flag indicating if the video is scaled
+// resolution or not. `is_scaled` is a flag indicating if the video is scaled
// down.
void SendStatisticsProxy::OnEncoderInternalScalerUpdate(bool is_scaled) {
MutexLock lock(&mutex_);
diff --git a/video/send_statistics_proxy_unittest.cc b/video/send_statistics_proxy_unittest.cc
index d4a7a49..cfb1905 100644
--- a/video/send_statistics_proxy_unittest.cc
+++ b/video/send_statistics_proxy_unittest.cc
@@ -411,7 +411,7 @@
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
// On the first frame we don't know the frame rate yet, calculation yields
// zero. Our estimate assumes at least 1 FPS, so we expect the frame size to
- // increment by a full |kTargetBytesPerSecond|.
+ // increment by a full `kTargetBytesPerSecond`.
EXPECT_EQ(kTargetBytesPerSecond,
statistics_proxy_->GetStats().total_encoded_bytes_target);
}
@@ -422,7 +422,7 @@
const int kInterframeDelayMs = 100;
// SendStatisticsProxy uses a RateTracker internally. SendStatisticsProxy uses
- // |fake_clock_| for testing, but the RateTracker relies on a global clock.
+ // `fake_clock_` for testing, but the RateTracker relies on a global clock.
// This test relies on rtc::ScopedFakeClock to synchronize these two clocks.
// TODO(https://crbug.com/webrtc/10640): When the RateTracker uses a Clock
// this test can stop relying on rtc::ScopedFakeClock.
@@ -447,7 +447,7 @@
auto stats = statistics_proxy_->GetStats();
// By the time the second frame arrives, one frame has previously arrived
- // during a |kInterframeDelayMs| interval. The estimated encode frame rate at
+ // during a `kInterframeDelayMs` interval. The estimated encode frame rate at
// the second frame's arrival should be 10 FPS.
uint64_t delta_encoded_bytes_target =
stats.total_encoded_bytes_target - first_total_encoded_bytes_target;
diff --git a/video/stats_counter.cc b/video/stats_counter.cc
index 1e72a79..fed9f66 100644
--- a/video/stats_counter.cc
+++ b/video/stats_counter.cc
@@ -228,7 +228,7 @@
if (diff_ms < process_intervals_ms_)
return false;
- // Advance number of complete |process_intervals_ms_| that have passed.
+ // Advance number of complete `process_intervals_ms_` that have passed.
int64_t num_intervals = diff_ms / process_intervals_ms_;
last_process_time_ms_ += num_intervals * process_intervals_ms_;
@@ -338,7 +338,7 @@
int64_t process_intervals_ms)
: StatsCounter(clock,
process_intervals_ms,
- false, // |include_empty_intervals|
+ false, // `include_empty_intervals`
observer) {}
void MaxCounter::Add(int sample) {
@@ -361,7 +361,7 @@
PercentCounter::PercentCounter(Clock* clock, StatsCounterObserver* observer)
: StatsCounter(clock,
kDefaultProcessIntervalMs,
- false, // |include_empty_intervals|
+ false, // `include_empty_intervals`
observer) {}
void PercentCounter::Add(bool sample) {
@@ -385,7 +385,7 @@
PermilleCounter::PermilleCounter(Clock* clock, StatsCounterObserver* observer)
: StatsCounter(clock,
kDefaultProcessIntervalMs,
- false, // |include_empty_intervals|
+ false, // `include_empty_intervals`
observer) {}
void PermilleCounter::Add(bool sample) {
diff --git a/video/stats_counter.h b/video/stats_counter.h
index fb6017f..9c3f6f8 100644
--- a/video/stats_counter.h
+++ b/video/stats_counter.h
@@ -22,7 +22,7 @@
class Clock;
class Samples;
-// |StatsCounterObserver| is called periodically when a metric is updated.
+// `StatsCounterObserver` is called periodically when a metric is updated.
class StatsCounterObserver {
public:
virtual void OnMetricUpdated(int sample) = 0;
@@ -43,13 +43,13 @@
// Classes which periodically computes a metric.
//
-// During a period, |kProcessIntervalMs|, different metrics can be computed e.g:
-// - |AvgCounter|: average of samples
-// - |PercentCounter|: percentage of samples
-// - |PermilleCounter|: permille of samples
+// During a period, `kProcessIntervalMs`, different metrics can be computed e.g:
+// - `AvgCounter`: average of samples
+// - `PercentCounter`: percentage of samples
+// - `PermilleCounter`: permille of samples
//
// Each periodic metric can be either:
-// - reported to an |observer| each period
+// - reported to an `observer` each period
// - aggregated during the call (e.g. min, max, average)
//
// periodically computed
@@ -76,7 +76,7 @@
// stats: {min:4, max:15, avg:8}
//
-// Note: StatsCounter takes ownership of |observer|.
+// Note: StatsCounter takes ownership of `observer`.
class StatsCounter {
public:
@@ -145,7 +145,7 @@
// | Add(5) Add(1) Add(6) | Add(5) Add(5) |
// GetMetric | (5 + 1 + 6) / 3 | (5 + 5) / 2 |
//
-// |include_empty_intervals|: If set, intervals without samples will be included
+// `include_empty_intervals`: If set, intervals without samples will be included
// in the stats. The value for an interval is
// determined by GetValueForEmptyInterval().
//
@@ -236,7 +236,7 @@
// |<------ 2 sec ------->| |
// GetMetric | (5 + 1 + 6) / 2 | (5 + 5) / 2 |
//
-// |include_empty_intervals|: If set, intervals without samples will be included
+// `include_empty_intervals`: If set, intervals without samples will be included
// in the stats. The value for an interval is
// determined by GetValueForEmptyInterval().
//
@@ -263,7 +263,7 @@
// |<------ 2 sec ------->| |
// GetMetric | (8 - 0) / 2 | (13 - 8) / 2 |
//
-// |include_empty_intervals|: If set, intervals without samples will be included
+// `include_empty_intervals`: If set, intervals without samples will be included
// in the stats. The value for an interval is
// determined by GetValueForEmptyInterval().
//
diff --git a/video/stream_synchronization.h b/video/stream_synchronization.h
index 2da6a49..61073cb 100644
--- a/video/stream_synchronization.h
+++ b/video/stream_synchronization.h
@@ -33,15 +33,15 @@
int* total_audio_delay_target_ms,
int* total_video_delay_target_ms);
- // On success |relative_delay_ms| contains the number of milliseconds later
+ // On success `relative_delay_ms` contains the number of milliseconds later
// video is rendered relative audio. If audio is played back later than video
- // |relative_delay_ms| will be negative.
+ // `relative_delay_ms` will be negative.
static bool ComputeRelativeDelay(const Measurements& audio_measurement,
const Measurements& video_measurement,
int* relative_delay_ms);
// Set target buffering delay. Audio and video will be delayed by at least
- // |target_delay_ms|.
+ // `target_delay_ms`.
void SetTargetBufferingDelay(int target_delay_ms);
// Lowers the audio delay by 10%. Can be used to recover from errors.
diff --git a/video/stream_synchronization_unittest.cc b/video/stream_synchronization_unittest.cc
index 3d6fdd8..5c6c79f 100644
--- a/video/stream_synchronization_unittest.cc
+++ b/video/stream_synchronization_unittest.cc
@@ -32,9 +32,9 @@
protected:
// Generates the necessary RTCP measurements and RTP timestamps and computes
// the audio and video delays needed to get the two streams in sync.
- // |audio_delay_ms| and |video_delay_ms| are the number of milliseconds after
+ // `audio_delay_ms` and `video_delay_ms` are the number of milliseconds after
// capture which the frames are received.
- // |current_audio_delay_ms| is the number of milliseconds which audio is
+ // `current_audio_delay_ms` is the number of milliseconds which audio is
// currently being delayed by the receiver.
bool DelayedStreams(int audio_delay_ms,
int video_delay_ms,
diff --git a/video/video_analyzer.cc b/video/video_analyzer.cc
index b90ba29..62ee7b43 100644
--- a/video/video_analyzer.cc
+++ b/video/video_analyzer.cc
@@ -458,10 +458,10 @@
}
void VideoAnalyzer::PollStats() {
- // Do not grab |comparison_lock_|, before |GetStats()| completes.
+ // Do not grab `comparison_lock_`, before `GetStats()` completes.
// Otherwise a deadlock may occur:
- // 1) |comparison_lock_| is acquired after |lock_|
- // 2) |lock_| is acquired after internal pacer lock in SendRtp()
+ // 1) `comparison_lock_` is acquired after `lock_`
+ // 2) `lock_` is acquired after internal pacer lock in SendRtp()
// 3) internal pacer lock is acquired by GetStats().
Call::Stats call_stats = call_->GetStats();
@@ -490,8 +490,8 @@
if (receive_stream_ != nullptr) {
VideoReceiveStream::Stats receive_stats = receive_stream_->GetStats();
- // |total_decode_time_ms| gives a good estimate of the mean decode time,
- // |decode_ms| is used to keep track of the standard deviation.
+ // `total_decode_time_ms` gives a good estimate of the mean decode time,
+ // `decode_ms` is used to keep track of the standard deviation.
if (receive_stats.frames_decoded > 0)
mean_decode_time_ms_ =
static_cast<double>(receive_stats.total_decode_time_ms) /
@@ -504,8 +504,8 @@
pixels_.AddSample(receive_stats.width * receive_stats.height);
}
- // |frames_decoded| and |frames_rendered| are used because they are more
- // accurate than |decode_frame_rate| and |render_frame_rate|.
+ // `frames_decoded` and `frames_rendered` are used because they are more
+ // accurate than `decode_frame_rate` and `render_frame_rate`.
// The latter two are calculated on a momentary basis.
const double total_frames_duration_sec_double =
static_cast<double>(receive_stats.total_frames_duration_ms) / 1000.0;
diff --git a/video/video_analyzer.h b/video/video_analyzer.h
index 68861d1..bb08fbc 100644
--- a/video/video_analyzer.h
+++ b/video/video_analyzer.h
@@ -162,7 +162,7 @@
const rtc::VideoSinkWants& wants)
RTC_LOCKS_EXCLUDED(lock_) override;
- // Called by |send_stream_| when |send_stream_.SetSource()| is called.
+ // Called by `send_stream_` when `send_stream_.SetSource()` is called.
void RemoveSink(rtc::VideoSinkInterface<VideoFrame>* sink)
RTC_LOCKS_EXCLUDED(lock_) override;
diff --git a/video/video_quality_observer2.h b/video/video_quality_observer2.h
index ed5a0b9..3587785 100644
--- a/video/video_quality_observer2.h
+++ b/video/video_quality_observer2.h
@@ -51,7 +51,7 @@
uint32_t TotalFramesDurationMs() const;
double SumSquaredFrameDurationsSec() const;
- // Set |screenshare| to true if the last decoded frame was for screenshare.
+ // Set `screenshare` to true if the last decoded frame was for screenshare.
void UpdateHistograms(bool screenshare);
static const uint32_t kMinFrameSamplesToDetectFreeze;
diff --git a/video/video_receive_stream.cc b/video/video_receive_stream.cc
index da8eb7d..da5701c 100644
--- a/video/video_receive_stream.cc
+++ b/video/video_receive_stream.cc
@@ -380,7 +380,7 @@
new VideoStreamDecoder(&video_receiver_, &stats_proxy_, renderer));
// Make sure we register as a stats observer *after* we've prepared the
- // |video_stream_decoder_|.
+ // `video_stream_decoder_`.
call_stats_->RegisterStatsObserver(this);
// Start decoding on task queue.
diff --git a/video/video_receive_stream.h b/video/video_receive_stream.h
index c778d74..637b91a 100644
--- a/video/video_receive_stream.h
+++ b/video/video_receive_stream.h
@@ -210,9 +210,9 @@
mutable Mutex playout_delay_lock_;
- // All of them tries to change current min_playout_delay on |timing_| but
+ // All of them try to change current min_playout_delay on `timing_` but
// source of the change request is different in each case. Among them the
- // biggest delay is used. -1 means use default value from the |timing_|.
+ // biggest delay is used. -1 means use default value from the `timing_`.
//
// Minimum delay as decided by the RTP playout delay extension.
int frame_minimum_playout_delay_ms_ RTC_GUARDED_BY(playout_delay_lock_) = -1;
diff --git a/video/video_receive_stream2.cc b/video/video_receive_stream2.cc
index 0bb2307..ce1eb7e 100644
--- a/video/video_receive_stream2.cc
+++ b/video/video_receive_stream2.cc
@@ -400,7 +400,7 @@
new VideoStreamDecoder(&video_receiver_, &stats_proxy_, renderer));
// Make sure we register as a stats observer *after* we've prepared the
- // |video_stream_decoder_|.
+ // `video_stream_decoder_`.
call_stats_->RegisterStatsObserver(this);
// Start decoding on task queue.
@@ -739,7 +739,7 @@
void VideoReceiveStream2::HandleEncodedFrame(
std::unique_ptr<EncodedFrame> frame) {
- // Running on |decode_queue_|.
+ // Running on `decode_queue_`.
int64_t now_ms = clock_->TimeInMilliseconds();
// Current OnPreDecode only cares about QP for VP8.
@@ -810,7 +810,7 @@
std::unique_ptr<EncodedFrame> frame) {
// Running on decode_queue_.
- // If |buffered_encoded_frames_| grows out of control (=60 queued frames),
+ // If `buffered_encoded_frames_` grows out of control (=60 queued frames),
// maybe due to a stuck decoder, we just halt the process here and log the
// error.
const bool encoded_frame_output_enabled =
@@ -841,7 +841,7 @@
absl::optional<RecordableEncodedFrame::EncodedResolution>
pending_resolution;
{
- // Fish out |pending_resolution_| to avoid taking the mutex on every lap
+ // Fish out `pending_resolution_` to avoid taking the mutex on every lap
// or dispatching under the mutex in the flush loop.
webrtc::MutexLock lock(&pending_resolution_mutex_);
if (pending_resolution_.has_value())
diff --git a/video/video_receive_stream2.h b/video/video_receive_stream2.h
index 2d19514..8a1418a 100644
--- a/video/video_receive_stream2.h
+++ b/video/video_receive_stream2.h
@@ -263,9 +263,9 @@
const int max_wait_for_keyframe_ms_;
const int max_wait_for_frame_ms_;
- // All of them tries to change current min_playout_delay on |timing_| but
+ // All of them try to change current min_playout_delay on `timing_` but
// source of the change request is different in each case. Among them the
- // biggest delay is used. -1 means use default value from the |timing_|.
+ // biggest delay is used. -1 means use default value from the `timing_`.
//
// Minimum delay as decided by the RTP playout delay extension.
int frame_minimum_playout_delay_ms_ RTC_GUARDED_BY(worker_sequence_checker_) =
@@ -298,17 +298,17 @@
std::vector<std::unique_ptr<EncodedFrame>> buffered_encoded_frames_
RTC_GUARDED_BY(decode_queue_);
- // Set by the field trial WebRTC-LowLatencyRenderer. The parameter |enabled|
+ // Set by the field trial WebRTC-LowLatencyRenderer. The parameter `enabled`
// determines if the low-latency renderer algorithm should be used for the
// case min playout delay=0 and max playout delay>0.
FieldTrialParameter<bool> low_latency_renderer_enabled_;
// Set by the field trial WebRTC-LowLatencyRenderer. The parameter
- // |include_predecode_buffer| determines if the predecode buffer should be
+ // `include_predecode_buffer` determines if the predecode buffer should be
// taken into account when calculating maximum number of frames in composition
// queue.
FieldTrialParameter<bool> low_latency_renderer_include_predecode_buffer_;
- // Set by the field trial WebRTC-PreStreamDecoders. The parameter |max|
+ // Set by the field trial WebRTC-PreStreamDecoders. The parameter `max`
// determines the maximum number of decoders that are created up front before
// any video frame has been received.
FieldTrialParameter<int> maximum_pre_stream_decoders_;
diff --git a/video/video_receive_stream2_unittest.cc b/video/video_receive_stream2_unittest.cc
index 0b0ea35..6fb4234 100644
--- a/video/video_receive_stream2_unittest.cc
+++ b/video/video_receive_stream2_unittest.cc
@@ -228,7 +228,7 @@
video_receive_stream_->OnCompleteFrame(std::move(test_frame));
- // Ensure that -1 preserves default maximum value from |timing_|.
+ // Ensure that -1 preserves default maximum value from `timing_`.
EXPECT_EQ(kPlayoutDelayMs.min_ms, timing_->min_playout_delay());
EXPECT_NE(kPlayoutDelayMs.max_ms, timing_->max_playout_delay());
EXPECT_EQ(default_max_playout_latency, timing_->max_playout_delay());
@@ -244,7 +244,7 @@
video_receive_stream_->OnCompleteFrame(std::move(test_frame));
- // Ensure that -1 preserves default minimum value from |timing_|.
+ // Ensure that -1 preserves default minimum value from `timing_`.
EXPECT_NE(kPlayoutDelayMs.min_ms, timing_->min_playout_delay());
EXPECT_EQ(kPlayoutDelayMs.max_ms, timing_->max_playout_delay());
EXPECT_EQ(default_min_playout_latency, timing_->min_playout_delay());
@@ -430,7 +430,7 @@
// Verify that the per-packet information is passed to the renderer.
EXPECT_THAT(fake_renderer_.packet_infos(), ElementsAreArray(packet_infos));
- // Verify that the per-packet information also updates |GetSources()|.
+ // Verify that the per-packet information also updates `GetSources()`.
std::vector<RtpSource> sources = video_receive_stream_->GetSources();
ASSERT_THAT(sources, SizeIs(2));
{
diff --git a/video/video_receive_stream_unittest.cc b/video/video_receive_stream_unittest.cc
index cb14f7d..d7398b8 100644
--- a/video/video_receive_stream_unittest.cc
+++ b/video/video_receive_stream_unittest.cc
@@ -205,7 +205,7 @@
video_receive_stream_->OnCompleteFrame(std::move(test_frame));
- // Ensure that -1 preserves default maximum value from |timing_|.
+ // Ensure that -1 preserves default maximum value from `timing_`.
EXPECT_EQ(kPlayoutDelayMs.min_ms, timing_->min_playout_delay());
EXPECT_NE(kPlayoutDelayMs.max_ms, timing_->max_playout_delay());
EXPECT_EQ(default_max_playout_latency, timing_->max_playout_delay());
@@ -221,7 +221,7 @@
video_receive_stream_->OnCompleteFrame(std::move(test_frame));
- // Ensure that -1 preserves default minimum value from |timing_|.
+ // Ensure that -1 preserves default minimum value from `timing_`.
EXPECT_NE(kPlayoutDelayMs.min_ms, timing_->min_playout_delay());
EXPECT_EQ(kPlayoutDelayMs.max_ms, timing_->max_playout_delay());
EXPECT_EQ(default_min_playout_latency, timing_->min_playout_delay());
@@ -363,7 +363,7 @@
// Verify that the per-packet information is passed to the renderer.
EXPECT_THAT(fake_renderer_.packet_infos(), ElementsAreArray(packet_infos));
- // Verify that the per-packet information also updates |GetSources()|.
+ // Verify that the per-packet information also updates `GetSources()`.
std::vector<RtpSource> sources = video_receive_stream_->GetSources();
ASSERT_THAT(sources, SizeIs(2));
{
diff --git a/video/video_send_stream_impl.cc b/video/video_send_stream_impl.cc
index 3fc6b67..862dfde 100644
--- a/video/video_send_stream_impl.cc
+++ b/video/video_send_stream_impl.cc
@@ -102,7 +102,7 @@
if (is_svc) {
// For SVC, since there is only one "stream", the padding bitrate
// needed to enable the top spatial layer is stored in the
- // |target_bitrate_bps| field.
+ // `target_bitrate_bps` field.
// TODO(sprang): This behavior needs to die.
pad_up_to_bitrate_bps = static_cast<int>(
hysteresis_factor * active_streams[0].target_bitrate_bps + 0.5);
diff --git a/video/video_send_stream_impl.h b/video/video_send_stream_impl.h
index babf1dc..5ee4d19 100644
--- a/video/video_send_stream_impl.h
+++ b/video/video_send_stream_impl.h
@@ -107,7 +107,7 @@
VideoLayersAllocation allocation) override;
// Implements EncodedImageCallback. The implementation routes encoded frames
- // to the |payload_router_| and |config.pre_encode_callback| if set.
+ // to the `payload_router_` and `config.pre_encode_callback` if set.
// Called on an arbitrary encoder callback thread.
EncodedImageCallback::Result OnEncodedImage(
const EncodedImage& encoded_image,
diff --git a/video/video_send_stream_tests.cc b/video/video_send_stream_tests.cc
index 0bda716..5703f15 100644
--- a/video/video_send_stream_tests.cc
+++ b/video/video_send_stream_tests.cc
@@ -1322,7 +1322,7 @@
(last_packet_time_ms_ &&
clock_->TimeInMilliseconds() - last_packet_time_ms_.value() >
kNoPacketsThresholdMs)) {
- // No packets seen for |kNoPacketsThresholdMs|, restart camera.
+ // No packets seen for `kNoPacketsThresholdMs`, restart camera.
capturer_->Start();
test_state_ = kWaitingForMediaAfterCameraRestart;
}
@@ -1461,7 +1461,7 @@
bitrate_capped_(false) {}
~BitrateObserver() override {
- // Make sure we free |rtp_rtcp_| in the same context as we constructed it.
+ // Make sure we free `rtp_rtcp_` in the same context as we constructed it.
SendTask(RTC_FROM_HERE, task_queue_, [this]() { rtp_rtcp_ = nullptr; });
}
@@ -1551,7 +1551,7 @@
~ChangingNetworkRouteTest() {
// Block until all already posted tasks run to avoid 'use after free'
- // when such task accesses |this|.
+ // when such task accesses `this`.
SendTask(RTC_FROM_HERE, task_queue_, [] {});
}
@@ -1677,7 +1677,7 @@
~RelayToDirectRouteTest() {
// Block until all already posted tasks run to avoid 'use after free'
- // when such task accesses |this|.
+ // when such task accesses `this`.
SendTask(RTC_FROM_HERE, task_queue_, [] {});
}
@@ -1848,7 +1848,7 @@
~MaxPaddingSetTest() {
// Block until all already posted tasks run to avoid 'use after free'
- // when such task accesses |this|.
+ // when such task accesses `this`.
SendTask(RTC_FROM_HERE, task_queue_, [] {});
}
@@ -1889,7 +1889,7 @@
RTC_DCHECK_RUN_ON(&task_queue_thread_);
// In case we get a callback during teardown.
// When this happens, OnStreamsStopped() has been called already,
- // |call_| is null and the streams are being torn down.
+ // `call_` is null and the streams are being torn down.
if (!call_)
return;
@@ -1925,7 +1925,7 @@
return SEND_PACKET;
}
- // Called on |task_queue_|
+ // Called on `task_queue_`
void OnStreamsStopped() override {
RTC_DCHECK_RUN_ON(&task_queue_thread_);
RTC_DCHECK(task_queue_->IsCurrent());
@@ -3788,7 +3788,7 @@
TEST_F(VideoSendStreamTest, AlrConfiguredWhenSendSideOn) {
test::ScopedFieldTrials alr_experiment(GetAlrProbingExperimentString());
- // Send-side bwe on, use pacing factor from |kAlrProbingExperiment| above.
+ // Send-side bwe on, use pacing factor from `kAlrProbingExperiment` above.
PacingFactorObserver test_with_send_side(true,
kAlrProbingExperimentPaceMultiplier);
RunBaseTest(&test_with_send_side);
diff --git a/video/video_source_sink_controller.h b/video/video_source_sink_controller.h
index c61084f..d2e3267 100644
--- a/video/video_source_sink_controller.h
+++ b/video/video_source_sink_controller.h
@@ -74,8 +74,8 @@
// Pixel and frame rate restrictions.
VideoSourceRestrictions restrictions_ RTC_GUARDED_BY(&sequence_checker_);
// Ensures that even if we are not restricted, the sink is never configured
- // above this limit. Example: We are not CPU limited (no |restrictions_|) but
- // our encoder is capped at 30 fps (= |frame_rate_upper_limit_|).
+ // above this limit. Example: We are not CPU limited (no `restrictions_`) but
+ // our encoder is capped at 30 fps (= `frame_rate_upper_limit_`).
absl::optional<size_t> pixels_per_frame_upper_limit_
RTC_GUARDED_BY(&sequence_checker_);
absl::optional<double> frame_rate_upper_limit_
diff --git a/video/video_source_sink_controller_unittest.cc b/video/video_source_sink_controller_unittest.cc
index 66881cd..9338863 100644
--- a/video/video_source_sink_controller_unittest.cc
+++ b/video/video_source_sink_controller_unittest.cc
@@ -80,11 +80,11 @@
VideoSourceSinkController controller(&sink, &source);
VideoSourceRestrictions restrictions = controller.restrictions();
- // max_pixels_per_frame() maps to |max_pixel_count|.
+ // max_pixels_per_frame() maps to `max_pixel_count`.
restrictions.set_max_pixels_per_frame(42u);
- // target_pixels_per_frame() maps to |target_pixel_count|.
+ // target_pixels_per_frame() maps to `target_pixel_count`.
restrictions.set_target_pixels_per_frame(200u);
- // max_frame_rate() maps to |max_framerate_fps|.
+ // max_frame_rate() maps to `max_framerate_fps`.
restrictions.set_max_frame_rate(30.0);
controller.SetRestrictions(restrictions);
EXPECT_CALL(source, AddOrUpdateSink(_, _))
@@ -96,9 +96,9 @@
});
controller.PushSourceSinkSettings();
- // pixels_per_frame_upper_limit() caps |max_pixel_count|.
+ // pixels_per_frame_upper_limit() caps `max_pixel_count`.
controller.SetPixelsPerFrameUpperLimit(24);
- // frame_rate_upper_limit() caps |max_framerate_fps|.
+ // frame_rate_upper_limit() caps `max_framerate_fps`.
controller.SetFrameRateUpperLimit(10.0);
EXPECT_CALL(source, AddOrUpdateSink(_, _))
diff --git a/video/video_stream_decoder.cc b/video/video_stream_decoder.cc
index 49021bc..7fe0ade 100644
--- a/video/video_stream_decoder.cc
+++ b/video/video_stream_decoder.cc
@@ -37,7 +37,7 @@
video_receiver_->RegisterReceiveCallback(nullptr);
}
-// Do not acquire the lock of |video_receiver_| in this function. Decode
+// Do not acquire the lock of `video_receiver_` in this function. Decode
// callback won't necessarily be called from the decoding thread. The decoding
// thread may have held the lock when calling VideoDecoder::Decode, Reset, or
// Release. Acquiring the same lock in the path of decode callback can deadlock.
diff --git a/video/video_stream_decoder2.cc b/video/video_stream_decoder2.cc
index a73bb64..6ef6002 100644
--- a/video/video_stream_decoder2.cc
+++ b/video/video_stream_decoder2.cc
@@ -38,7 +38,7 @@
video_receiver_->RegisterReceiveCallback(nullptr);
}
-// Do not acquire the lock of |video_receiver_| in this function. Decode
+// Do not acquire the lock of `video_receiver_` in this function. Decode
// callback won't necessarily be called from the decoding thread. The decoding
// thread may have held the lock when calling VideoDecoder::Decode, Reset, or
// Release. Acquiring the same lock in the path of decode callback can deadlock.
diff --git a/video/video_stream_decoder_impl.cc b/video/video_stream_decoder_impl.cc
index b6d754e..4c52d35 100644
--- a/video/video_stream_decoder_impl.cc
+++ b/video/video_stream_decoder_impl.cc
@@ -185,7 +185,7 @@
}
case video_coding::FrameBuffer::kTimeout: {
callbacks_->OnNonDecodableState();
- // The |frame_buffer_| requires the frame callback function to complete
+ // The `frame_buffer_` requires the frame callback function to complete
// before NextFrame is called again. For this reason we call
// StartNextDecode in a later task to allow this task to complete first.
bookkeeping_queue_.PostTask([this]() {
diff --git a/video/video_stream_decoder_impl.h b/video/video_stream_decoder_impl.h
index 106f383..2cb6513 100644
--- a/video/video_stream_decoder_impl.h
+++ b/video/video_stream_decoder_impl.h
@@ -102,14 +102,14 @@
std::map<int, std::pair<SdpVideoFormat, int>> decoder_settings_
RTC_GUARDED_BY(decode_queue_);
- // The |bookkeeping_queue_| use the |frame_buffer_| and also posts tasks to
- // the |decode_queue_|. The |decode_queue_| in turn use the |decoder_| to
- // decode frames. When the |decoder_| is done it will post back to the
- // |bookkeeping_queue_| with the decoded frame. During shutdown we start by
- // isolating the |bookkeeping_queue_| from the |decode_queue_|, so now it's
- // safe for the |decode_queue_| to be destructed. After that the |decoder_|
- // can be destructed, and then the |bookkeeping_queue_|. Finally the
- // |frame_buffer_| can be destructed.
+ // The `bookkeeping_queue_` use the `frame_buffer_` and also posts tasks to
+ // the `decode_queue_`. The `decode_queue_` in turn use the `decoder_` to
+ // decode frames. When the `decoder_` is done it will post back to the
+ // `bookkeeping_queue_` with the decoded frame. During shutdown we start by
+ // isolating the `bookkeeping_queue_` from the `decode_queue_`, so now it's
+ // safe for the `decode_queue_` to be destructed. After that the `decoder_`
+ // can be destructed, and then the `bookkeeping_queue_`. Finally the
+ // `frame_buffer_` can be destructed.
Mutex shut_down_mutex_;
bool shut_down_ RTC_GUARDED_BY(shut_down_mutex_);
video_coding::FrameBuffer frame_buffer_ RTC_GUARDED_BY(bookkeeping_queue_);
diff --git a/video/video_stream_encoder.cc b/video/video_stream_encoder.cc
index 7eac426..b56edbe 100644
--- a/video/video_stream_encoder.cc
+++ b/video/video_stream_encoder.cc
@@ -877,7 +877,7 @@
encoder_reset_required = true;
}
- // Possibly adjusts scale_resolution_down_by in |encoder_config_| to limit the
+  // Possibly adjusts `scale_resolution_down_by` in `encoder_config_` to limit the
// alignment value.
AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(
encoder_->GetEncoderInfo(), &encoder_config_, absl::nullopt);
@@ -1437,7 +1437,7 @@
return;
}
- // |bitrate_allocation| is 0 it means that the network is down or the send
+  // If `bitrate_allocation` is 0 it means that the network is down or the send
// pacer is full. We currently only report this if the encoder has an internal
// source. If the encoder does not have an internal source, higher levels
// are expected to not call AddVideoFrame. We do this since it is unclear
@@ -1524,7 +1524,7 @@
if (last_encoder_rate_settings_) {
// Clone rate settings before update, so that SetEncoderRates() will
// actually detect the change between the input and
- // |last_encoder_rate_setings_|, triggering the call to SetRate() on the
+  // `last_encoder_rate_settings_`, triggering the call to SetRate() on the
// encoder.
EncoderRateSettings new_rate_settings = *last_encoder_rate_settings_;
new_rate_settings.rate_control.framerate_fps =
@@ -1869,7 +1869,7 @@
// Currently internal quality scaler is used for VP9 instead of webrtc qp
// scaler (in no-svc case or if only a single spatial layer is encoded).
// It has to be explicitly detected and reported to adaptation metrics.
- // Post a task because |send_codec_| requires |encoder_queue_| lock.
+ // Post a task because `send_codec_` requires `encoder_queue_` lock.
unsigned int image_width = image_copy._encodedWidth;
unsigned int image_height = image_copy._encodedHeight;
encoder_queue_.PostTask([this, codec_type, image_width, image_height] {
diff --git a/video/video_stream_encoder.h b/video/video_stream_encoder.h
index 9e70203..2b18155 100644
--- a/video/video_stream_encoder.h
+++ b/video/video_stream_encoder.h
@@ -115,8 +115,8 @@
double cwnd_reduce_ratio);
protected:
- // Used for testing. For example the |ScalingObserverInterface| methods must
- // be called on |encoder_queue_|.
+ // Used for testing. For example the `ScalingObserverInterface` methods must
+ // be called on `encoder_queue_`.
rtc::TaskQueue* encoder_queue() { return &encoder_queue_; }
void OnVideoSourceRestrictionsUpdated(
@@ -159,7 +159,7 @@
VideoEncoder::RateControlParameters rate_control;
// This is the scalar target bitrate before the VideoBitrateAllocator, i.e.
- // the |target_bitrate| argument of the OnBitrateUpdated() method. This is
+ // the `target_bitrate` argument of the OnBitrateUpdated() method. This is
// needed because the bitrate allocator may truncate the total bitrate and a
// later call to the same allocator instance, e.g.
// |using last_encoder_rate_setings_->bitrate.get_sum_bps()|, may trick it
@@ -197,7 +197,7 @@
void TraceFrameDropStart();
void TraceFrameDropEnd();
- // Returns a copy of |rate_settings| with the |bitrate| field updated using
+ // Returns a copy of `rate_settings` with the `bitrate` field updated using
// the current VideoBitrateAllocator.
EncoderRateSettings UpdateBitrateAllocation(
const EncoderRateSettings& rate_settings) RTC_RUN_ON(&encoder_queue_);
@@ -212,7 +212,7 @@
DataSize frame_size);
bool HasInternalSource() const RTC_RUN_ON(&encoder_queue_);
void ReleaseEncoder() RTC_RUN_ON(&encoder_queue_);
- // After calling this function |resource_adaptation_processor_| will be null.
+ // After calling this function `resource_adaptation_processor_` will be null.
void ShutdownResourceAdaptationQueue();
void CheckForAnimatedContent(const VideoFrame& frame,
@@ -323,7 +323,7 @@
// encoder behavior might dynamically change.
bool force_disable_frame_dropper_ RTC_GUARDED_BY(&encoder_queue_);
RateStatistics input_framerate_ RTC_GUARDED_BY(&encoder_queue_);
- // Incremented on worker thread whenever |frame_dropper_| determines that a
+ // Incremented on worker thread whenever `frame_dropper_` determines that a
// frame should be dropped. Decremented on whichever thread runs
// OnEncodedImage(), which is only called by one thread but not necessarily
// the worker thread.
@@ -339,7 +339,7 @@
RTC_GUARDED_BY(&encoder_queue_);
// TODO(sprang): Change actually support keyframe per simulcast stream, or
- // turn this into a simple bool |pending_keyframe_request_|.
+ // turn this into a simple bool `pending_keyframe_request_`.
std::vector<VideoFrameType> next_frame_types_ RTC_GUARDED_BY(&encoder_queue_);
FrameEncodeMetadataWriter frame_encode_metadata_writer_;
@@ -387,7 +387,7 @@
// specific resources, such as "encode usage percent" measurements and "QP
// scaling". Also involved with various mitigations such as initial frame
// dropping.
- // The manager primarily operates on the |encoder_queue_| but its lifetime is
+ // The manager primarily operates on the `encoder_queue_` but its lifetime is
// tied to the VideoStreamEncoder (which is destroyed off the encoder queue)
// and its resource list is accessible from any thread.
VideoStreamEncoderResourceManager stream_resource_manager_
diff --git a/video/video_stream_encoder_unittest.cc b/video/video_stream_encoder_unittest.cc
index cbfd93e9..37deab0 100644
--- a/video/video_stream_encoder_unittest.cc
+++ b/video/video_stream_encoder_unittest.cc
@@ -5698,7 +5698,7 @@
video_encoder_config.max_bitrate_bps = kSimulcastTargetBitrateBps;
video_encoder_config.content_type =
VideoEncoderConfig::ContentType::kRealtimeVideo;
- // Currently simulcast layers |active| flags are used to inidicate
+  // Currently simulcast layers `active` flags are used to indicate
// which SVC layers are active.
video_encoder_config.simulcast_layers.resize(3);
@@ -6167,7 +6167,7 @@
video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
max_bitrate, max_bitrate, max_bitrate, 0, 0, 0);
- // Insert frames and advance |min_duration_ms|.
+ // Insert frames and advance `min_duration_ms`.
for (size_t i = 1; i <= 10; i++) {
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
@@ -7649,8 +7649,8 @@
// The encoders produces by the VideoEncoderProxyFactory have a pointer back
// to it's factory, so in order for the encoder instance in the
- // |video_stream_encoder_| to be destroyed before the |encoder_factory| we
- // reset the |video_stream_encoder_| here.
+ // `video_stream_encoder_` to be destroyed before the `encoder_factory` we
+ // reset the `video_stream_encoder_` here.
video_stream_encoder_.reset();
}
@@ -7736,8 +7736,8 @@
// The encoders produces by the VideoEncoderProxyFactory have a pointer back
// to it's factory, so in order for the encoder instance in the
- // |video_stream_encoder_| to be destroyed before the |encoder_factory| we
- // reset the |video_stream_encoder_| here.
+ // `video_stream_encoder_` to be destroyed before the `encoder_factory` we
+ // reset the `video_stream_encoder_` here.
video_stream_encoder_.reset();
}
@@ -8020,7 +8020,7 @@
TEST_F(VideoStreamEncoderTest, EncoderResolutionsExposedInSimulcast) {
// Pick downscale factors such that we never encode at full resolution - this
// is an interesting use case. The frame resolution influences the encoder
- // resolutions, but if no layer has |scale_resolution_down_by| == 1 then the
+ // resolutions, but if no layer has `scale_resolution_down_by` == 1 then the
// encoder should not ask for the frame resolution. This allows video frames
// to have the appearence of one resolution but optimize its internal buffers
// for what is actually encoded.
@@ -8268,8 +8268,8 @@
void TearDown() override {
video_stream_encoder_->Stop();
- // Ensure |video_stream_encoder_| is destroyed before
- // |encoder_proxy_factory_|.
+ // Ensure `video_stream_encoder_` is destroyed before
+ // `encoder_proxy_factory_`.
video_stream_encoder_.reset();
VideoStreamEncoderTest::TearDown();
}