Use backticks, not vertical bars, to denote variables in comments
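
For illustration, a minimal before/after sketch of the comment style this
change applies throughout the tree; the identifier `frame_count_` is
hypothetical and not taken from the patch:

  // Old style: |frame_count_| is incremented for every decoded frame.
  // New style: `frame_count_` is incremented for every decoded frame.
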
Bug: webrtc:12338
Change-Id: I89c8b3a328d04203177522cbdfd9e606fd4bce4c
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/228246
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34696}
diff --git a/api/ice_transport_factory.h b/api/ice_transport_factory.h
index a9fd04e..2268ea5 100644
--- a/api/ice_transport_factory.h
+++ b/api/ice_transport_factory.h
@@ -35,9 +35,9 @@
// without using a webrtc::PeerConnection.
// The returned object must be accessed and destroyed on the thread that
// created it.
-// |init.port_allocator()| is required and must outlive the created
+// `init.port_allocator()` is required and must outlive the created
// IceTransportInterface object.
-// |init.async_resolver_factory()| and |init.event_log()| are optional, but if
+// `init.async_resolver_factory()` and `init.event_log()` are optional, but if
// provided must outlive the created IceTransportInterface object.
RTC_EXPORT rtc::scoped_refptr<IceTransportInterface> CreateIceTransport(
IceTransportInit);
diff --git a/api/jsep.h b/api/jsep.h
index 3348d7b..d2aa57c 100644
--- a/api/jsep.h
+++ b/api/jsep.h
@@ -166,8 +166,8 @@
// Ownership is not transferred.
//
// Returns false if the session description does not have a media section
- // that corresponds to |candidate.sdp_mid()| or
- // |candidate.sdp_mline_index()|.
+ // that corresponds to `candidate.sdp_mid()` or
+ // `candidate.sdp_mline_index()`.
virtual bool AddCandidate(const IceCandidateInterface* candidate) = 0;
// Removes the candidates from the description, if found.
diff --git a/api/peer_connection_interface.h b/api/peer_connection_interface.h
index b9350ac..a3c420f 100644
--- a/api/peer_connection_interface.h
+++ b/api/peer_connection_interface.h
@@ -1295,8 +1295,8 @@
// This is called when signaling indicates a transceiver will be receiving
// media from the remote endpoint. This is fired during a call to
// SetRemoteDescription. The receiving track can be accessed by:
- // |transceiver->receiver()->track()| and its associated streams by
- // |transceiver->receiver()->streams()|.
+ // `transceiver->receiver()->track()` and its associated streams by
+ // `transceiver->receiver()->streams()`.
// Note: This will only be called if Unified Plan semantics are specified.
// This behavior is specified in section 2.2.8.2.5 of the "Set the
// RTCSessionDescription" algorithm:
diff --git a/api/rtp_packet_info.h b/api/rtp_packet_info.h
index 13d3a39..bc9839f 100644
--- a/api/rtp_packet_info.h
+++ b/api/rtp_packet_info.h
@@ -113,7 +113,7 @@
// capture clock offset defined in the Absolute Capture Time header extension.
absl::optional<int64_t> local_capture_clock_offset_;
- // Local |webrtc::Clock|-based timestamp of when the packet was received.
+ // Local `webrtc::Clock`-based timestamp of when the packet was received.
Timestamp receive_time_;
};
diff --git a/api/rtp_packet_infos.h b/api/rtp_packet_infos.h
index d636464..2ca3174 100644
--- a/api/rtp_packet_infos.h
+++ b/api/rtp_packet_infos.h
@@ -26,8 +26,8 @@
// an audio or video frame. Uses internal reference counting to make it very
// cheap to copy.
//
-// We should ideally just use |std::vector<RtpPacketInfo>| and have it
-// |std::move()|-ed as the per-packet information is transferred from one object
+// We should ideally just use `std::vector<RtpPacketInfo>` and have it
+// `std::move()`-ed as the per-packet information is transferred from one object
// to another. But moving the info, instead of copying it, is not easily done
// for the current video code.
class RTC_EXPORT RtpPacketInfos {
diff --git a/api/set_local_description_observer_interface.h b/api/set_local_description_observer_interface.h
index 90d000c..8e7b625 100644
--- a/api/set_local_description_observer_interface.h
+++ b/api/set_local_description_observer_interface.h
@@ -21,7 +21,7 @@
// the observer to examine the effects of the operation without delay.
class SetLocalDescriptionObserverInterface : public rtc::RefCountInterface {
public:
- // On success, |error.ok()| is true.
+ // On success, `error.ok()` is true.
virtual void OnSetLocalDescriptionComplete(RTCError error) = 0;
};
diff --git a/api/set_remote_description_observer_interface.h b/api/set_remote_description_observer_interface.h
index 1782555..d1c0753 100644
--- a/api/set_remote_description_observer_interface.h
+++ b/api/set_remote_description_observer_interface.h
@@ -22,7 +22,7 @@
// operation.
class SetRemoteDescriptionObserverInterface : public rtc::RefCountInterface {
public:
- // On success, |error.ok()| is true.
+ // On success, `error.ok()` is true.
virtual void OnSetRemoteDescriptionComplete(RTCError error) = 0;
};
diff --git a/api/stats/rtc_stats.h b/api/stats/rtc_stats.h
index 8ad39b4..a5fae52 100644
--- a/api/stats/rtc_stats.h
+++ b/api/stats/rtc_stats.h
@@ -217,7 +217,7 @@
// Interface for `RTCStats` members, which have a name and a value of a type
// defined in a subclass. Only the types listed in `Type` are supported, these
-// are implemented by |RTCStatsMember<T>|. The value of a member may be
+// are implemented by `RTCStatsMember<T>`. The value of a member may be
// undefined, the value can only be read if `is_defined`.
class RTCStatsMemberInterface {
public:
@@ -286,7 +286,7 @@
// Template implementation of `RTCStatsMemberInterface`.
// The supported types are the ones described by
-// |RTCStatsMemberInterface::Type|.
+// `RTCStatsMemberInterface::Type`.
template <typename T>
class RTCStatsMember : public RTCStatsMemberInterface {
public:
diff --git a/api/stats/rtc_stats_report.h b/api/stats/rtc_stats_report.h
index a26db86..2ced422 100644
--- a/api/stats/rtc_stats_report.h
+++ b/api/stats/rtc_stats_report.h
@@ -90,7 +90,7 @@
// Takes ownership of all the stats in `other`, leaving it empty.
void TakeMembersFrom(rtc::scoped_refptr<RTCStatsReport> other);
- // Stats iterators. Stats are ordered lexicographically on |RTCStats::id|.
+ // Stats iterators. Stats are ordered lexicographically on `RTCStats::id`.
ConstIterator begin() const;
ConstIterator end() const;
diff --git a/api/stats/rtcstats_objects.h b/api/stats/rtcstats_objects.h
index b18ef97..8a6327e 100644
--- a/api/stats/rtcstats_objects.h
+++ b/api/stats/rtcstats_objects.h
@@ -57,7 +57,7 @@
static const char* const kFailed;
};
-// |RTCMediaStreamTrackStats::kind| is not an enum in the spec but the only
+// `RTCMediaStreamTrackStats::kind` is not an enum in the spec but the only
// valid values are "audio" and "video".
// https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-kind
struct RTCMediaStreamTrackKind {
diff --git a/api/stats_types.h b/api/stats_types.h
index 6745d14..9a03db3 100644
--- a/api/stats_types.h
+++ b/api/stats_types.h
@@ -232,7 +232,7 @@
kStatsValueNameSrtpCipher,
kStatsValueNameTargetDelayMs,
kStatsValueNameTargetEncBitrate,
- kStatsValueNameTimingFrameInfo, // Result of |TimingFrameInfo::ToString|
+ kStatsValueNameTimingFrameInfo, // Result of `TimingFrameInfo::ToString`
kStatsValueNameTrackId,
kStatsValueNameTransmitBitrate,
kStatsValueNameTransportType,
diff --git a/api/task_queue/task_queue_base.h b/api/task_queue/task_queue_base.h
index 88419ed..d8af6e6 100644
--- a/api/task_queue/task_queue_base.h
+++ b/api/task_queue/task_queue_base.h
@@ -38,7 +38,7 @@
virtual void Delete() = 0;
// Schedules a task to execute. Tasks are executed in FIFO order.
- // If |task->Run()| returns true, task is deleted on the task queue
+ // If `task->Run()` returns true, task is deleted on the task queue
// before next QueuedTask starts executing.
// When a TaskQueue is deleted, pending tasks will not be executed but they
// will be deleted. The deletion of tasks may happen synchronously on the
diff --git a/api/video_codecs/video_encoder.h b/api/video_codecs/video_encoder.h
index 2bdf8d0..3035dd7 100644
--- a/api/video_codecs/video_encoder.h
+++ b/api/video_codecs/video_encoder.h
@@ -287,7 +287,7 @@
// the last InitEncode() call.
double framerate_fps;
// The network bandwidth available for video. This is at least
- // |bitrate.get_sum_bps()|, but may be higher if the application is not
+ // `bitrate.get_sum_bps()`, but may be higher if the application is not
// network constrained.
DataRate bandwidth_allocation;
diff --git a/api/video_codecs/video_encoder_config.h b/api/video_codecs/video_encoder_config.h
index 5440f1f..cfda2ad 100644
--- a/api/video_codecs/video_encoder_config.h
+++ b/api/video_codecs/video_encoder_config.h
@@ -129,7 +129,7 @@
// An implementation should return a std::vector<VideoStream> with the
// wanted VideoStream settings for the given video resolution.
// The size of the vector may not be larger than
- // |encoder_config.number_of_streams|.
+ // `encoder_config.number_of_streams`.
virtual std::vector<VideoStream> CreateEncoderStreams(
int width,
int height,
diff --git a/api/video_codecs/vp8_frame_buffer_controller.h b/api/video_codecs/vp8_frame_buffer_controller.h
index 852008f..fc494f7 100644
--- a/api/video_codecs/vp8_frame_buffer_controller.h
+++ b/api/video_codecs/vp8_frame_buffer_controller.h
@@ -129,7 +129,7 @@
// Called by the encoder before encoding a frame. Returns a set of overrides
// the controller wishes to enact in the encoder's configuration.
// If a value is not overridden, previous overrides are still in effect.
- // However, if |Vp8EncoderConfig::reset_previous_configuration_overrides|
+ // However, if `Vp8EncoderConfig::reset_previous_configuration_overrides`
// is set to `true`, all previous overrides are reset.
virtual Vp8EncoderConfig UpdateConfiguration(size_t stream_index) = 0;
diff --git a/audio/channel_receive.cc b/audio/channel_receive.cc
index 3ca3b51b..b741a8c 100644
--- a/audio/channel_receive.cc
+++ b/audio/channel_receive.cc
@@ -464,7 +464,7 @@
}
}
- // Fill in local capture clock offset in |audio_frame->packet_infos_|.
+ // Fill in local capture clock offset in `audio_frame->packet_infos_`.
RtpPacketInfos::vector_type packet_infos;
for (auto& packet_info : audio_frame->packet_infos_) {
absl::optional<int64_t> local_capture_clock_offset;
diff --git a/audio/utility/audio_frame_operations.h b/audio/utility/audio_frame_operations.h
index 7e954df..2a5f29f 100644
--- a/audio/utility/audio_frame_operations.h
+++ b/audio/utility/audio_frame_operations.h
@@ -33,13 +33,13 @@
// `result_frame` is empty.
static void Add(const AudioFrame& frame_to_add, AudioFrame* result_frame);
- // |frame.num_channels_| will be updated. This version checks for sufficient
+ // `frame.num_channels_` will be updated. This version checks for sufficient
// buffer size and that `num_channels_` is mono. Use UpmixChannels
// instead. TODO(bugs.webrtc.org/8649): remove.
ABSL_DEPRECATED("bugs.webrtc.org/8649")
static int MonoToStereo(AudioFrame* frame);
- // |frame.num_channels_| will be updated. This version checks that
+ // `frame.num_channels_` will be updated. This version checks that
// `num_channels_` is stereo. Use DownmixChannels
// instead. TODO(bugs.webrtc.org/8649): remove.
ABSL_DEPRECATED("bugs.webrtc.org/8649")
@@ -52,7 +52,7 @@
size_t samples_per_channel,
int16_t* dst_audio);
- // |frame.num_channels_| will be updated. This version checks that
+ // `frame.num_channels_` will be updated. This version checks that
// `num_channels_` is 4 channels.
static int QuadToStereo(AudioFrame* frame);
@@ -66,12 +66,12 @@
size_t dst_channels,
int16_t* dst_audio);
- // |frame.num_channels_| will be updated. This version checks that
+ // `frame.num_channels_` will be updated. This version checks that
// `num_channels_` and `dst_channels` are valid and performs relevant downmix.
// Supported channel combinations are N channels to Mono, and Quad to Stereo.
static void DownmixChannels(size_t dst_channels, AudioFrame* frame);
- // |frame.num_channels_| will be updated. This version checks that
+ // `frame.num_channels_` will be updated. This version checks that
// `num_channels_` and `dst_channels` are valid and performs relevant
// downmix. Supported channel combinations are Mono to N
// channels. The single channel is replicated.
diff --git a/call/rtp_config.h b/call/rtp_config.h
index ae5ae3b..c3b5b4a 100644
--- a/call/rtp_config.h
+++ b/call/rtp_config.h
@@ -81,7 +81,7 @@
// If rids are specified, they should correspond to the `ssrcs` vector.
// This means that:
// 1. rids.size() == 0 || rids.size() == ssrcs.size().
- // 2. If rids is not empty, then |rids[i]| should use |ssrcs[i]|.
+ // 2. If rids is not empty, then `rids[i]` should use `ssrcs[i]`.
std::vector<std::string> rids;
// The value to send in the MID RTP header extension if the extension is
diff --git a/call/simulated_network.cc b/call/simulated_network.cc
index f8a5bd8..fc34fda 100644
--- a/call/simulated_network.cc
+++ b/call/simulated_network.cc
@@ -216,8 +216,8 @@
pending_drain_bits_ -= packet.packet.size * 8;
RTC_DCHECK(pending_drain_bits_ >= 0);
- // Drop packets at an average rate of |state.config.loss_percent| with
- // and average loss burst length of |state.config.avg_burst_loss_length|.
+ // Drop packets at an average rate of `state.config.loss_percent` with
+ // an average loss burst length of `state.config.avg_burst_loss_length`.
if ((bursting_ && random_.Rand<double>() < state.prob_loss_bursting) ||
(!bursting_ && random_.Rand<double>() < state.prob_start_bursting)) {
bursting_ = true;
diff --git a/common_audio/vad/vad_core.c b/common_audio/vad/vad_core.c
index d62d5ff..0872449 100644
--- a/common_audio/vad/vad_core.c
+++ b/common_audio/vad/vad_core.c
@@ -298,8 +298,8 @@
nmk2 = nmk;
if (!vadflag) {
// deltaN = (x-mu)/sigma^2
- // ngprvec[k] = |noise_probability[k]| /
- // (|noise_probability[0]| + |noise_probability[1]|)
+ // ngprvec[k] = `noise_probability[k]` /
+ // (`noise_probability[0]` + `noise_probability[1]`)
// (Q14 * Q11 >> 11) = Q14.
delt = (int16_t)((ngprvec[gaussian] * deltaN[gaussian]) >> 11);
@@ -327,8 +327,8 @@
if (vadflag) {
// Update speech mean vector:
// `deltaS` = (x-mu)/sigma^2
- // sgprvec[k] = |speech_probability[k]| /
- // (|speech_probability[0]| + |speech_probability[1]|)
+ // sgprvec[k] = `speech_probability[k]` /
+ // (`speech_probability[0]` + `speech_probability[1]`)
// (Q14 * Q11) >> 11 = Q14.
delt = (int16_t)((sgprvec[gaussian] * deltaS[gaussian]) >> 11);
@@ -430,14 +430,14 @@
tmp2_s16 = (int16_t)((3 * tmp_s16) >> 2);
// Move Gaussian means for speech model by `tmp1_s16` and update
- // `speech_global_mean`. Note that |self->speech_means[channel]| is
+ // `speech_global_mean`. Note that `self->speech_means[channel]` is
// changed after the call.
speech_global_mean = WeightedAverage(&self->speech_means[channel],
tmp1_s16,
&kSpeechDataWeights[channel]);
// Move Gaussian means for noise model by -`tmp2_s16` and update
- // `noise_global_mean`. Note that |self->noise_means[channel]| is
+ // `noise_global_mean`. Note that `self->noise_means[channel]` is
// changed after the call.
noise_global_mean = WeightedAverage(&self->noise_means[channel],
-tmp2_s16,
diff --git a/common_audio/vad/vad_sp.h b/common_audio/vad/vad_sp.h
index 37ee19f..89138c5 100644
--- a/common_audio/vad/vad_sp.h
+++ b/common_audio/vad/vad_sp.h
@@ -35,7 +35,7 @@
// Updates and returns the smoothed feature minimum. As minimum we use the
// median of the five smallest feature values in a 100 frames long window.
-// As long as |handle->frame_counter| is zero, that is, we haven't received any
+// As long as `handle->frame_counter` is zero, that is, we haven't received any
// "valid" data, FindMinimum() outputs the default value of 1600.
//
// Inputs:
diff --git a/common_video/video_frame_buffer_pool.cc b/common_video/video_frame_buffer_pool.cc
index d225370..a450bd1 100644
--- a/common_video/video_frame_buffer_pool.cc
+++ b/common_video/video_frame_buffer_pool.cc
@@ -20,7 +20,7 @@
bool HasOneRef(const rtc::scoped_refptr<VideoFrameBuffer>& buffer) {
// Cast to rtc::RefCountedObject is safe because this function is only called
// on locally created VideoFrameBuffers, which are either
- // |rtc::RefCountedObject<I420Buffer>| or |rtc::RefCountedObject<NV12Buffer>|.
+ // `rtc::RefCountedObject<I420Buffer>` or `rtc::RefCountedObject<NV12Buffer>`.
switch (buffer->type()) {
case VideoFrameBuffer::Type::kI420: {
return static_cast<rtc::RefCountedObject<I420Buffer>*>(buffer.get())
@@ -94,7 +94,7 @@
GetExistingBuffer(width, height, VideoFrameBuffer::Type::kI420);
if (existing_buffer) {
// Cast is safe because the only way kI420 buffer is created is
- // in the same function below, where |RefCountedObject<I420Buffer>| is
+ // in the same function below, where `RefCountedObject<I420Buffer>` is
// created.
rtc::RefCountedObject<I420Buffer>* raw_buffer =
static_cast<rtc::RefCountedObject<I420Buffer>*>(existing_buffer.get());
@@ -125,7 +125,7 @@
GetExistingBuffer(width, height, VideoFrameBuffer::Type::kNV12);
if (existing_buffer) {
  // Cast is safe because the only way a kNV12 buffer is created is
- // in the same function below, where |RefCountedObject<I420Buffer>| is
+ // in the same function below, where `RefCountedObject<NV12Buffer>` is
  // created.
rtc::RefCountedObject<NV12Buffer>* raw_buffer =
static_cast<rtc::RefCountedObject<NV12Buffer>*>(existing_buffer.get());
diff --git a/docs/native-code/rtp-hdrext/transport-wide-cc-02/README.md b/docs/native-code/rtp-hdrext/transport-wide-cc-02/README.md
index 20b1d51..8dc8261 100644
--- a/docs/native-code/rtp-hdrext/transport-wide-cc-02/README.md
+++ b/docs/native-code/rtp-hdrext/transport-wide-cc-02/README.md
@@ -29,19 +29,19 @@
Data layout of transport-wide sequence number
1-byte header + 2 bytes of data:
- 0 1 2
+ 0 1 2
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | ID | L=1 |transport-wide sequence number |
+ | ID | L=1 |transport-wide sequence number |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Data layout of transport-wide sequence number and optional feedback request
1-byte header + 4 bytes of data:
- 0 1 2 3
+ 0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | ID | L=3 |transport-wide sequence number |T| seq count |
+ | ID | L=3 |transport-wide sequence number |T| seq count |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|seq count cont.|
+-+-+-+-+-+-+-+-+
diff --git a/logging/rtc_event_log/encoder/rtc_event_log_encoder_new_format.cc b/logging/rtc_event_log/encoder/rtc_event_log_encoder_new_format.cc
index 4b9be76..236aea7 100644
--- a/logging/rtc_event_log/encoder/rtc_event_log_encoder_new_format.cc
+++ b/logging/rtc_event_log/encoder/rtc_event_log_encoder_new_format.cc
@@ -288,7 +288,7 @@
}
// Copies all RTCP blocks except APP, SDES and unknown from `packet` to
-// `buffer`. `buffer` must have space for at least |packet.size()| bytes.
+// `buffer`. `buffer` must have space for at least `packet.size()` bytes.
size_t RemoveNonAllowlistedRtcpBlocks(const rtc::Buffer& packet,
uint8_t* buffer) {
RTC_DCHECK(buffer != nullptr);
diff --git a/media/base/delayable.h b/media/base/delayable.h
index 90ce5d7..f0344c5 100644
--- a/media/base/delayable.h
+++ b/media/base/delayable.h
@@ -18,7 +18,7 @@
namespace cricket {
// Delayable is used by user code through ApplyConstraints algorithm. Its
-// methods must take precendence over similar functional in |syncable.h|.
+// methods must take precedence over similar functionality in `syncable.h`.
class Delayable {
public:
virtual ~Delayable() {}
diff --git a/media/base/video_adapter.h b/media/base/video_adapter.h
index 76fefab..0493323 100644
--- a/media/base/video_adapter.h
+++ b/media/base/video_adapter.h
@@ -86,24 +86,24 @@
const absl::optional<int>& max_fps) RTC_LOCKS_EXCLUDED(mutex_);
// Requests the output frame size from `AdaptFrameResolution` to have as close
- // as possible to |sink_wants.target_pixel_count| pixels (if set)
- // but no more than |sink_wants.max_pixel_count|.
- // |sink_wants.max_framerate_fps| is essentially analogous to
- // |sink_wants.max_pixel_count|, but for framerate rather than resolution.
- // Set |sink_wants.max_pixel_count| and/or |sink_wants.max_framerate_fps| to
+ // as possible to `sink_wants.target_pixel_count` pixels (if set)
+ // but no more than `sink_wants.max_pixel_count`.
+ // `sink_wants.max_framerate_fps` is essentially analogous to
+ // `sink_wants.max_pixel_count`, but for framerate rather than resolution.
+ // Set `sink_wants.max_pixel_count` and/or `sink_wants.max_framerate_fps` to
// std::numeric_limit<int>::max() if no upper limit is desired.
// The sink resolution alignment requirement is given by
- // |sink_wants.resolution_alignment|.
+ // `sink_wants.resolution_alignment`.
// Note: Should be called from the sink only.
void OnSinkWants(const rtc::VideoSinkWants& sink_wants)
RTC_LOCKS_EXCLUDED(mutex_);
// Returns maximum image area, which shouldn't impose any adaptations.
- // Can return |numeric_limits<int>::max()| if no limit is set.
+ // Can return `numeric_limits<int>::max()` if no limit is set.
int GetTargetPixels() const;
// Returns current frame-rate limit.
- // Can return |numeric_limits<float>::infinity()| if no limit is set.
+ // Can return `numeric_limits<float>::infinity()` if no limit is set.
float GetMaxFramerate() const;
private:
@@ -124,7 +124,7 @@
const int source_resolution_alignment_;
// The currently applied resolution alignment, as given by the requirements:
// - the fixed `source_resolution_alignment_`; and
- // - the latest |sink_wants.resolution_alignment|.
+ // - the latest `sink_wants.resolution_alignment`.
int resolution_alignment_ RTC_GUARDED_BY(mutex_);
// The target timestamp for the next frame based on requested format.
diff --git a/media/engine/simulcast_encoder_adapter_unittest.cc b/media/engine/simulcast_encoder_adapter_unittest.cc
index 5a2bf8e..5f3e54f 100644
--- a/media/engine/simulcast_encoder_adapter_unittest.cc
+++ b/media/engine/simulcast_encoder_adapter_unittest.cc
@@ -761,7 +761,7 @@
EXPECT_EQ(3u, helper_->factory()->encoders().size());
// The adapter should destroy all encoders it has allocated. Since
- // |helper_->factory()| is owned by `adapter_`, however, we need to rely on
+ // `helper_->factory()` is owned by `adapter_`, however, we need to rely on
// lsan to find leaks here.
EXPECT_EQ(0, adapter_->Release());
adapter_.reset();
diff --git a/media/engine/webrtc_media_engine.cc b/media/engine/webrtc_media_engine.cc
index 7ac666e..6ce52e4 100644
--- a/media/engine/webrtc_media_engine.cc
+++ b/media/engine/webrtc_media_engine.cc
@@ -27,7 +27,7 @@
std::unique_ptr<MediaEngineInterface> CreateMediaEngine(
MediaEngineDependencies dependencies) {
- // TODO(sprang): Make populating |dependencies.trials| mandatory and remove
+ // TODO(sprang): Make populating `dependencies.trials` mandatory and remove
// these fallbacks.
std::unique_ptr<webrtc::WebRtcKeyValueConfig> fallback_trials(
dependencies.trials ? nullptr : new webrtc::FieldTrialBasedConfig());
diff --git a/media/engine/webrtc_voice_engine.cc b/media/engine/webrtc_voice_engine.cc
index e9ffb21..cbc6abf 100644
--- a/media/engine/webrtc_voice_engine.cc
+++ b/media/engine/webrtc_voice_engine.cc
@@ -152,8 +152,8 @@
const AudioOptions& options) {
if (options.audio_network_adaptor && *options.audio_network_adaptor &&
options.audio_network_adaptor_config) {
- // Turn on audio network adaptor only when |options_.audio_network_adaptor|
- // equals true and |options_.audio_network_adaptor_config| has a value.
+ // Turn on audio network adaptor only when `options_.audio_network_adaptor`
+ // equals true and `options_.audio_network_adaptor_config` has a value.
return options.audio_network_adaptor_config;
}
return absl::nullopt;
@@ -1495,10 +1495,10 @@
}
// TODO(minyue): The following legacy actions go into
- // |WebRtcAudioSendStream::SetRtpParameters()| which is called at the end,
+ // `WebRtcAudioSendStream::SetRtpParameters()` which is called at the end,
  // though there are two differences:
- // 1. |WebRtcVoiceMediaChannel::SetChannelSendParameters()| only calls
- // `SetSendCodec` while |WebRtcAudioSendStream::SetRtpParameters()| calls
+ // 1. `WebRtcVoiceMediaChannel::SetChannelSendParameters()` only calls
+ // `SetSendCodec` while `WebRtcAudioSendStream::SetRtpParameters()` calls
// `SetSendCodecs`. The outcome should be the same.
// 2. AudioSendStream can be recreated.
diff --git a/media/engine/webrtc_voice_engine_unittest.cc b/media/engine/webrtc_voice_engine_unittest.cc
index 4b2742c..1fd2480 100644
--- a/media/engine/webrtc_voice_engine_unittest.cc
+++ b/media/engine/webrtc_voice_engine_unittest.cc
@@ -2505,7 +2505,7 @@
const int initial_num = call_.GetNumCreatedSendStreams();
cricket::AudioOptions options;
options.audio_network_adaptor = absl::nullopt;
- // Unvalued |options.audio_network_adaptor|.should not reset audio network
+ // Unvalued `options.audio_network_adaptor` should not reset audio network
// adaptor.
SetAudioSend(kSsrcX, true, nullptr, &options);
// AudioSendStream not expected to be recreated.
diff --git a/media/sctp/sctp_transport_internal.h b/media/sctp/sctp_transport_internal.h
index e44efb5..93a59b9 100644
--- a/media/sctp/sctp_transport_internal.h
+++ b/media/sctp/sctp_transport_internal.h
@@ -119,7 +119,7 @@
// Send data down this channel (will be wrapped as SCTP packets then given to
  // usrsctp, which will then post it to the network interface).
  // Returns true iff the data was successfully sent or queued somewhere on the
  // send-queue/network.
- // Uses |params.ssrc| as the SCTP sid.
+ // Uses `params.ssrc` as the SCTP sid.
virtual bool SendData(int sid,
const webrtc::SendDataParams& params,
const rtc::CopyOnWriteBuffer& payload,
diff --git a/modules/audio_coding/acm2/acm_receiver.h b/modules/audio_coding/acm2/acm_receiver.h
index 9963603..18b662a 100644
--- a/modules/audio_coding/acm2/acm_receiver.h
+++ b/modules/audio_coding/acm2/acm_receiver.h
@@ -180,7 +180,7 @@
// of NACK list are in the range of [N - `max_nack_list_size`, N).
//
  // `max_nack_list_size` should be positive (non-zero) and less than or
- // equal to |Nack::kNackListSizeLimit|. Otherwise, No change is applied and -1
+ // equal to `Nack::kNackListSizeLimit`. Otherwise, no change is applied and -1
  // is returned. 0 is returned on success.
//
int EnableNack(size_t max_nack_list_size);
diff --git a/modules/audio_coding/acm2/audio_coding_module.cc b/modules/audio_coding/acm2/audio_coding_module.cc
index d629139..8ba1b9f 100644
--- a/modules/audio_coding/acm2/audio_coding_module.cc
+++ b/modules/audio_coding/acm2/audio_coding_module.cc
@@ -229,7 +229,7 @@
const InputData& input_data,
absl::optional<int64_t> absolute_capture_timestamp_ms) {
// TODO(bugs.webrtc.org/10739): add dcheck that
- // |audio_frame.absolute_capture_timestamp_ms()| always has a value.
+ // `audio_frame.absolute_capture_timestamp_ms()` always has a value.
AudioEncoder::EncodedInfo encoded_info;
uint8_t previous_pltype;
@@ -333,7 +333,7 @@
MutexLock lock(&acm_mutex_);
int r = Add10MsDataInternal(audio_frame, &input_data_);
// TODO(bugs.webrtc.org/10739): add dcheck that
- // |audio_frame.absolute_capture_timestamp_ms()| always has a value.
+ // `audio_frame.absolute_capture_timestamp_ms()` always has a value.
return r < 0
? r
: Encode(input_data_, audio_frame.absolute_capture_timestamp_ms());
diff --git a/modules/audio_coding/audio_network_adaptor/bitrate_controller_unittest.cc b/modules/audio_coding/audio_network_adaptor/bitrate_controller_unittest.cc
index 76f52ad..3155f19 100644
--- a/modules/audio_coding/audio_network_adaptor/bitrate_controller_unittest.cc
+++ b/modules/audio_coding/audio_network_adaptor/bitrate_controller_unittest.cc
@@ -85,7 +85,7 @@
1000 /
kInitialFrameLengthMs;
// Frame length unchanged, bitrate changes in accordance with
- // |metrics.target_audio_bitrate_bps| and |metrics.overhead_bytes_per_packet|.
+ // `metrics.target_audio_bitrate_bps` and `metrics.overhead_bytes_per_packet`.
UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket);
CheckDecision(&controller, kInitialFrameLengthMs, kBitrateBps);
}
diff --git a/modules/audio_coding/audio_network_adaptor/config.proto b/modules/audio_coding/audio_network_adaptor/config.proto
index 4f8b2c7..63b220d 100644
--- a/modules/audio_coding/audio_network_adaptor/config.proto
+++ b/modules/audio_coding/audio_network_adaptor/config.proto
@@ -169,7 +169,7 @@
// Shorter distance means higher significance. The significances of
// controllers determine their order in the processing pipeline. Controllers
// without `scoring_point` follow their default order in
- // |ControllerManager::controllers|.
+ // `ControllerManager::controllers`.
optional ScoringPoint scoring_point = 1;
oneof controller {
diff --git a/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc
index 355431a..743b087 100644
--- a/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc
+++ b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc
@@ -101,7 +101,7 @@
}
// Checks that the FEC decision and `uplink_packet_loss_fraction` given by
-// |states->controller->MakeDecision| matches `expected_enable_fec` and
+// `states->controller->MakeDecision` matches `expected_enable_fec` and
// `expected_uplink_packet_loss_fraction`, respectively.
void CheckDecision(FecControllerPlrBasedTestStates* states,
bool expected_enable_fec,
diff --git a/modules/audio_coding/codecs/cng/webrtc_cng.cc b/modules/audio_coding/codecs/cng/webrtc_cng.cc
index bfe77c7..48f1b8c 100644
--- a/modules/audio_coding/codecs/cng/webrtc_cng.cc
+++ b/modules/audio_coding/codecs/cng/webrtc_cng.cc
@@ -195,7 +195,7 @@
/* `lpPoly` - Coefficients in Q12.
* `excitation` - Speech samples.
- * |nst->dec_filtstate| - State preservation.
+ * `nst->dec_filtstate` - State preservation.
* `out_data` - Filtered speech samples. */
WebRtcSpl_FilterAR(lpPoly, WEBRTC_CNG_MAX_LPC_ORDER + 1, excitation,
num_samples, dec_filtstate_, WEBRTC_CNG_MAX_LPC_ORDER,
diff --git a/modules/audio_coding/codecs/isac/main/source/pitch_filter.c b/modules/audio_coding/codecs/isac/main/source/pitch_filter.c
index 899d842..bf03dff 100644
--- a/modules/audio_coding/codecs/isac/main/source/pitch_filter.c
+++ b/modules/audio_coding/codecs/isac/main/source/pitch_filter.c
@@ -140,9 +140,9 @@
int j;
double sum;
double sum2;
- /* Index of |parameters->buffer| where the output is written to. */
+ /* Index of `parameters->buffer` where the output is written to. */
int pos = parameters->index + PITCH_BUFFSIZE;
- /* Index of |parameters->buffer| where samples are read for fractional-lag
+ /* Index of `parameters->buffer` where samples are read for fractional-lag
* computation. */
int pos_lag = pos - parameters->lag_offset;
@@ -174,9 +174,9 @@
/* Filter for fractional pitch. */
sum2 = 0.0;
for (m = PITCH_FRACORDER-1; m >= m_tmp; --m) {
- /* |lag_index + m| is always larger than or equal to zero, see how
+ /* `lag_index + m` is always larger than or equal to zero, see how
* m_tmp is computed. This is equivalent to assume samples outside
- * |out_dg[j]| are zero. */
+ * `out_dg[j]` are zero. */
sum2 += out_dg[j][lag_index + m] * parameters->interpol_coeff[m];
}
/* Add the contribution of differential gain change. */
diff --git a/modules/audio_coding/codecs/opus/audio_encoder_opus.h b/modules/audio_coding/codecs/opus/audio_encoder_opus.h
index ab954fe..c7ee4f4 100644
--- a/modules/audio_coding/codecs/opus/audio_encoder_opus.h
+++ b/modules/audio_coding/codecs/opus/audio_encoder_opus.h
@@ -139,7 +139,7 @@
absl::optional<int64_t> link_capacity_allocation);
// TODO(minyue): remove "override" when we can deprecate
- // |AudioEncoder::SetTargetBitrate|.
+ // `AudioEncoder::SetTargetBitrate`.
void SetTargetBitrate(int target_bps) override;
void ApplyAudioNetworkAdaptor();
diff --git a/modules/audio_coding/codecs/opus/opus_unittest.cc b/modules/audio_coding/codecs/opus/opus_unittest.cc
index b507a32..b40d738 100644
--- a/modules/audio_coding/codecs/opus/opus_unittest.cc
+++ b/modules/audio_coding/codecs/opus/opus_unittest.cc
@@ -116,7 +116,7 @@
void TestCbrEffect(bool dtx, int block_length_ms);
// Prepare `speech_data_` for encoding, read from a hard-coded file.
- // After preparation, |speech_data_.GetNextBlock()| returns a pointer to a
+ // After preparation, `speech_data_.GetNextBlock()` returns a pointer to a
// block of `block_length_ms` milliseconds. The data is looped every
// `loop_length_ms` milliseconds.
void PrepareSpeechData(int block_length_ms, int loop_length_ms);
diff --git a/modules/audio_coding/neteq/neteq_impl_unittest.cc b/modules/audio_coding/neteq/neteq_impl_unittest.cc
index 875e62c..b0fee47 100644
--- a/modules/audio_coding/neteq/neteq_impl_unittest.cc
+++ b/modules/audio_coding/neteq/neteq_impl_unittest.cc
@@ -510,7 +510,7 @@
EXPECT_EQ(1u, output.num_channels_);
EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
- // Verify |output.packet_infos_|.
+ // Verify `output.packet_infos_`.
ASSERT_THAT(output.packet_infos_, SizeIs(1));
{
const auto& packet_info = output.packet_infos_[0];
@@ -602,7 +602,7 @@
EXPECT_EQ(1u, output.num_channels_);
EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
- // Verify |output.packet_infos_|.
+ // Verify `output.packet_infos_`.
ASSERT_THAT(output.packet_infos_, SizeIs(1));
{
const auto& packet_info = output.packet_infos_[0];
@@ -648,7 +648,7 @@
// out-of-order packet should have been discarded.
EXPECT_TRUE(packet_buffer_->Empty());
- // Verify |output.packet_infos_|. Expect to only see the second packet.
+ // Verify `output.packet_infos_`. Expect to only see the second packet.
ASSERT_THAT(output.packet_infos_, SizeIs(1));
{
const auto& packet_info = output.packet_infos_[0];
diff --git a/modules/audio_device/fine_audio_buffer.h b/modules/audio_device/fine_audio_buffer.h
index 99f282c1..a6c3042 100644
--- a/modules/audio_device/fine_audio_buffer.h
+++ b/modules/audio_device/fine_audio_buffer.h
@@ -42,8 +42,8 @@
bool IsReadyForPlayout() const;
bool IsReadyForRecord() const;
- // Copies audio samples into `audio_buffer` where number of requested
- // elements is specified by |audio_buffer.size()|. The producer will always
+ // Copies audio samples into `audio_buffer` where number of requested
+ // elements is specified by `audio_buffer.size()`. The producer will always
// fill up the audio buffer and if no audio exists, the buffer will contain
// silence instead. The provided delay estimate in `playout_delay_ms` should
// contain an estimate of the latency between when an audio frame is read from
diff --git a/modules/audio_device/win/core_audio_base_win.cc b/modules/audio_device/win/core_audio_base_win.cc
index 12c5146..c42c091 100644
--- a/modules/audio_device/win/core_audio_base_win.cc
+++ b/modules/audio_device/win/core_audio_base_win.cc
@@ -448,7 +448,7 @@
// - HDAudio driver
// - kEnableLowLatencyIfSupported changed from false (default) to true.
// TODO(henrika): IsLowLatencySupported() returns AUDCLNT_E_UNSUPPORTED_FORMAT
- // when |sample_rate_.has_value()| returns true if rate conversion is
+ // when `sample_rate_.has_value()` returns true if rate conversion is
// actually required (i.e., client asks for other than the default rate).
bool low_latency_support = false;
uint32_t min_period_in_frames = 0;
diff --git a/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc b/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc
index 4000e33..e8c9125 100644
--- a/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc
+++ b/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc
@@ -250,24 +250,24 @@
RTC_DCHECK_GE(initial.period, 0);
RTC_DCHECK_GE(alternative.period, 0);
RTC_DCHECK_GE(period_divisor, 2);
- // Compute a term that lowers the threshold when |alternative.period| is close
- // to the last estimated period |last.period| - i.e., pitch tracking.
+ // Compute a term that lowers the threshold when `alternative.period` is close
+ // to the last estimated period `last.period` - i.e., pitch tracking.
float lower_threshold_term = 0.f;
if (std::abs(alternative.period - last.period) <= 1) {
// The candidate pitch period is within 1 sample from the last one.
- // Make the candidate at |alternative.period| very easy to be accepted.
+ // Make the candidate at `alternative.period` very easy to be accepted.
lower_threshold_term = last.strength;
} else if (std::abs(alternative.period - last.period) == 2 &&
initial.period >
kInitialPitchPeriodThresholds[period_divisor - 2]) {
// The candidate pitch period is 2 samples far from the last one and the
- // period |initial.period| (from which |alternative.period| has been
- // derived) is greater than a threshold. Make |alternative.period| easy to
+ // period `initial.period` (from which `alternative.period` has been
+ // derived) is greater than a threshold. Make `alternative.period` easy to
// be accepted.
lower_threshold_term = 0.5f * last.strength;
}
// Set the threshold based on the strength of the initial estimate
- // |initial.period|. Also reduce the chance of false positives caused by a
+ // `initial.period`. Also reduce the chance of false positives caused by a
// bias towards high frequencies (originating from short-term correlations).
float threshold =
std::max(0.3f, 0.7f * initial.strength - lower_threshold_term);
@@ -457,7 +457,7 @@
alternative_pitch.period = GetAlternativePitchPeriod(
initial_pitch.period, /*multiplier=*/1, period_divisor);
RTC_DCHECK_GE(alternative_pitch.period, kMinPitch24kHz);
- // When looking at |alternative_pitch.period|, we also look at one of its
+ // When looking at `alternative_pitch.period`, we also look at one of its
// sub-harmonics. `kSubHarmonicMultipliers` is used to know where to look.
// `period_divisor` == 2 is a special case since `dual_alternative_period`
// might be greater than the maximum pitch period.
@@ -472,7 +472,7 @@
<< "The lower pitch period and the additional sub-harmonic must not "
"coincide.";
// Compute an auto-correlation score for the primary pitch candidate
- // |alternative_pitch.period| by also looking at its possible sub-harmonic
+ // `alternative_pitch.period` by also looking at its possible sub-harmonic
// `dual_alternative_period`.
const float xy_primary_period = ComputeAutoCorrelation(
kMaxPitch24kHz - alternative_pitch.period, pitch_buffer, vector_math);
diff --git a/modules/audio_processing/gain_controller2_unittest.cc b/modules/audio_processing/gain_controller2_unittest.cc
index b1ab00e..8f65a89 100644
--- a/modules/audio_processing/gain_controller2_unittest.cc
+++ b/modules/audio_processing/gain_controller2_unittest.cc
@@ -310,7 +310,7 @@
GainController2,
FixedDigitalTest,
::testing::Values(
- // When gain < |test::kLimiterMaxInputLevelDbFs|, the limiter will not
+ // When gain < `test::kLimiterMaxInputLevelDbFs`, the limiter will not
// saturate the signal (at any sample rate).
FixedDigitalTestParams(0.1f,
test::kLimiterMaxInputLevelDbFs - 0.01f,
@@ -320,7 +320,7 @@
test::kLimiterMaxInputLevelDbFs - 0.01f,
48000,
false),
- // When gain > |test::kLimiterMaxInputLevelDbFs|, the limiter will
+ // When gain > `test::kLimiterMaxInputLevelDbFs`, the limiter will
// saturate the signal (at any sample rate).
FixedDigitalTestParams(test::kLimiterMaxInputLevelDbFs + 0.01f,
10.f,
diff --git a/modules/audio_processing/include/audio_processing.h b/modules/audio_processing/include/audio_processing.h
index 047776b..6761ed4 100644
--- a/modules/audio_processing/include/audio_processing.h
+++ b/modules/audio_processing/include/audio_processing.h
@@ -570,8 +570,8 @@
// The int16 interfaces require:
// - only `NativeRate`s be used
// - that the input, output and reverse rates must match
- // - that |processing_config.output_stream()| matches
- // |processing_config.input_stream()|.
+ // - that `processing_config.output_stream()` matches
+ // `processing_config.input_stream()`.
//
// The float interfaces accept arbitrary rates and support differing input and
// output layouts, but the output must have either one channel or the same
diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py
index 0affbed..fe3a6c7 100644
--- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py
+++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py
@@ -349,7 +349,7 @@
def _SliceDataForScoreTableCell(self, score_name, apm_config,
test_data_gen, test_data_gen_params):
- """Slices |self._scores_data_frame| to extract the data for a tab."""
+ """Slices `self._scores_data_frame` to extract the data for a tab."""
masks = []
masks.append(self._scores_data_frame.eval_score_name == score_name)
masks.append(self._scores_data_frame.apm_config == apm_config)
diff --git a/modules/desktop_capture/desktop_region.cc b/modules/desktop_capture/desktop_region.cc
index d4e7179..2c87c11 100644
--- a/modules/desktop_capture/desktop_region.cc
+++ b/modules/desktop_capture/desktop_region.cc
@@ -91,7 +91,7 @@
return;
// Top of the part of the `rect` that hasn't been inserted yet. Increased as
- // we iterate over the rows until it reaches |rect.bottom()|.
+ // we iterate over the rows until it reaches `rect.bottom()`.
int top = rect.top();
// Iterate over all rows that may intersect with `rect` and add new rows when
@@ -456,7 +456,7 @@
// static
bool DesktopRegion::IsSpanInRow(const Row& row, const RowSpan& span) {
- // Find the first span that starts at or after |span.left| and then check if
+ // Find the first span that starts at or after `span.left` and then check if
// it's the same span.
RowSpanSet::const_iterator it = std::lower_bound(
row.spans.begin(), row.spans.end(), span.left, CompareSpanLeft);
diff --git a/modules/desktop_capture/win/wgc_capture_session.cc b/modules/desktop_capture/win/wgc_capture_session.cc
index 5caaaea..22dbf90 100644
--- a/modules/desktop_capture/win/wgc_capture_session.cc
+++ b/modules/desktop_capture/win/wgc_capture_session.cc
@@ -286,7 +286,7 @@
int image_width = std::min(previous_size_.Width, new_size.Width);
int row_data_length = image_width * DesktopFrame::kBytesPerPixel;
- // Make a copy of the data pointed to by |map_info.pData| so we are free to
+ // Make a copy of the data pointed to by `map_info.pData` so we are free to
// unmap our texture.
uint8_t* src_data = static_cast<uint8_t*>(map_info.pData);
std::vector<uint8_t> image_data;
diff --git a/modules/pacing/round_robin_packet_queue.cc b/modules/pacing/round_robin_packet_queue.cc
index 1feb5a9..ef37e52 100644
--- a/modules/pacing/round_robin_packet_queue.cc
+++ b/modules/pacing/round_robin_packet_queue.cc
@@ -175,7 +175,7 @@
// Calculate the total amount of time spent by this packet in the queue
// while in a non-paused state. Note that the `pause_time_sum_ms_` was
- // subtracted from |packet.enqueue_time_ms| when the packet was pushed, and
+ // subtracted from `packet.enqueue_time_ms` when the packet was pushed, and
  // by subtracting it now we effectively remove the time spent in the
// queue while in a paused state.
TimeDelta time_in_non_paused_state =
diff --git a/modules/rtp_rtcp/source/absolute_capture_time_interpolator.h b/modules/rtp_rtcp/source/absolute_capture_time_interpolator.h
index a59e2b4..f5ec820 100644
--- a/modules/rtp_rtcp/source/absolute_capture_time_interpolator.h
+++ b/modules/rtp_rtcp/source/absolute_capture_time_interpolator.h
@@ -45,7 +45,7 @@
rtc::ArrayView<const uint32_t> csrcs);
// Returns a received header extension, an interpolated header extension, or
- // |absl::nullopt| if it's not possible to interpolate a header extension.
+ // `absl::nullopt` if it's not possible to interpolate a header extension.
absl::optional<AbsoluteCaptureTime> OnReceivePacket(
uint32_t source,
uint32_t rtp_timestamp,
diff --git a/modules/rtp_rtcp/source/absolute_capture_time_sender.h b/modules/rtp_rtcp/source/absolute_capture_time_sender.h
index 3deff3d..be5a77d 100644
--- a/modules/rtp_rtcp/source/absolute_capture_time_sender.h
+++ b/modules/rtp_rtcp/source/absolute_capture_time_sender.h
@@ -50,7 +50,7 @@
static uint32_t GetSource(uint32_t ssrc,
rtc::ArrayView<const uint32_t> csrcs);
- // Returns a header extension to be sent, or |absl::nullopt| if the header
+ // Returns a header extension to be sent, or `absl::nullopt` if the header
// extension shouldn't be sent.
absl::optional<AbsoluteCaptureTime> OnSendPacket(
uint32_t source,
diff --git a/modules/rtp_rtcp/source/fec_test_helper.h b/modules/rtp_rtcp/source/fec_test_helper.h
index 7a24ecf..92e09fd 100644
--- a/modules/rtp_rtcp/source/fec_test_helper.h
+++ b/modules/rtp_rtcp/source/fec_test_helper.h
@@ -113,7 +113,7 @@
// Creates a new RtpPacket with FEC payload and RED header. Does this by
// creating a new fake media AugmentedPacket, clears the marker bit and adds a
// RED header. Finally replaces the payload with the content of
- // |packet->data|.
+ // `packet->data`.
RtpPacketReceived BuildUlpfecRedPacket(
const ForwardErrorCorrection::Packet& packet);
};
diff --git a/modules/rtp_rtcp/source/rtcp_packet/transport_feedback_unittest.cc b/modules/rtp_rtcp/source/rtcp_packet/transport_feedback_unittest.cc
index 3f99b03..abaa078 100644
--- a/modules/rtp_rtcp/source/rtcp_packet/transport_feedback_unittest.cc
+++ b/modules/rtp_rtcp/source/rtcp_packet/transport_feedback_unittest.cc
@@ -138,7 +138,7 @@
};
// The following tests use FeedbackTester that simulates received packets as
-// specified by the parameters |received_seq[]| and |received_ts[]| (optional).
+// specified by the parameters `received_seq[]` and `received_ts[]` (optional).
// The following is verified in these tests:
// - Expected size of serialized packet.
// - Expected sequence numbers and receive deltas.
diff --git a/modules/rtp_rtcp/source/rtcp_receiver.cc b/modules/rtp_rtcp/source/rtcp_receiver.cc
index 762255c..32f442a 100644
--- a/modules/rtp_rtcp/source/rtcp_receiver.cc
+++ b/modules/rtp_rtcp/source/rtcp_receiver.cc
@@ -599,7 +599,7 @@
//
// We can calc RTT if we send a send report and get a report block back.
- // |report_block.source_ssrc()| is the SSRC identifier of the source to
+ // `report_block.source_ssrc()` is the SSRC identifier of the source to
// which the information in this reception report block pertains.
// Filter out all report blocks that are not for us.
@@ -957,7 +957,7 @@
entry->tmmbr_item = rtcp::TmmbItem(sender_ssrc, request.bitrate_bps(),
request.packet_overhead());
// FindOrCreateTmmbrInfo always sets `last_time_received_ms` to
- // |clock_->TimeInMilliseconds()|.
+ // `clock_->TimeInMilliseconds()`.
entry->last_updated_ms = tmmbr_info->last_time_received_ms;
packet_information->packet_type_flags |= kRtcpTmmbr;
diff --git a/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc b/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc
index e61ae64..585d698 100644
--- a/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc
+++ b/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc
@@ -1335,7 +1335,7 @@
const int64_t kUtcNowUs = 42;
// The "report_block_timestamp_utc_us" is obtained from the global UTC clock
- // (not the simulcated |mocks.clock|) and requires a scoped fake clock.
+ // (not the simulated `mocks.clock`) and requires a scoped fake clock.
rtc::ScopedFakeClock fake_clock;
fake_clock.SetTime(Timestamp::Micros(kUtcNowUs));
diff --git a/modules/rtp_rtcp/source/rtcp_transceiver.h b/modules/rtp_rtcp/source/rtcp_transceiver.h
index 862d4be..20fda94 100644
--- a/modules/rtp_rtcp/source/rtcp_transceiver.h
+++ b/modules/rtp_rtcp/source/rtcp_transceiver.h
@@ -47,7 +47,7 @@
void Stop(std::function<void()> on_destroyed);
// Registers observer to be notified about incoming rtcp packets.
- // Calls to observer will be done on the |config.task_queue|.
+ // Calls to observer will be done on the `config.task_queue`.
void AddMediaReceiverRtcpObserver(uint32_t remote_ssrc,
MediaReceiverRtcpObserver* observer);
// Deregisters the observer. Might return before observer is deregistered.
diff --git a/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc b/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc
index 5753ffd..0f29b4d 100644
--- a/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc
+++ b/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc
@@ -431,7 +431,7 @@
if (!config_.receive_statistics)
return {};
// TODO(danilchap): Support sending more than
- // |ReceiverReport::kMaxNumberOfReportBlocks| per compound rtcp packet.
+ // `ReceiverReport::kMaxNumberOfReportBlocks` per compound rtcp packet.
std::vector<rtcp::ReportBlock> report_blocks =
config_.receive_statistics->RtcpReportBlocks(
rtcp::ReceiverReport::kMaxNumberOfReportBlocks);
diff --git a/modules/rtp_rtcp/source/rtp_sender.cc b/modules/rtp_rtcp/source/rtp_sender.cc
index ccc72a6..707973f 100644
--- a/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/modules/rtp_rtcp/source/rtp_sender.cc
@@ -693,7 +693,7 @@
continue;
}
- // Empty extensions should be supported, so not checking |source.empty()|.
+ // Empty extensions should be supported, so not checking `source.empty()`.
if (!packet.HasExtension(extension)) {
continue;
}
diff --git a/modules/rtp_rtcp/source/ulpfec_generator.cc b/modules/rtp_rtcp/source/ulpfec_generator.cc
index 2d585d7..20402fc 100644
--- a/modules/rtp_rtcp/source/ulpfec_generator.cc
+++ b/modules/rtp_rtcp/source/ulpfec_generator.cc
@@ -30,12 +30,12 @@
constexpr size_t kRedForFecHeaderLength = 1;
// This controls the maximum amount of excess overhead (actual - target)
-// allowed in order to trigger EncodeFec(), before |params_.max_fec_frames|
+// allowed in order to trigger EncodeFec(), before `params_.max_fec_frames`
// is reached. Overhead here is defined as relative to number of media packets.
constexpr int kMaxExcessOverhead = 50; // Q8.
// This is the minimum number of media packets required (above some protection
-// level) in order to trigger EncodeFec(), before |params_.max_fec_frames| is
+// level) in order to trigger EncodeFec(), before `params_.max_fec_frames` is
// reached.
constexpr size_t kMinMediaPackets = 4;
@@ -146,7 +146,7 @@
auto params = CurrentParams();
- // Produce FEC over at most |params_.max_fec_frames| frames, or as soon as:
+ // Produce FEC over at most `params_.max_fec_frames` frames, or as soon as:
// (1) the excess overhead (actual overhead - requested/target overhead) is
// less than `kMaxExcessOverhead`, and
// (2) at least `min_num_media_packets_` media packets is reached.
diff --git a/modules/rtp_rtcp/source/ulpfec_generator.h b/modules/rtp_rtcp/source/ulpfec_generator.h
index c9924581..88a8b45 100644
--- a/modules/rtp_rtcp/source/ulpfec_generator.h
+++ b/modules/rtp_rtcp/source/ulpfec_generator.h
@@ -83,7 +83,7 @@
// Returns true if the excess overhead (actual - target) for the FEC is below
  // the amount `kMaxExcessOverhead`. This affects the lower protection level
// cases and low number of media packets/frame. The target overhead is given
- // by |params_.fec_rate|, and is only achievable in the limit of large number
+ // by `params_.fec_rate`, and is only achievable in the limit of large number
// of media packets.
bool ExcessOverheadBelowMax() const;
diff --git a/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
index e696987..11d36b7 100644
--- a/modules/video_coding/codecs/h264/h264_decoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
@@ -80,9 +80,9 @@
RTC_CHECK(context->pix_fmt == kPixelFormatDefault ||
context->pix_fmt == kPixelFormatFullRange);
- // |av_frame->width| and |av_frame->height| are set by FFmpeg. These are the
- // actual image's dimensions and may be different from |context->width| and
- // |context->coded_width| due to reordering.
+ // `av_frame->width` and `av_frame->height` are set by FFmpeg. These are the
+ // actual image's dimensions and may be different from `context->width` and
+ // `context->coded_width` due to reordering.
int width = av_frame->width;
int height = av_frame->height;
// See `lowres`, if used the decoder scales the image by 1/2^(lowres). This
@@ -201,7 +201,7 @@
av_context_->extradata = nullptr;
av_context_->extradata_size = 0;
- // If this is ever increased, look at |av_context_->thread_safe_callbacks| and
+ // If this is ever increased, look at `av_context_->thread_safe_callbacks` and
// make it possible to disable the thread checker in the frame buffer pool.
av_context_->thread_count = 1;
av_context_->thread_type = FF_THREAD_SLICE;
diff --git a/modules/video_coding/codecs/h264/h264_decoder_impl.h b/modules/video_coding/codecs/h264/h264_decoder_impl.h
index 6ba4eb7..2c90a40 100644
--- a/modules/video_coding/codecs/h264/h264_decoder_impl.h
+++ b/modules/video_coding/codecs/h264/h264_decoder_impl.h
@@ -61,7 +61,7 @@
~H264DecoderImpl() override;
// If `codec_settings` is NULL it is ignored. If it is not NULL,
- // |codec_settings->codecType| must be `kVideoCodecH264`.
+ // `codec_settings->codecType` must be `kVideoCodecH264`.
int32_t InitDecode(const VideoCodec* codec_settings,
int32_t number_of_cores) override;
int32_t Release() override;
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
index 4e78d1e..887aa58 100644
--- a/modules/video_coding/codecs/h264/h264_encoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
@@ -89,13 +89,13 @@
// Helper method used by H264EncoderImpl::Encode.
// Copies the encoded bytes from `info` to `encoded_image`. The
-// |encoded_image->_buffer| may be deleted and reallocated if a bigger buffer is
+// `encoded_image->_buffer` may be deleted and reallocated if a bigger buffer is
// required.
//
// After OpenH264 encoding, the encoded bytes are stored in `info` spread out
// over a number of layers and "NAL units". Each NAL unit is a fragment starting
// with the four-byte start code {0,0,0,1}. All of this data (including the
-// start codes) is copied to the |encoded_image->_buffer|.
+// start codes) is copied to the `encoded_image->_buffer`.
static void RtpFragmentize(EncodedImage* encoded_image, SFrameBSInfo* info) {
// Calculate minimum buffer size required to hold encoded data.
size_t required_capacity = 0;
@@ -115,7 +115,7 @@
encoded_image->SetEncodedData(buffer);
// Iterate layers and NAL units, note each NAL unit as a fragment and copy
- // the data to |encoded_image->_buffer|.
+ // the data to `encoded_image->_buffer`.
const uint8_t start_code[4] = {0, 0, 0, 1};
size_t frag = 0;
encoded_image->set_size(0);
@@ -489,7 +489,7 @@
RtpFragmentize(&encoded_images_[i], &info);
// Encoder can skip frames to save bandwidth in which case
- // |encoded_images_[i]._length| == 0.
+ // `encoded_images_[i]._length` == 0.
if (encoded_images_[i].size() > 0) {
// Parse QP.
h264_bitstream_parser_.ParseBitstream(encoded_images_[i]);
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.h b/modules/video_coding/codecs/h264/h264_encoder_impl.h
index b96de10..1163464 100644
--- a/modules/video_coding/codecs/h264/h264_encoder_impl.h
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.h
@@ -57,7 +57,7 @@
explicit H264EncoderImpl(const cricket::VideoCodec& codec);
~H264EncoderImpl() override;
- // |settings.max_payload_size| is ignored.
+ // `settings.max_payload_size` is ignored.
// The following members of `codec_settings` are used. The rest are ignored.
// - codecType (must be kVideoCodecH264)
// - targetBitrate
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
index 712a833..1f70569 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
@@ -1049,7 +1049,7 @@
error == WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT)) {
++num_tries;
// Note we must pass 0 for `flags` field in encode call below since they are
- // set above in |libvpx_interface_->vpx_codec_control_| function for each
+ // set above in `libvpx_interface_->vpx_codec_control_` function for each
// encoder/spatial layer.
error = libvpx_->codec_encode(&encoders_[0], &raw_images_[0], timestamp_,
duration, 0, VPX_DL_REALTIME);
diff --git a/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc b/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc
index 8d8cb95..0f8ade3 100644
--- a/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc
+++ b/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc
@@ -247,7 +247,7 @@
VPX_DL_REALTIME)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
- // |img->fb_priv| contains the image data, a reference counted Vp9FrameBuffer.
+ // `img->fb_priv` contains the image data, a reference counted Vp9FrameBuffer.
// It may be released by libvpx during future vpx_codec_decode or
// vpx_codec_destroy calls.
img = vpx_codec_get_frame(decoder_, &iter);
diff --git a/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h
index 7c87d58..826e8d6 100644
--- a/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h
+++ b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h
@@ -226,7 +226,7 @@
// Performance flags, ordered by `min_pixel_count`.
const PerformanceFlags performance_flags_;
// Caching of `speed_configs_`, where index i maps to the resolution as
- // specified in |codec_.spatialLayer[i]|.
+ // specified in `codec_.spatialLayer[i]`.
std::vector<PerformanceFlags::ParameterSet>
performance_flags_by_spatial_index_;
void UpdatePerformanceFlags();
diff --git a/modules/video_coding/session_info.h b/modules/video_coding/session_info.h
index dc27a64..846352a 100644
--- a/modules/video_coding/session_info.h
+++ b/modules/video_coding/session_info.h
@@ -79,7 +79,7 @@
void InformOfEmptyPacket(uint16_t seq_num);
// Finds the packet of the beginning of the next VP8 partition. If
- // none is found the returned iterator points to |packets_.end()|.
+ // none is found the returned iterator points to `packets_.end()`.
// `it` is expected to point to the last packet of the previous partition,
// or to the first packet of the frame. `packets_skipped` is incremented
// for each packet found which doesn't have the beginning bit set.
diff --git a/p2p/base/p2p_transport_channel.h b/p2p/base/p2p_transport_channel.h
index 025cac2..28248e7 100644
--- a/p2p/base/p2p_transport_channel.h
+++ b/p2p/base/p2p_transport_channel.h
@@ -378,7 +378,7 @@
void SetReceiving(bool receiving);
// Clears the address and the related address fields of a local candidate to
// avoid IP leakage. This is applicable in several scenarios as commented in
- // |PortAllocator::SanitizeCandidate|.
+ // `PortAllocator::SanitizeCandidate`.
Candidate SanitizeLocalCandidate(const Candidate& c) const;
// Clears the address field of a remote candidate to avoid IP leakage. This is
// applicable in the following scenarios:
diff --git a/p2p/base/transport_description_factory_unittest.cc b/p2p/base/transport_description_factory_unittest.cc
index 08efe12..01120a8 100644
--- a/p2p/base/transport_description_factory_unittest.cc
+++ b/p2p/base/transport_description_factory_unittest.cc
@@ -291,25 +291,25 @@
}
// Test that ice ufrag and password are changed in an updated offer and answer
-// if |TransportDescriptionOptions::ice_restart| is true.
+// if `TransportDescriptionOptions::ice_restart` is true.
TEST_F(TransportDescriptionFactoryTest, TestIceRestart) {
TestIceRestart(false);
}
// Test that ice ufrag and password are changed in an updated offer and answer
-// if |TransportDescriptionOptions::ice_restart| is true and DTLS is enabled.
+// if `TransportDescriptionOptions::ice_restart` is true and DTLS is enabled.
TEST_F(TransportDescriptionFactoryTest, TestIceRestartWithDtls) {
TestIceRestart(true);
}
// Test that ice renomination is set in an updated offer and answer
-// if |TransportDescriptionOptions::enable_ice_renomination| is true.
+// if `TransportDescriptionOptions::enable_ice_renomination` is true.
TEST_F(TransportDescriptionFactoryTest, TestIceRenomination) {
TestIceRenomination(false);
}
// Test that ice renomination is set in an updated offer and answer
-// if |TransportDescriptionOptions::enable_ice_renomination| is true and DTLS
+// if `TransportDescriptionOptions::enable_ice_renomination` is true and DTLS
// is enabled.
TEST_F(TransportDescriptionFactoryTest, TestIceRenominationWithDtls) {
TestIceRenomination(true);
diff --git a/pc/dtmf_sender.h b/pc/dtmf_sender.h
index 5f20054..a208b10 100644
--- a/pc/dtmf_sender.h
+++ b/pc/dtmf_sender.h
@@ -42,7 +42,7 @@
// The `duration` indicates the length of the DTMF tone in ms.
// Returns true on success and false on failure.
virtual bool InsertDtmf(int code, int duration) = 0;
- // Returns a |sigslot::signal0<>| signal. The signal should fire before
+ // Returns a `sigslot::signal0<>` signal. The signal should fire before
// the provider is destroyed.
virtual sigslot::signal0<>* GetOnDestroyedSignal() = 0;
diff --git a/pc/ice_server_parsing.cc b/pc/ice_server_parsing.cc
index c1c8557..a38e28c 100644
--- a/pc/ice_server_parsing.cc
+++ b/pc/ice_server_parsing.cc
@@ -104,7 +104,7 @@
// This method parses IPv6 and IPv4 literal strings, along with hostnames in
// standard hostname:port format.
// Consider the following formats as correct.
-// |hostname:port|, |[IPV6 address]:port|, |IPv4 address|:port,
+// `hostname:port`, `[IPV6 address]:port`, `IPv4 address`:port,
// `hostname`, `[IPv6 address]`, `IPv4 address`.
static bool ParseHostnameAndPortFromString(const std::string& in_str,
std::string* host,
diff --git a/pc/jsep_session_description.cc b/pc/jsep_session_description.cc
index 4c1a4e7..57ccf7c 100644
--- a/pc/jsep_session_description.cc
+++ b/pc/jsep_session_description.cc
@@ -104,7 +104,7 @@
// Combining the above considerations, we use 0.0.0.0 with port 9 to
// populate the c= and the m= lines. See `BuildMediaDescription` in
// webrtc_sdp.cc for the SDP generation with
- // |media_desc->connection_address()|.
+ // `media_desc->connection_address()`.
connection_addr = rtc::SocketAddress(kDummyAddress, kDummyPort);
}
media_desc->set_connection_address(connection_addr);
diff --git a/pc/jsep_transport.h b/pc/jsep_transport.h
index 5593122..e3e929b 100644
--- a/pc/jsep_transport.h
+++ b/pc/jsep_transport.h
@@ -323,7 +323,7 @@
RTC_GUARDED_BY(network_thread_);
// This is invoked when RTCP-mux becomes active and
- // |rtcp_dtls_transport_| is destroyed. The JsepTransportController will
+ // `rtcp_dtls_transport_` is destroyed. The JsepTransportController will
// receive the callback and update the aggregate transport states.
std::function<void()> rtcp_mux_active_callback_;
diff --git a/pc/media_session.cc b/pc/media_session.cc
index b66d7f6..4bbb877 100644
--- a/pc/media_session.cc
+++ b/pc/media_session.cc
@@ -1755,7 +1755,7 @@
ContentInfo& added = answer->contents().back();
if (!added.rejected && session_options.bundle_enabled &&
bundle_index.has_value()) {
- // The `bundle_index` is for |media_description_options.mid|.
+ // The `bundle_index` is for `media_description_options.mid`.
RTC_DCHECK_EQ(media_description_options.mid, added.name);
answer_bundles[bundle_index.value()].AddContentName(added.name);
bundle_transports[bundle_index.value()].reset(
diff --git a/pc/media_session_unittest.cc b/pc/media_session_unittest.cc
index fa08f40..a02b4c1 100644
--- a/pc/media_session_unittest.cc
+++ b/pc/media_session_unittest.cc
@@ -2719,7 +2719,7 @@
// offer/answer exchange plus the audio codecs only `f2_` offer, sorted in
// preference order.
// TODO(wu): `updated_offer` should not include the codec
- // (i.e. |kAudioCodecs2[0]|) the other side doesn't support.
+ // (i.e. `kAudioCodecs2[0]`) the other side doesn't support.
const AudioCodec kUpdatedAudioCodecOffer[] = {
kAudioCodecsAnswer[0],
kAudioCodecsAnswer[1],
diff --git a/pc/peer_connection_ice_unittest.cc b/pc/peer_connection_ice_unittest.cc
index 8726afb..a27d174 100644
--- a/pc/peer_connection_ice_unittest.cc
+++ b/pc/peer_connection_ice_unittest.cc
@@ -548,7 +548,7 @@
ASSERT_TRUE(
caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal()));
- // |candidate.transport_name()| is empty.
+ // `candidate.transport_name()` is empty.
cricket::Candidate candidate = CreateLocalUdpCandidate(kCalleeAddress);
auto* audio_content = cricket::GetFirstAudioContent(
caller->pc()->local_description()->description());
@@ -1492,7 +1492,7 @@
ASSERT_TRUE(
caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal()));
- // |candidate.transport_name()| is empty.
+ // `candidate.transport_name()` is empty.
cricket::Candidate candidate = CreateLocalUdpCandidate(kCalleeAddress);
auto* audio_content = cricket::GetFirstAudioContent(
caller->pc()->local_description()->description());
diff --git a/pc/peer_connection_integrationtest.cc b/pc/peer_connection_integrationtest.cc
index 0ab0e4d..31652ac 100644
--- a/pc/peer_connection_integrationtest.cc
+++ b/pc/peer_connection_integrationtest.cc
@@ -3194,7 +3194,7 @@
EXPECT_EQ_WAIT(webrtc::PeerConnectionInterface::kIceConnectionConnected,
callee()->ice_connection_state(), kDefaultTimeout);
// Note that we cannot use the metric
- // |WebRTC.PeerConnection.CandidatePairType_UDP| in this test since this
+ // `WebRTC.PeerConnection.CandidatePairType_UDP` in this test since this
// metric is only populated when we reach kIceConnectionComplete in the
// current implementation.
EXPECT_EQ(cricket::RELAY_PORT_TYPE,
diff --git a/pc/peer_connection_rtp_unittest.cc b/pc/peer_connection_rtp_unittest.cc
index 2822854..715546b 100644
--- a/pc/peer_connection_rtp_unittest.cc
+++ b/pc/peer_connection_rtp_unittest.cc
@@ -58,7 +58,7 @@
#include "test/gtest.h"
// This file contains tests for RTP Media API-related behavior of
-// |webrtc::PeerConnection|, see https://w3c.github.io/webrtc-pc/#rtp-media-api.
+// `webrtc::PeerConnection`, see https://w3c.github.io/webrtc-pc/#rtp-media-api.
namespace webrtc {
@@ -188,7 +188,7 @@
}
};
-// These tests cover |webrtc::PeerConnectionObserver| callbacks firing upon
+// These tests cover `webrtc::PeerConnectionObserver` callbacks firing upon
// setting the remote description.
TEST_P(PeerConnectionRtpTest, AddTrackWithoutStreamFiresOnAddTrack) {
@@ -1994,7 +1994,7 @@
if (sdp_semantics_ == SdpSemantics::kPlanB) {
// TODO(hbos): When https://crbug.com/webrtc/8734 is resolved, this should
- // return true, and doing |callee->SetRemoteDescription()| should work.
+ // return true, and doing `callee->SetRemoteDescription()` should work.
EXPECT_FALSE(caller->CreateOfferAndSetAsLocal());
} else {
EXPECT_TRUE(caller->CreateOfferAndSetAsLocal());
diff --git a/pc/rtc_stats_collector_unittest.cc b/pc/rtc_stats_collector_unittest.cc
index 3fc8b8e..8c4f0a6 100644
--- a/pc/rtc_stats_collector_unittest.cc
+++ b/pc/rtc_stats_collector_unittest.cc
@@ -1466,9 +1466,9 @@
expected_pair.responses_received = 4321;
expected_pair.responses_sent = 1000;
expected_pair.consent_requests_sent = (2020 - 2000);
- // |expected_pair.current_round_trip_time| should be undefined because the
+ // `expected_pair.current_round_trip_time` should be undefined because the
// current RTT is not set.
- // |expected_pair.available_[outgoing/incoming]_bitrate| should be undefined
+ // `expected_pair.available_[outgoing/incoming]_bitrate` should be undefined
// because it is not the current pair.
ASSERT_TRUE(report->Get(expected_pair.id()));
@@ -1768,7 +1768,7 @@
IdForType<RTCMediaStreamTrackStats>(report), report->timestamp_us(),
RTCMediaStreamTrackKind::kAudio);
expected_remote_audio_track.track_identifier = remote_audio_track->id();
- // |expected_remote_audio_track.media_source_id| should be undefined
+ // `expected_remote_audio_track.media_source_id` should be undefined
// because the track is remote.
expected_remote_audio_track.remote_source = true;
expected_remote_audio_track.ended = false;
@@ -1920,7 +1920,7 @@
RTCMediaStreamTrackKind::kVideo);
expected_remote_video_track_ssrc3.track_identifier =
remote_video_track_ssrc3->id();
- // |expected_remote_video_track_ssrc3.media_source_id| should be undefined
+ // `expected_remote_video_track_ssrc3.media_source_id` should be undefined
// because the track is remote.
expected_remote_video_track_ssrc3.remote_source = true;
expected_remote_video_track_ssrc3.ended = true;
@@ -2011,7 +2011,7 @@
expected_audio.header_bytes_received = 4;
expected_audio.packets_lost = -1;
expected_audio.packets_discarded = 7788;
- // |expected_audio.last_packet_received_timestamp| should be undefined.
+ // `expected_audio.last_packet_received_timestamp` should be undefined.
expected_audio.jitter = 4.5;
expected_audio.jitter_buffer_delay = 1.0;
expected_audio.jitter_buffer_emitted_count = 2;
@@ -2116,16 +2116,16 @@
expected_video.frames_decoded = 9;
expected_video.key_frames_decoded = 3;
expected_video.frames_dropped = 13;
- // |expected_video.qp_sum| should be undefined.
+ // `expected_video.qp_sum` should be undefined.
expected_video.total_decode_time = 9.0;
expected_video.total_inter_frame_delay = 0.123;
expected_video.total_squared_inter_frame_delay = 0.00456;
expected_video.jitter = 1.199;
expected_video.jitter_buffer_delay = 3.456;
expected_video.jitter_buffer_emitted_count = 13;
- // |expected_video.last_packet_received_timestamp| should be undefined.
- // |expected_video.content_type| should be undefined.
- // |expected_video.decoder_implementation| should be undefined.
+ // `expected_video.last_packet_received_timestamp` should be undefined.
+ // `expected_video.content_type` should be undefined.
+ // `expected_video.decoder_implementation` should be undefined.
ASSERT_TRUE(report->Get(expected_video.id()));
EXPECT_EQ(
@@ -2189,7 +2189,7 @@
RTCOutboundRTPStreamStats expected_audio("RTCOutboundRTPAudioStream_1",
report->timestamp_us());
expected_audio.media_source_id = "RTCAudioSource_50";
- // |expected_audio.remote_id| should be undefined.
+ // `expected_audio.remote_id` should be undefined.
expected_audio.ssrc = 1;
expected_audio.media_type = "audio";
expected_audio.kind = "audio";
@@ -2275,7 +2275,7 @@
RTCOutboundRTPStreamStats expected_video(stats_of_my_type[0]->id(),
report->timestamp_us());
expected_video.media_source_id = "RTCVideoSource_50";
- // |expected_video.remote_id| should be undefined.
+ // `expected_video.remote_id` should be undefined.
expected_video.ssrc = 1;
expected_video.media_type = "video";
expected_video.kind = "video";
@@ -2305,9 +2305,9 @@
expected_video.frames_per_second = 10.0;
expected_video.frames_sent = 5;
expected_video.huge_frames_sent = 2;
- // |expected_video.content_type| should be undefined.
- // |expected_video.qp_sum| should be undefined.
- // |expected_video.encoder_implementation| should be undefined.
+ // `expected_video.content_type` should be undefined.
+ // `expected_video.qp_sum` should be undefined.
+ // `expected_video.encoder_implementation` should be undefined.
ASSERT_TRUE(report->Get(expected_video.id()));
EXPECT_EQ(
@@ -2889,7 +2889,7 @@
report_block_data.SetReportBlock(report_block, kReportBlockTimestampUtcUs);
report_block_data.AddRoundTripTimeSample(kRoundTripTimeSample1Ms);
// Only the last sample should be exposed as the
- // |RTCRemoteInboundRtpStreamStats::round_trip_time|.
+ // `RTCRemoteInboundRtpStreamStats::round_trip_time`.
report_block_data.AddRoundTripTimeSample(kRoundTripTimeSample2Ms);
report_block_datas.push_back(report_block_data);
}
diff --git a/pc/rtp_sender.cc b/pc/rtp_sender.cc
index 9883945..d428637 100644
--- a/pc/rtp_sender.cc
+++ b/pc/rtp_sender.cc
@@ -538,7 +538,7 @@
}
#endif
- // |track_->enabled()| hops to the signaling thread, so call it before we hop
+ // `track_->enabled()` hops to the signaling thread, so call it before we hop
// to the worker thread or else it will deadlock.
bool track_enabled = track_->enabled();
bool success = worker_thread_->Invoke<bool>(RTC_FROM_HERE, [&] {
diff --git a/pc/sdp_offer_answer.cc b/pc/sdp_offer_answer.cc
index 22bf377..c167f35 100644
--- a/pc/sdp_offer_answer.cc
+++ b/pc/sdp_offer_answer.cc
@@ -4091,7 +4091,7 @@
// Find new and active senders.
for (const cricket::StreamParams& params : streams) {
- // The sync_label is the MediaStream label and the |stream.id| is the
+ // The sync_label is the MediaStream label and the `stream.id` is the
// sender id.
const std::string& stream_id = params.first_stream_id();
const std::string& sender_id = params.id;
@@ -4154,8 +4154,8 @@
break;
}
- // |params.id| is the sender id and the stream id uses the first of
- // |params.stream_ids|. The remote description could come from a Unified
+ // `params.id` is the sender id and the stream id uses the first of
+ // `params.stream_ids`. The remote description could come from a Unified
// Plan endpoint, with multiple or no stream_ids() signaled. Since this is
// not supported in Plan B, we just take the first here and create the
// default stream ID if none is specified.
diff --git a/pc/test/fake_rtc_certificate_generator.h b/pc/test/fake_rtc_certificate_generator.h
index b591c4c..fc931ad 100644
--- a/pc/test/fake_rtc_certificate_generator.h
+++ b/pc/test/fake_rtc_certificate_generator.h
@@ -83,9 +83,9 @@
// ECDSA with EC_NIST_P256.
// These PEM strings were created by generating an identity with
-// |SSLIdentity::Create| and invoking |identity->PrivateKeyToPEMString()|,
-// |identity->PublicKeyToPEMString()| and
-// |identity->certificate().ToPEMString()|.
+// `SSLIdentity::Create` and invoking `identity->PrivateKeyToPEMString()`,
+// `identity->PublicKeyToPEMString()` and
+// `identity->certificate().ToPEMString()`.
static const rtc::RTCCertificatePEM kEcdsaPems[] = {
rtc::RTCCertificatePEM(
"-----BEGIN PRIVATE KEY-----\n"
diff --git a/pc/webrtc_session_description_factory.h b/pc/webrtc_session_description_factory.h
index d0b3ad7..8e80fb5 100644
--- a/pc/webrtc_session_description_factory.h
+++ b/pc/webrtc_session_description_factory.h
@@ -41,7 +41,7 @@
class WebRtcCertificateGeneratorCallback
: public rtc::RTCCertificateGeneratorCallback {
public:
- // |rtc::RTCCertificateGeneratorCallback| overrides.
+ // `rtc::RTCCertificateGeneratorCallback` overrides.
void OnSuccess(
const rtc::scoped_refptr<rtc::RTCCertificate>& certificate) override;
void OnFailure() override;
diff --git a/rtc_base/containers/void_t.h b/rtc_base/containers/void_t.h
index 62c57d4..149fc70 100644
--- a/rtc_base/containers/void_t.h
+++ b/rtc_base/containers/void_t.h
@@ -25,7 +25,7 @@
// webrtc::void_t is an implementation of std::void_t from C++17.
//
-// We use |webrtc::void_t_internal::make_void| as a helper struct to avoid a
+// We use `webrtc::void_t_internal::make_void` as a helper struct to avoid a
// C++14 defect:
// http://en.cppreference.com/w/cpp/types/void_t
// http://open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#1558
diff --git a/rtc_base/rtc_certificate.h b/rtc_base/rtc_certificate.h
index 882c735..0102c4f 100644
--- a/rtc_base/rtc_certificate.h
+++ b/rtc_base/rtc_certificate.h
@@ -31,7 +31,7 @@
// certificate and acts as a text representation of RTCCertificate. Certificates
// can be serialized and deserialized to and from this format, which allows for
// cloning and storing of certificates to disk. The PEM format is that of
-// |SSLIdentity::PrivateKeyToPEMString| and |SSLCertificate::ToPEMString|, e.g.
+// `SSLIdentity::PrivateKeyToPEMString` and `SSLCertificate::ToPEMString`, e.g.
// the string representations used by OpenSSL.
class RTCCertificatePEM {
public:
diff --git a/rtc_base/rtc_certificate_generator.cc b/rtc_base/rtc_certificate_generator.cc
index 3a59781..16ff23c 100644
--- a/rtc_base/rtc_certificate_generator.cc
+++ b/rtc_base/rtc_certificate_generator.cc
@@ -51,7 +51,7 @@
expires_s = std::min(expires_s, kYearInSeconds);
// TODO(torbjorng): Stop using `time_t`, its type is unspecified. It is safe
// to assume it can hold up to a year's worth of seconds (and more), but
- // |SSLIdentity::Create| should stop relying on `time_t`.
+ // `SSLIdentity::Create` should stop relying on `time_t`.
// See bugs.webrtc.org/5720.
time_t cert_lifetime_s = static_cast<time_t>(expires_s);
identity = SSLIdentity::Create(kIdentityName, key_params, cert_lifetime_s);
diff --git a/rtc_base/rtc_certificate_generator.h b/rtc_base/rtc_certificate_generator.h
index ee68b27..065b8b5 100644
--- a/rtc_base/rtc_certificate_generator.h
+++ b/rtc_base/rtc_certificate_generator.h
@@ -23,7 +23,7 @@
namespace rtc {
-// See |RTCCertificateGeneratorInterface::GenerateCertificateAsync|.
+// See `RTCCertificateGeneratorInterface::GenerateCertificateAsync`.
class RTCCertificateGeneratorCallback : public RefCountInterface {
public:
virtual void OnSuccess(const scoped_refptr<RTCCertificate>& certificate) = 0;
diff --git a/rtc_base/ssl_certificate.cc b/rtc_base/ssl_certificate.cc
index 3f12c79..ed42998 100644
--- a/rtc_base/ssl_certificate.cc
+++ b/rtc_base/ssl_certificate.cc
@@ -49,15 +49,15 @@
std::unique_ptr<SSLCertificateStats> SSLCertificate::GetStats() const {
// TODO(bemasc): Move this computation to a helper class that caches these
- // values to reduce CPU use in |StatsCollector::GetStats|. This will require
- // adding a fast |SSLCertificate::Equals| to detect certificate changes.
+ // values to reduce CPU use in `StatsCollector::GetStats`. This will require
+ // adding a fast `SSLCertificate::Equals` to detect certificate changes.
std::string digest_algorithm;
if (!GetSignatureDigestAlgorithm(&digest_algorithm))
return nullptr;
- // |SSLFingerprint::Create| can fail if the algorithm returned by
- // |SSLCertificate::GetSignatureDigestAlgorithm| is not supported by the
- // implementation of |SSLCertificate::ComputeDigest|. This currently happens
+ // `SSLFingerprint::Create` can fail if the algorithm returned by
+ // `SSLCertificate::GetSignatureDigestAlgorithm` is not supported by the
+ // implementation of `SSLCertificate::ComputeDigest`. This currently happens
// with MD5- and SHA-224-signed certificates when linked to libNSS.
std::unique_ptr<SSLFingerprint> ssl_fingerprint =
SSLFingerprint::Create(digest_algorithm, *this);
diff --git a/rtc_base/ssl_identity_unittest.cc b/rtc_base/ssl_identity_unittest.cc
index a8be7963..53f4a2a 100644
--- a/rtc_base/ssl_identity_unittest.cc
+++ b/rtc_base/ssl_identity_unittest.cc
@@ -65,12 +65,12 @@
0x35, 0xce, 0x26, 0x58, 0x4a, 0x33, 0x6d, 0xbc, 0xb6};
// These PEM strings were created by generating an identity with
-// |SSLIdentity::Create| and invoking |identity->PrivateKeyToPEMString()|,
-// |identity->PublicKeyToPEMString()| and
-// |identity->certificate().ToPEMString()|. If the crypto library is updated,
+// `SSLIdentity::Create` and invoking `identity->PrivateKeyToPEMString()`,
+// `identity->PublicKeyToPEMString()` and
+// `identity->certificate().ToPEMString()`. If the crypto library is updated,
// and the update changes the string form of the keys, these will have to be
// updated too. The fingerprint, fingerprint algorithm and base64 certificate
-// were created by calling |identity->certificate().GetStats()|.
+// were created by calling `identity->certificate().GetStats()`.
static const char kRSA_PRIVATE_KEY_PEM[] =
"-----BEGIN PRIVATE KEY-----\n"
"MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMQPqDStRlYeDpkX\n"
diff --git a/rtc_base/timestamp_aligner_unittest.cc b/rtc_base/timestamp_aligner_unittest.cc
index 0a050ff..ca91b62 100644
--- a/rtc_base/timestamp_aligner_unittest.cc
+++ b/rtc_base/timestamp_aligner_unittest.cc
@@ -158,7 +158,7 @@
// Non-monotonic translated timestamps can happen only for translated
// timestamps in the future, which is tolerated if
- // |timestamp_aligner.clip_bias_us| is large enough. Instead of
+ // `timestamp_aligner.clip_bias_us` is large enough. Instead of
// changing that private member for this test, just add the bias to
// `kSystemTimeUs` when calling ClipTimestamp.
const int64_t kClipBiasUs = 100000;
diff --git a/sdk/android/api/org/webrtc/DataChannel.java b/sdk/android/api/org/webrtc/DataChannel.java
index 804915d..b9301f1 100644
--- a/sdk/android/api/org/webrtc/DataChannel.java
+++ b/sdk/android/api/org/webrtc/DataChannel.java
@@ -82,7 +82,7 @@
/** The data channel state has changed. */
@CalledByNative("Observer") public void onStateChange();
/**
- * A data buffer was successfully received. NOTE: |buffer.data| will be
+ * A data buffer was successfully received. NOTE: `buffer.data` will be
* freed once this function returns so callers who want to use the data
* asynchronously must make sure to copy it first.
*/
diff --git a/sdk/objc/api/peerconnection/RTCDataChannel.h b/sdk/objc/api/peerconnection/RTCDataChannel.h
index 6f4ef37..89eb58b 100644
--- a/sdk/objc/api/peerconnection/RTCDataChannel.h
+++ b/sdk/objc/api/peerconnection/RTCDataChannel.h
@@ -112,7 +112,7 @@
/**
* The number of bytes of application data that have been queued using
- * |sendData:| but that have not yet been transmitted to the network.
+ * `sendData:` but that have not yet been transmitted to the network.
*/
@property(nonatomic, readonly) uint64_t bufferedAmount;
diff --git a/sdk/objc/components/audio/RTCAudioSession.h b/sdk/objc/components/audio/RTCAudioSession.h
index 59250fe..5881155 100644
--- a/sdk/objc/components/audio/RTCAudioSession.h
+++ b/sdk/objc/components/audio/RTCAudioSession.h
@@ -122,7 +122,7 @@
* WebRTC and the application layer are avoided.
*
* RTCAudioSession also coordinates activation so that the audio session is
- * activated only once. See |setActive:error:|.
+ * activated only once. See `setActive:error:`.
*/
RTC_OBJC_EXPORT
@interface RTC_OBJC_TYPE (RTCAudioSession) : NSObject <RTC_OBJC_TYPE(RTCAudioSessionActivationDelegate)>
diff --git a/stats/rtc_stats.cc b/stats/rtc_stats.cc
index 4895edc..e6eb51e 100644
--- a/stats/rtc_stats.cc
+++ b/stats/rtc_stats.cc
@@ -20,7 +20,7 @@
namespace {
-// Produces "[a,b,c]". Works for non-vector |RTCStatsMemberInterface::Type|
+// Produces "[a,b,c]". Works for non-vector `RTCStatsMemberInterface::Type`
// types.
template <typename T>
std::string VectorToString(const std::vector<T>& vector) {
diff --git a/system_wrappers/include/ntp_time.h b/system_wrappers/include/ntp_time.h
index cb58018..b912bc8 100644
--- a/system_wrappers/include/ntp_time.h
+++ b/system_wrappers/include/ntp_time.h
@@ -65,7 +65,7 @@
// Converts `int64_t` milliseconds to Q32.32-formatted fixed-point seconds.
// Performs clamping if the result overflows or underflows.
inline int64_t Int64MsToQ32x32(int64_t milliseconds) {
- // TODO(bugs.webrtc.org/10893): Change to use |rtc::saturated_cast| once the
+ // TODO(bugs.webrtc.org/10893): Change to use `rtc::saturated_cast` once the
// bug has been fixed.
double result =
std::round(milliseconds * (NtpTime::kFractionsPerSecond / 1000.0));
@@ -88,7 +88,7 @@
// Converts `int64_t` milliseconds to UQ32.32-formatted fixed-point seconds.
// Performs clamping if the result overflows or underflows.
inline uint64_t Int64MsToUQ32x32(int64_t milliseconds) {
- // TODO(bugs.webrtc.org/10893): Change to use |rtc::saturated_cast| once the
+ // TODO(bugs.webrtc.org/10893): Change to use `rtc::saturated_cast` once the
// bug has been fixed.
double result =
std::round(milliseconds * (NtpTime::kFractionsPerSecond / 1000.0));
diff --git a/test/ios/google_test_runner_delegate.h b/test/ios/google_test_runner_delegate.h
index f0bcfe9..bb3493a 100644
--- a/test/ios/google_test_runner_delegate.h
+++ b/test/ios/google_test_runner_delegate.h
@@ -17,7 +17,7 @@
@protocol GoogleTestRunnerDelegate
// Returns YES if this delegate supports running GoogleTests via a call to
-// |runGoogleTests|.
+// `runGoogleTests`.
@property(nonatomic, readonly, assign) BOOL supportsRunningGoogleTests;
// Runs GoogleTests and returns the final exit code.
diff --git a/test/ios/test_support.mm b/test/ios/test_support.mm
index 3da896f..24cbcc7 100644
--- a/test/ios/test_support.mm
+++ b/test/ios/test_support.mm
@@ -72,8 +72,8 @@
[_window setRootViewController:[[UIViewController alloc] init]];
if (!rtc::test::ShouldRunIOSUnittestsWithXCTest()) {
- // When running in XCTest mode, XCTest will invoke |runGoogleTest| directly.
- // Otherwise, schedule a call to |runTests|.
+ // When running in XCTest mode, XCTest will invoke `runGoogleTest` directly.
+ // Otherwise, schedule a call to `runTests`.
[self performSelector:@selector(runTests) withObject:nil afterDelay:0.1];
}
diff --git a/test/pc/e2e/analyzer/video/default_video_quality_analyzer.h b/test/pc/e2e/analyzer/video/default_video_quality_analyzer.h
index f5f3920..1461dd7 100644
--- a/test/pc/e2e/analyzer/video/default_video_quality_analyzer.h
+++ b/test/pc/e2e/analyzer/video/default_video_quality_analyzer.h
@@ -297,7 +297,7 @@
absl::optional<VideoFrame> rendered;
// If true, the frame was dropped somewhere between capturing and rendering
// and wasn't rendered on the remote peer side. If `dropped` is true, `rendered`
- // will be |absl::nullopt|.
+ // will be `absl::nullopt`.
bool dropped;
FrameStats frame_stats;
OverloadReason overload_reason;
diff --git a/test/pc/e2e/peer_connection_quality_test.cc b/test/pc/e2e/peer_connection_quality_test.cc
index 72af279..53e6220 100644
--- a/test/pc/e2e/peer_connection_quality_test.cc
+++ b/test/pc/e2e/peer_connection_quality_test.cc
@@ -445,12 +445,12 @@
RtpTransceiverInit transceiver_params;
if (video_config.simulcast_config) {
transceiver_params.direction = RtpTransceiverDirection::kSendOnly;
- // Because simulcast enabled |alice_->params()->video_codecs| has only 1
+ // Because simulcast is enabled, `alice_->params()->video_codecs` has only 1
// element.
if (alice_->params()->video_codecs[0].name == cricket::kVp8CodecName) {
// For Vp8 simulcast we need to add as many RtpEncodingParameters to the
// track as there are simulcast streams requested. If they are specified in
- // |video_config.simulcast_config| it should be copied from there.
+ // `video_config.simulcast_config`, they should be copied from there.
for (int i = 0;
i < video_config.simulcast_config->simulcast_streams_count; ++i) {
RtpEncodingParameters enc_params;
diff --git a/test/pc/e2e/sdp/sdp_changer.cc b/test/pc/e2e/sdp/sdp_changer.cc
index b3e5fc2..b3ee3b7 100644
--- a/test/pc/e2e/sdp/sdp_changer.cc
+++ b/test/pc/e2e/sdp/sdp_changer.cc
@@ -185,7 +185,7 @@
}
if (!params_.stream_label_to_simulcast_streams_count.empty()) {
- // Because simulcast enabled |params_.video_codecs| has only 1 element.
+ // Because simulcast is enabled, `params_.video_codecs` has only 1 element.
if (first_codec.name == cricket::kVp8CodecName) {
return PatchVp8Offer(std::move(offer));
}
@@ -378,7 +378,7 @@
}
if (!params_.stream_label_to_simulcast_streams_count.empty()) {
- // Because simulcast enabled |params_.video_codecs| has only 1 element.
+ // Because simulcast is enabled, `params_.video_codecs` has only 1 element.
if (first_codec.name == cricket::kVp8CodecName) {
return PatchVp8Answer(std::move(answer));
}
diff --git a/test/pc/e2e/test_peer_factory.cc b/test/pc/e2e/test_peer_factory.cc
index 5ba9b44..5683de4 100644
--- a/test/pc/e2e/test_peer_factory.cc
+++ b/test/pc/e2e/test_peer_factory.cc
@@ -73,7 +73,7 @@
// Returns mapping from stream label to optional spatial index.
// If we have stream label "Foo" and mapping contains
-// 1. |absl::nullopt| means "Foo" isn't simulcast/SVC stream
+// 1. `absl::nullopt` means "Foo" isn't a simulcast/SVC stream
// 2. `kAnalyzeAnySpatialStream` means all simulcast/SVC streams are required
// 3. A concrete value means that particular simulcast/SVC stream has to be
// analyzed.
diff --git a/video/alignment_adjuster.h b/video/alignment_adjuster.h
index 4c4e155..ea2a9a0 100644
--- a/video/alignment_adjuster.h
+++ b/video/alignment_adjuster.h
@@ -19,10 +19,10 @@
class AlignmentAdjuster {
public:
// Returns the resolution alignment requested by the encoder (i.e.
- // |EncoderInfo::requested_resolution_alignment| which ensures that delivered
+ // `EncoderInfo::requested_resolution_alignment`, which ensures that frames
// delivered to the encoder are divisible by this alignment).
//
- // If |EncoderInfo::apply_alignment_to_all_simulcast_layers| is enabled, the
+ // If `EncoderInfo::apply_alignment_to_all_simulcast_layers` is enabled, the
// alignment will be adjusted to ensure that each simulcast layer also is
// divisible by `requested_resolution_alignment`. The configured scale factors
// `scale_resolution_down_by` may be adjusted to a common multiple to limit
diff --git a/video/frame_encode_metadata_writer_unittest.cc b/video/frame_encode_metadata_writer_unittest.cc
index 631dded..8b60a8c 100644
--- a/video/frame_encode_metadata_writer_unittest.cc
+++ b/video/frame_encode_metadata_writer_unittest.cc
@@ -64,7 +64,7 @@
// Emulates `num_frames` frames on `num_streams` streams with capture timestamps
// increased by 1 from 0. Size of each frame is between
// `min_frame_size` and `max_frame_size`; outliers are counted relative to
-// |average_frame_sizes[]| for each stream.
+// `average_frame_sizes[]` for each stream.
std::vector<std::vector<FrameType>> GetTimingFrames(
const int64_t delay_ms,
const size_t min_frame_size,
diff --git a/video/receive_statistics_proxy.h b/video/receive_statistics_proxy.h
index 4efc0f6..1e5189d 100644
--- a/video/receive_statistics_proxy.h
+++ b/video/receive_statistics_proxy.h
@@ -158,7 +158,7 @@
rtc::SampleCounter qp_sample_ RTC_GUARDED_BY(mutex_);
int num_bad_states_ RTC_GUARDED_BY(mutex_);
int num_certain_states_ RTC_GUARDED_BY(mutex_);
- // Note: The |stats_.rtp_stats| member is not used or populated by this class.
+ // Note: The `stats_.rtp_stats` member is not used or populated by this class.
mutable VideoReceiveStream::Stats stats_ RTC_GUARDED_BY(mutex_);
RateStatistics decode_fps_estimator_ RTC_GUARDED_BY(mutex_);
RateStatistics renders_fps_estimator_ RTC_GUARDED_BY(mutex_);
diff --git a/video/receive_statistics_proxy2.h b/video/receive_statistics_proxy2.h
index 7797d93..2dda15e 100644
--- a/video/receive_statistics_proxy2.h
+++ b/video/receive_statistics_proxy2.h
@@ -163,7 +163,7 @@
rtc::SampleCounter qp_sample_ RTC_GUARDED_BY(main_thread_);
int num_bad_states_ RTC_GUARDED_BY(main_thread_);
int num_certain_states_ RTC_GUARDED_BY(main_thread_);
- // Note: The |stats_.rtp_stats| member is not used or populated by this class.
+ // Note: The `stats_.rtp_stats` member is not used or populated by this class.
mutable VideoReceiveStream::Stats stats_ RTC_GUARDED_BY(main_thread_);
// Same as stats_.ssrc, but const (no lock required).
const uint32_t remote_ssrc_;
diff --git a/video/send_statistics_proxy.cc b/video/send_statistics_proxy.cc
index efdf03f..a50008c 100644
--- a/video/send_statistics_proxy.cc
+++ b/video/send_statistics_proxy.cc
@@ -951,7 +951,7 @@
encode_frame_rate = 1.0;
double target_frame_size_bytes =
stats_.target_media_bitrate_bps / (8.0 * encode_frame_rate);
- // |stats_.target_media_bitrate_bps| is set in
+ // `stats_.target_media_bitrate_bps` is set in
// SendStatisticsProxy::OnSetEncoderTargetRate.
stats_.total_encoded_bytes_target += round(target_frame_size_bytes);
if (codec_info) {
@@ -1196,7 +1196,7 @@
stats_.quality_limitation_reason =
quality_limitation_reason_tracker_.current_reason();
- // |stats_.quality_limitation_durations_ms| depends on the current time
+ // `stats_.quality_limitation_durations_ms` depends on the current time
// when it is polled; it is updated in SendStatisticsProxy::GetStats().
}
diff --git a/video/video_analyzer.h b/video/video_analyzer.h
index bb08fbc..c121370 100644
--- a/video/video_analyzer.h
+++ b/video/video_analyzer.h
@@ -157,12 +157,12 @@
void OnFrame(const VideoFrame& video_frame)
RTC_LOCKS_EXCLUDED(lock_) override;
- // Called when |send_stream_.SetSource()| is called.
+ // Called when `send_stream_.SetSource()` is called.
void AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
const rtc::VideoSinkWants& wants)
RTC_LOCKS_EXCLUDED(lock_) override;
- // Called by `send_stream_` when |send_stream_.SetSource()| is called.
+ // Called by `send_stream_` when `send_stream_.SetSource()` is called.
void RemoveSink(rtc::VideoSinkInterface<VideoFrame>* sink)
RTC_LOCKS_EXCLUDED(lock_) override;
diff --git a/video/video_receive_stream.cc b/video/video_receive_stream.cc
index da5701c..2ddaa5b 100644
--- a/video/video_receive_stream.cc
+++ b/video/video_receive_stream.cc
@@ -508,7 +508,7 @@
double estimated_freq_khz;
// TODO(bugs.webrtc.org/10739): we should set local capture clock offset for
- // |video_frame.packet_infos|. But VideoFrame is const qualified here.
+ // `video_frame.packet_infos`. But VideoFrame is const qualified here.
// TODO(tommi): GetStreamSyncOffsetInMs grabs three locks. One inside the
// function itself, another in GetChannel() and a third in
diff --git a/video/video_receive_stream2.cc b/video/video_receive_stream2.cc
index ce1eb7e..ba3f4a2 100644
--- a/video/video_receive_stream2.cc
+++ b/video/video_receive_stream2.cc
@@ -563,7 +563,7 @@
VideoFrameMetaData frame_meta(video_frame, clock_->CurrentTime());
// TODO(bugs.webrtc.org/10739): we should set local capture clock offset for
- // |video_frame.packet_infos|. But VideoFrame is const qualified here.
+ // `video_frame.packet_infos`. But VideoFrame is const qualified here.
call_->worker_thread()->PostTask(
ToQueuedTask(task_safety_, [frame_meta, this]() {
diff --git a/video/video_send_stream_impl.h b/video/video_send_stream_impl.h
index 5ee4d19..a29f186 100644
--- a/video/video_send_stream_impl.h
+++ b/video/video_send_stream_impl.h
@@ -107,7 +107,7 @@
VideoLayersAllocation allocation) override;
// Implements EncodedImageCallback. The implementation routes encoded frames
- // to the `payload_router_` and |config.pre_encode_callback| if set.
+ // to the `payload_router_` and `config.pre_encode_callback` if set.
// Called on an arbitrary encoder callback thread.
EncodedImageCallback::Result OnEncodedImage(
const EncodedImage& encoded_image,
diff --git a/video/video_source_sink_controller.cc b/video/video_source_sink_controller.cc
index 4cd12d8..6955e3b 100644
--- a/video/video_source_sink_controller.cc
+++ b/video/video_source_sink_controller.cc
@@ -157,7 +157,7 @@
const {
rtc::VideoSinkWants wants;
wants.rotation_applied = rotation_applied_;
- // |wants.black_frames| is not used, it always has its default value false.
+ // `wants.black_frames` is not used, it always has its default value false.
wants.max_pixel_count =
rtc::dchecked_cast<int>(restrictions_.max_pixels_per_frame().value_or(
std::numeric_limits<int>::max()));
diff --git a/video/video_stream_encoder.cc b/video/video_stream_encoder.cc
index b56edbe..be611fa 100644
--- a/video/video_stream_encoder.cc
+++ b/video/video_stream_encoder.cc
@@ -1038,7 +1038,7 @@
// The resolutions that we're actually encoding with.
std::vector<rtc::VideoSinkWants::FrameSize> encoder_resolutions;
- // TODO(hbos): For the case of SVC, also make use of |codec.spatialLayers|.
+ // TODO(hbos): For the case of SVC, also make use of `codec.spatialLayers`.
// For now, SVC layers are handled by the VP9 encoder.
for (const auto& simulcastStream : codec.simulcastStream) {
if (!simulcastStream.active)
@@ -1344,7 +1344,7 @@
// Pause video if paused by caller or as long as the network is down or the
// pacer queue has grown too large in buffered mode.
// If the pacer queue has grown too large or the network is down,
- // |last_encoder_rate_settings_->encoder_target| will be 0.
+ // `last_encoder_rate_settings_->encoder_target` will be 0.
return !last_encoder_rate_settings_ ||
last_encoder_rate_settings_->encoder_target == DataRate::Zero();
}