Use backticks, not vertical bars, to denote variables in comments for /modules/audio_coding

Bug: webrtc:12338
Change-Id: I02613d9fca45d00e2477f334b7a0416e7912e26b
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227037
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34621}
diff --git a/modules/audio_coding/acm2/acm_receive_test.h b/modules/audio_coding/acm2/acm_receive_test.h
index 043092c..6349c63 100644
--- a/modules/audio_coding/acm2/acm_receive_test.h
+++ b/modules/audio_coding/acm2/acm_receive_test.h
@@ -71,8 +71,8 @@
   RTC_DISALLOW_COPY_AND_ASSIGN(AcmReceiveTestOldApi);
 };
 
-// This test toggles the output frequency every |toggle_period_ms|. The test
-// starts with |output_freq_hz_1|. Except for the toggling, it does the same
+// This test toggles the output frequency every `toggle_period_ms`. The test
+// starts with `output_freq_hz_1`. Except for the toggling, it does the same
 // thing as AcmReceiveTestOldApi.
 class AcmReceiveTestToggleOutputFreqOldApi : public AcmReceiveTestOldApi {
  public:
diff --git a/modules/audio_coding/acm2/acm_receiver.cc b/modules/audio_coding/acm2/acm_receiver.cc
index 80cb3c5..6d9211c 100644
--- a/modules/audio_coding/acm2/acm_receiver.cc
+++ b/modules/audio_coding/acm2/acm_receiver.cc
@@ -131,7 +131,7 @@
                                   /*num_channels=*/format->num_channels,
                                   /*sdp_format=*/std::move(format->sdp_format)};
     }
-  }  // |mutex_| is released.
+  }  // `mutex_` is released.
 
   if (neteq_->InsertPacket(rtp_header, incoming_payload) < 0) {
     RTC_LOG(LERROR) << "AcmReceiver::InsertPacket "
@@ -201,7 +201,7 @@
     // We might end up here ONLY if codec is changed.
   }
 
-  // Store current audio in |last_audio_buffer_| for next time.
+  // Store current audio in `last_audio_buffer_` for next time.
   memcpy(last_audio_buffer_.get(), audio_frame->data(),
          sizeof(int16_t) * audio_frame->samples_per_channel_ *
              audio_frame->num_channels_);
diff --git a/modules/audio_coding/acm2/acm_receiver.h b/modules/audio_coding/acm2/acm_receiver.h
index 19dc577..9963603 100644
--- a/modules/audio_coding/acm2/acm_receiver.h
+++ b/modules/audio_coding/acm2/acm_receiver.h
@@ -177,9 +177,9 @@
   // enabled then the maximum NACK list size is modified accordingly.
   //
   // If the sequence number of last received packet is N, the sequence numbers
-  // of NACK list are in the range of [N - |max_nack_list_size|, N).
+  // of NACK list are in the range of [N - `max_nack_list_size`, N).
   //
-  // |max_nack_list_size| should be positive (none zero) and less than or
+  // `max_nack_list_size` should be positive (non-zero) and less than or
   // equal to |Nack::kNackListSizeLimit|. Otherwise, No change is applied and -1
   // is returned. 0 is returned at success.
   //
@@ -189,12 +189,12 @@
   void DisableNack();
 
   //
-  // Get a list of packets to be retransmitted. |round_trip_time_ms| is an
+  // Get a list of packets to be retransmitted. `round_trip_time_ms` is an
   // estimate of the round-trip-time (in milliseconds). Missing packets which
   // will be playout in a shorter time than the round-trip-time (with respect
   // to the time this API is called) will not be included in the list.
   //
-  // Negative |round_trip_time_ms| results is an error message and empty list
+  // Negative `round_trip_time_ms` results is an error message and empty list
   // is returned.
   //
   std::vector<uint16_t> GetNackList(int64_t round_trip_time_ms) const;
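To make the NACK-range comment above concrete, here is a minimal standalone sketch (not the WebRTC NACK implementation; the function name is illustrative) of how the range [N - `max_nack_list_size`, N) can be enumerated with 16-bit sequence-number wraparound:

```cpp
#include <cstdint>
#include <vector>

// Enumerates the candidate NACK sequence numbers [N - size, N).
std::vector<uint16_t> CandidateNackRange(uint16_t last_received_seq,
                                         uint16_t max_nack_list_size) {
  std::vector<uint16_t> range;
  range.reserve(max_nack_list_size);
  // Unsigned 16-bit arithmetic handles wraparound, e.g. N = 3 with a list
  // size of 10 starts at 65529.
  for (uint16_t i = max_nack_list_size; i > 0; --i) {
    range.push_back(static_cast<uint16_t>(last_received_seq - i));
  }
  return range;
}
```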
diff --git a/modules/audio_coding/acm2/audio_coding_module.cc b/modules/audio_coding/acm2/audio_coding_module.cc
index b5c0c3b..d629139 100644
--- a/modules/audio_coding/acm2/audio_coding_module.cc
+++ b/modules/audio_coding/acm2/audio_coding_module.cc
@@ -125,7 +125,7 @@
   int Add10MsDataInternal(const AudioFrame& audio_frame, InputData* input_data)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_mutex_);
 
-  // TODO(bugs.webrtc.org/10739): change |absolute_capture_timestamp_ms| to
+  // TODO(bugs.webrtc.org/10739): change `absolute_capture_timestamp_ms` to
   // int64_t when it always receives a valid value.
   int Encode(const InputData& input_data,
              absl::optional<int64_t> absolute_capture_timestamp_ms)
@@ -141,8 +141,8 @@
   //
   // in_frame: input audio-frame
   // ptr_out: pointer to output audio_frame. If no preprocessing is required
-  //          |ptr_out| will be pointing to |in_frame|, otherwise pointing to
-  //          |preprocess_frame_|.
+  //          `ptr_out` will be pointing to `in_frame`, otherwise pointing to
+  //          `preprocess_frame_`.
   //
   // Return value:
   //   -1: if encountering an error.
@@ -152,7 +152,7 @@
       RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_mutex_);
 
   // Change required states after starting to receive the codec corresponding
-  // to |index|.
+  // to `index`.
   int UpdateUponReceivingCodec(int index);
 
   mutable Mutex acm_mutex_;
@@ -397,7 +397,7 @@
     // output data if needed.
     ReMixFrame(*ptr_frame, current_num_channels, &input_data->buffer);
 
-    // For pushing data to primary, point the |ptr_audio| to correct buffer.
+    // For pushing data to primary, point the `ptr_audio` to correct buffer.
     input_data->audio = input_data->buffer.data();
     RTC_DCHECK_GE(input_data->buffer.size(),
                   input_data->length_per_channel * input_data->audio_channel);
@@ -414,7 +414,7 @@
 // encoder is mono and input is stereo. In case of dual-streaming, both
 // encoders has to be mono for down-mix to take place.
 // |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing
-// is required, |*ptr_out| points to |in_frame|.
+// is required, |*ptr_out| points to `in_frame`.
 // TODO(yujo): Make this more efficient for muted frames.
 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
                                                const AudioFrame** ptr_out) {
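The `ptr_out`/`in_frame` comments above describe a zero-copy pass-through pattern. A minimal sketch of that pattern, with placeholder types and names rather than the real ACM API:

```cpp
struct Frame {
  int sample_rate_hz = 0;
  size_t num_channels = 0;
};

class Preprocessor {
 public:
  // Returns 0 on success, -1 on error (mirroring the documented convention).
  int Preprocess(const Frame& in_frame, const Frame** ptr_out) {
    const bool needs_work = in_frame.sample_rate_hz != target_rate_hz_ ||
                            in_frame.num_channels != target_channels_;
    if (!needs_work) {
      *ptr_out = &in_frame;  // No copy: the output aliases the input frame.
      return 0;
    }
    // ... resample / remix into scratch_frame_ here ...
    scratch_frame_.sample_rate_hz = target_rate_hz_;
    scratch_frame_.num_channels = target_channels_;
    *ptr_out = &scratch_frame_;
    return 0;
  }

 private:
  int target_rate_hz_ = 48000;
  size_t target_channels_ = 1;
  Frame scratch_frame_;
};
```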
diff --git a/modules/audio_coding/acm2/audio_coding_module_unittest.cc b/modules/audio_coding/acm2/audio_coding_module_unittest.cc
index 7465456..a0a8854 100644
--- a/modules/audio_coding/acm2/audio_coding_module_unittest.cc
+++ b/modules/audio_coding/acm2/audio_coding_module_unittest.cc
@@ -342,7 +342,7 @@
 
 // Introduce this class to set different expectations on the number of encoded
 // bytes. This class expects all encoded packets to be 9 bytes (matching one
-// CNG SID frame) or 0 bytes. This test depends on |input_frame_| containing
+// CNG SID frame) or 0 bytes. This test depends on `input_frame_` containing
 // (near-)zero values. It also introduces a way to register comfort noise with
 // a custom payload type.
 class AudioCodingModuleTestWithComfortNoiseOldApi
@@ -593,7 +593,7 @@
       InsertAudio();
       ASSERT_LT(loop_counter++, 10);
     }
-    // Set |last_packet_number_| to one less that |num_calls| so that the packet
+    // Set `last_packet_number_` to one less than `num_calls` so that the packet
     // will be fetched in the next InsertPacket() call.
     last_packet_number_ = packet_cb_.num_calls() - 1;
 
@@ -617,7 +617,7 @@
     if (num_calls > last_packet_number_) {
       // Get the new payload out from the callback handler.
       // Note that since we swap buffers here instead of directly inserting
-      // a pointer to the data in |packet_cb_|, we avoid locking the callback
+      // a pointer to the data in `packet_cb_`, we avoid locking the callback
       // for the duration of the IncomingPacket() call.
       packet_cb_.SwapBuffers(&last_payload_vec_);
       ASSERT_GT(last_payload_vec_.size(), 0u);
@@ -1140,8 +1140,8 @@
   // Sets up the test::AcmSendTest object. Returns true on success, otherwise
   // false.
   bool SetUpSender(std::string input_file_name, int source_rate) {
-    // Note that |audio_source_| will loop forever. The test duration is set
-    // explicitly by |kTestDurationMs|.
+    // Note that `audio_source_` will loop forever. The test duration is set
+    // explicitly by `kTestDurationMs`.
     audio_source_.reset(new test::InputAudioFile(input_file_name));
     send_test_.reset(new test::AcmSendTestOldApi(audio_source_.get(),
                                                  source_rate, kTestDurationMs));
@@ -1243,7 +1243,7 @@
     VerifyPacket(packet.get());
     // TODO(henrik.lundin) Save the packet to file as well.
 
-    // Pass it on to the caller. The caller becomes the owner of |packet|.
+    // Pass it on to the caller. The caller becomes the owner of `packet`.
     return packet;
   }
 
@@ -1631,8 +1631,8 @@
   bool SetUpSender() {
     const std::string input_file_name =
         webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
-    // Note that |audio_source_| will loop forever. The test duration is set
-    // explicitly by |kTestDurationMs|.
+    // Note that `audio_source_` will loop forever. The test duration is set
+    // explicitly by `kTestDurationMs`.
     audio_source_.reset(new test::InputAudioFile(input_file_name));
     static const int kSourceRateHz = 32000;
     send_test_.reset(new test::AcmSendTestOldApi(
@@ -1859,7 +1859,7 @@
 
 // This test fixture is implemented to run ACM and change the desired output
 // frequency during the call. The input packets are simply PCM16b-wb encoded
-// payloads with a constant value of |kSampleValue|. The test fixture itself
+// payloads with a constant value of `kSampleValue`. The test fixture itself
 // acts as PacketSource in between the receive test class and the constant-
 // payload packet source class. The output is both written to file, and analyzed
 // in this test fixture.
diff --git a/modules/audio_coding/acm2/call_statistics.cc b/modules/audio_coding/acm2/call_statistics.cc
index e97e529..0aad594 100644
--- a/modules/audio_coding/acm2/call_statistics.cc
+++ b/modules/audio_coding/acm2/call_statistics.cc
@@ -44,7 +44,7 @@
       break;
     }
     case AudioFrame::kUndefined: {
-      // If the audio is decoded by NetEq, |kUndefined| is not an option.
+      // If the audio is decoded by NetEq, `kUndefined` is not an option.
       RTC_NOTREACHED();
     }
   }
diff --git a/modules/audio_coding/acm2/call_statistics.h b/modules/audio_coding/acm2/call_statistics.h
index 5d94ac4..a2db2a29 100644
--- a/modules/audio_coding/acm2/call_statistics.h
+++ b/modules/audio_coding/acm2/call_statistics.h
@@ -36,8 +36,8 @@
   CallStatistics() {}
   ~CallStatistics() {}
 
-  // Call this method to indicate that NetEq engaged in decoding. |speech_type|
-  // is the audio-type according to NetEq, and |muted| indicates if the decoded
+  // Call this method to indicate that NetEq engaged in decoding. `speech_type`
+  // is the audio-type according to NetEq, and `muted` indicates if the decoded
   // frame was produced in muted state.
   void DecodedByNetEq(AudioFrame::SpeechType speech_type, bool muted);
 
diff --git a/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc b/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc
index 40c8659..88ca38d 100644
--- a/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc
+++ b/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc
@@ -50,7 +50,7 @@
 }
 
 void BitrateController::MakeDecision(AudioEncoderRuntimeConfig* config) {
-  // Decision on |bitrate_bps| should not have been made.
+  // Decision on `bitrate_bps` should not have been made.
   RTC_DCHECK(!config->bitrate_bps);
   if (target_audio_bitrate_bps_ && overhead_bytes_per_packet_) {
     if (config->frame_length_ms)
diff --git a/modules/audio_coding/audio_network_adaptor/channel_controller.cc b/modules/audio_coding/audio_network_adaptor/channel_controller.cc
index 2f5af67..2ef2f4c 100644
--- a/modules/audio_coding/audio_network_adaptor/channel_controller.cc
+++ b/modules/audio_coding/audio_network_adaptor/channel_controller.cc
@@ -28,7 +28,7 @@
 ChannelController::ChannelController(const Config& config)
     : config_(config), channels_to_encode_(config_.intial_channels_to_encode) {
   RTC_DCHECK_GT(config_.intial_channels_to_encode, 0lu);
-  // Currently, we require |intial_channels_to_encode| to be <= 2.
+  // Currently, we require `intial_channels_to_encode` to be <= 2.
   RTC_DCHECK_LE(config_.intial_channels_to_encode, 2lu);
   RTC_DCHECK_GE(config_.num_encoder_channels,
                 config_.intial_channels_to_encode);
@@ -43,7 +43,7 @@
 }
 
 void ChannelController::MakeDecision(AudioEncoderRuntimeConfig* config) {
-  // Decision on |num_channels| should not have been made.
+  // Decision on `num_channels` should not have been made.
   RTC_DCHECK(!config->num_channels);
 
   if (uplink_bandwidth_bps_) {
diff --git a/modules/audio_coding/audio_network_adaptor/config.proto b/modules/audio_coding/audio_network_adaptor/config.proto
index 347372e..4f8b2c7 100644
--- a/modules/audio_coding/audio_network_adaptor/config.proto
+++ b/modules/audio_coding/audio_network_adaptor/config.proto
@@ -23,8 +23,8 @@
     optional float high_bandwidth_packet_loss = 4;
   }
 
-  // |fec_enabling_threshold| defines a curve, above which FEC should be
-  // enabled. |fec_disabling_threshold| defines a curve, under which FEC
+  // `fec_enabling_threshold` defines a curve, above which FEC should be
+  // enabled. `fec_disabling_threshold` defines a curve, under which FEC
   // should be disabled. See below
   //
   // packet-loss ^   |  |
@@ -36,7 +36,7 @@
   optional Threshold fec_enabling_threshold = 1;
   optional Threshold fec_disabling_threshold = 2;
 
-  // |time_constant_ms| is the time constant for an exponential filter, which
+  // `time_constant_ms` is the time constant for an exponential filter, which
   // is used for smoothing the packet loss fraction.
   optional int32 time_constant_ms = 3;
 }
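The `time_constant_ms` comment above refers to an exponential filter. A small standalone sketch of one common form of such a filter (the exact update rule used by the implementation is not shown in this change, so treat it as an assumption): with samples arriving every dt_ms, alpha = exp(-dt_ms / time_constant_ms) and smoothed = alpha * smoothed + (1 - alpha) * sample.

```cpp
#include <cmath>

class SmoothedPacketLoss {
 public:
  explicit SmoothedPacketLoss(double time_constant_ms)
      : time_constant_ms_(time_constant_ms) {}

  // Feed a new packet-loss sample observed dt_ms after the previous one.
  double Update(double sample, double dt_ms) {
    const double alpha = std::exp(-dt_ms / time_constant_ms_);
    smoothed_ = alpha * smoothed_ + (1.0 - alpha) * sample;
    return smoothed_;
  }

 private:
  const double time_constant_ms_;
  double smoothed_ = 0.0;
};
```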
@@ -62,8 +62,8 @@
     optional float high_bandwidth_recoverable_packet_loss = 4;
   }
 
-  // |fec_enabling_threshold| defines a curve, above which FEC should be
-  // enabled. |fec_disabling_threshold| defines a curve, under which FEC
+  // `fec_enabling_threshold` defines a curve, above which FEC should be
+  // enabled. `fec_disabling_threshold` defines a curve, under which FEC
   // should be disabled. See below
   //
   // packet-loss ^   |  |
@@ -122,7 +122,7 @@
   // FrameLengthControllerV2 chooses the frame length by taking the target
   // bitrate and subtracting the overhead bitrate to obtain the remaining
   // bitrate for the payload. The chosen frame length is the shortest possible
-  // where the payload bitrate is more than |min_payload_bitrate_bps|.
+  // where the payload bitrate is more than `min_payload_bitrate_bps`.
   optional int32 min_payload_bitrate_bps = 1;
 
   // If true, uses the stable target bitrate to decide the frame length. This
@@ -158,17 +158,17 @@
 
 message Controller {
   message ScoringPoint {
-    // |ScoringPoint| is a subspace of network condition. It is used for
+    // `ScoringPoint` is a subspace of network condition. It is used for
     // comparing the significance of controllers.
     optional int32 uplink_bandwidth_bps = 1;
     optional float uplink_packet_loss_fraction = 2;
   }
 
-  // The distance from |scoring_point| to a given network condition defines
+  // The distance from `scoring_point` to a given network condition defines
   // the significance of this controller with respect that network condition.
   // Shorter distance means higher significance. The significances of
   // controllers determine their order in the processing pipeline. Controllers
-  // without |scoring_point| follow their default order in
+  // without `scoring_point` follow their default order in
   // |ControllerManager::controllers|.
   optional ScoringPoint scoring_point = 1;
 
diff --git a/modules/audio_coding/audio_network_adaptor/controller_manager.cc b/modules/audio_coding/audio_network_adaptor/controller_manager.cc
index 415b9fc..6708bc0 100644
--- a/modules/audio_coding/audio_network_adaptor/controller_manager.cc
+++ b/modules/audio_coding/audio_network_adaptor/controller_manager.cc
@@ -373,14 +373,14 @@
           config_.min_reordering_squared_distance)
     return sorted_controllers_;
 
-  // Sort controllers according to the distances of |scoring_point| to the
+  // Sort controllers according to the distances of `scoring_point` to the
   // scoring points of controllers.
   //
   // A controller that does not associate with any scoring point
   // are treated as if
   // 1) they are less important than any controller that has a scoring point,
   // 2) they are equally important to any controller that has no scoring point,
-  //    and their relative order will follow |default_sorted_controllers_|.
+  //    and their relative order will follow `default_sorted_controllers_`.
   std::vector<Controller*> sorted_controllers(default_sorted_controllers_);
   std::stable_sort(
       sorted_controllers.begin(), sorted_controllers.end(),
@@ -430,7 +430,7 @@
 }
 
 float NormalizePacketLossFraction(float uplink_packet_loss_fraction) {
-  // |uplink_packet_loss_fraction| is seldom larger than 0.3, so we scale it up
+  // `uplink_packet_loss_fraction` is seldom larger than 0.3, so we scale it up
   // by 3.3333f.
   return std::min(uplink_packet_loss_fraction * 3.3333f, 1.0f);
 }
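To illustrate the significance ordering described in the comments above: scoring points and network conditions are normalized to comparable ranges and compared by distance, with `NormalizePacketLossFraction` scaling the loss fraction by 3.3333 before clamping. A standalone sketch (the bandwidth normalization and the squared-distance metric are assumptions for illustration):

```cpp
#include <algorithm>

struct ScoringPoint {
  float uplink_bandwidth_bps;
  float uplink_packet_loss_fraction;
};

float NormalizeBandwidth(float bps) {
  constexpr float kMaxUplinkBandwidthBps = 120000.0f;
  return std::min(bps / kMaxUplinkBandwidthBps, 1.0f);
}

float NormalizePacketLoss(float fraction) {
  // Loss fraction is seldom above 0.3, so scale it up before clamping.
  return std::min(fraction * 3.3333f, 1.0f);
}

float SquaredDistance(const ScoringPoint& a, const ScoringPoint& b) {
  const float db = NormalizeBandwidth(a.uplink_bandwidth_bps) -
                   NormalizeBandwidth(b.uplink_bandwidth_bps);
  const float dl = NormalizePacketLoss(a.uplink_packet_loss_fraction) -
                   NormalizePacketLoss(b.uplink_packet_loss_fraction);
  return db * db + dl * dl;
}
```

Controllers with a scoring point would then be stable-sorted by this distance to the current network condition, so controllers without one keep their default relative order.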
diff --git a/modules/audio_coding/audio_network_adaptor/controller_manager.h b/modules/audio_coding/audio_network_adaptor/controller_manager.h
index f46450d..c168ebc 100644
--- a/modules/audio_coding/audio_network_adaptor/controller_manager.h
+++ b/modules/audio_coding/audio_network_adaptor/controller_manager.h
@@ -111,7 +111,7 @@
 
   std::vector<Controller*> sorted_controllers_;
 
-  // |scoring_points_| saves the scoring points of various
+  // `scoring_points_` saves the scoring points of various
   // controllers.
   std::map<const Controller*, ScoringPoint> controller_scoring_points_;
 
diff --git a/modules/audio_coding/audio_network_adaptor/controller_manager_unittest.cc b/modules/audio_coding/audio_network_adaptor/controller_manager_unittest.cc
index c71bbc9..7b7ced9 100644
--- a/modules/audio_coding/audio_network_adaptor/controller_manager_unittest.cc
+++ b/modules/audio_coding/audio_network_adaptor/controller_manager_unittest.cc
@@ -43,7 +43,7 @@
 constexpr int kFactor = 100;
 constexpr float kMinReorderingSquareDistance = 1.0f / kFactor / kFactor;
 
-// |kMinUplinkBandwidthBps| and |kMaxUplinkBandwidthBps| are copied from
+// `kMinUplinkBandwidthBps` and `kMaxUplinkBandwidthBps` are copied from
 // controller_manager.cc
 constexpr int kMinUplinkBandwidthBps = 0;
 constexpr int kMaxUplinkBandwidthBps = 120000;
@@ -82,7 +82,7 @@
   return states;
 }
 
-// |expected_order| contains the expected indices of all controllers in the
+// `expected_order` contains the expected indices of all controllers in the
 // vector of controllers returned by GetSortedControllers(). A negative index
 // means that we do not care about its exact place, but we do check that it
 // exists in the vector.
@@ -112,8 +112,8 @@
 TEST(ControllerManagerTest, GetControllersReturnAllControllers) {
   auto states = CreateControllerManager();
   auto check = states.controller_manager->GetControllers();
-  // Verify that controllers in |check| are one-to-one mapped to those in
-  // |mock_controllers_|.
+  // Verify that controllers in `check` are one-to-one mapped to those in
+  // `mock_controllers_`.
   EXPECT_EQ(states.mock_controllers.size(), check.size());
   for (auto& controller : check)
     EXPECT_NE(states.mock_controllers.end(),
@@ -123,7 +123,7 @@
 
 TEST(ControllerManagerTest, ControllersInDefaultOrderOnEmptyNetworkMetrics) {
   auto states = CreateControllerManager();
-  // |network_metrics| are empty, and the controllers are supposed to follow the
+  // `network_metrics` are empty, and the controllers are supposed to follow the
   // default order.
   CheckControllersOrder(&states, absl::nullopt, absl::nullopt, {0, 1, 2, 3});
 }
@@ -304,7 +304,7 @@
 
   for (size_t i = 0; i < controllers.size(); ++i) {
     AudioEncoderRuntimeConfig encoder_config;
-    // We check the order of |controllers| by judging their decisions.
+    // We check the order of `controllers` by judging their decisions.
     controllers[i]->MakeDecision(&encoder_config);
 
     // Since controllers are not provided with network metrics, they give the
diff --git a/modules/audio_coding/audio_network_adaptor/debug_dump.proto b/modules/audio_coding/audio_network_adaptor/debug_dump.proto
index 93b31c3..3aa6a50 100644
--- a/modules/audio_coding/audio_network_adaptor/debug_dump.proto
+++ b/modules/audio_coding/audio_network_adaptor/debug_dump.proto
@@ -21,7 +21,7 @@
   optional bool enable_fec = 4;
   optional bool enable_dtx = 5;
   // Some encoders can encode fewer channels than the actual input to make
-  // better use of the bandwidth. |num_channels| sets the number of channels
+  // better use of the bandwidth. `num_channels` sets the number of channels
   // to encode.
   optional uint32 num_channels = 6;
 }
diff --git a/modules/audio_coding/audio_network_adaptor/dtx_controller.cc b/modules/audio_coding/audio_network_adaptor/dtx_controller.cc
index 48384c9..b0a7d5d 100644
--- a/modules/audio_coding/audio_network_adaptor/dtx_controller.cc
+++ b/modules/audio_coding/audio_network_adaptor/dtx_controller.cc
@@ -33,7 +33,7 @@
 }
 
 void DtxController::MakeDecision(AudioEncoderRuntimeConfig* config) {
-  // Decision on |enable_dtx| should not have been made.
+  // Decision on `enable_dtx` should not have been made.
   RTC_DCHECK(!config->enable_dtx);
 
   if (uplink_bandwidth_bps_) {
diff --git a/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h
index 87afe2e..85d235e 100644
--- a/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h
+++ b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h
@@ -25,8 +25,8 @@
 class FecControllerPlrBased final : public Controller {
  public:
   struct Config {
-    // |fec_enabling_threshold| defines a curve, above which FEC should be
-    // enabled. |fec_disabling_threshold| defines a curve, under which FEC
+    // `fec_enabling_threshold` defines a curve, above which FEC should be
+    // enabled. `fec_disabling_threshold` defines a curve, under which FEC
     // should be disabled. See below
     //
     // packet-loss ^   |  |
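The two threshold curves above define a hysteresis: FEC turns on when packet loss rises above the enabling curve for the current bandwidth, turns off when it drops below the disabling curve, and otherwise keeps its previous state. A minimal sketch, assuming each curve interpolates linearly between its two anchor points and is treated as unreachable below the low-bandwidth anchor (which matches the unit-test descriptions later in this change):

```cpp
#include <limits>

struct ThresholdCurve {
  float low_bandwidth_bps;
  float low_bandwidth_packet_loss;
  float high_bandwidth_bps;
  float high_bandwidth_packet_loss;

  // Packet-loss threshold at a given bandwidth. Below the low-bandwidth
  // anchor the curve is treated as unreachable (+inf), so FEC never turns on
  // and always turns off there.
  float LossAt(float bandwidth_bps) const {
    if (bandwidth_bps < low_bandwidth_bps)
      return std::numeric_limits<float>::infinity();
    if (bandwidth_bps >= high_bandwidth_bps)
      return high_bandwidth_packet_loss;
    const float t = (bandwidth_bps - low_bandwidth_bps) /
                    (high_bandwidth_bps - low_bandwidth_bps);
    return low_bandwidth_packet_loss +
           t * (high_bandwidth_packet_loss - low_bandwidth_packet_loss);
  }
};

bool DecideFec(bool fec_currently_on,
               float bandwidth_bps,
               float packet_loss,
               const ThresholdCurve& enabling,
               const ThresholdCurve& disabling) {
  if (!fec_currently_on && packet_loss >= enabling.LossAt(bandwidth_bps))
    return true;   // Above the enabling curve: turn FEC on.
  if (fec_currently_on && packet_loss <= disabling.LossAt(bandwidth_bps))
    return false;  // Below the disabling curve: turn FEC off.
  return fec_currently_on;  // Between the curves: keep the previous state.
}
```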
diff --git a/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc
index d95cbce..355431a 100644
--- a/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc
+++ b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc
@@ -100,9 +100,9 @@
   }
 }
 
-// Checks that the FEC decision and |uplink_packet_loss_fraction| given by
-// |states->controller->MakeDecision| matches |expected_enable_fec| and
-// |expected_uplink_packet_loss_fraction|, respectively.
+// Checks that the FEC decision and `uplink_packet_loss_fraction` given by
+// |states->controller->MakeDecision| matches `expected_enable_fec` and
+// `expected_uplink_packet_loss_fraction`, respectively.
 void CheckDecision(FecControllerPlrBasedTestStates* states,
                    bool expected_enable_fec,
                    float expected_uplink_packet_loss_fraction) {
@@ -221,7 +221,7 @@
 
 TEST(FecControllerPlrBasedTest, MaintainFecOffForVeryLowBandwidth) {
   auto states = CreateFecControllerPlrBased(false);
-  // Below |kEnablingBandwidthLow|, no packet loss fraction can cause FEC to
+  // Below `kEnablingBandwidthLow`, no packet loss fraction can cause FEC to
   // turn on.
   UpdateNetworkMetrics(&states, kEnablingBandwidthLow - 1, 1.0);
   CheckDecision(&states, false, 1.0);
@@ -272,7 +272,7 @@
 
 TEST(FecControllerPlrBasedTest, DisableFecForVeryLowBandwidth) {
   auto states = CreateFecControllerPlrBased(true);
-  // Below |kEnablingBandwidthLow|, any packet loss fraction can cause FEC to
+  // Below `kEnablingBandwidthLow`, any packet loss fraction can cause FEC to
   // turn off.
   UpdateNetworkMetrics(&states, kDisablingBandwidthLow - 1, 1.0);
   CheckDecision(&states, false, 1.0);
diff --git a/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc b/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc
index 36e9eb9..c47434f 100644
--- a/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc
+++ b/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc
@@ -54,7 +54,7 @@
   frame_length_ms_ = std::find(config_.encoder_frame_lengths_ms.begin(),
                                config_.encoder_frame_lengths_ms.end(),
                                config_.initial_frame_length_ms);
-  // |encoder_frame_lengths_ms| must contain |initial_frame_length_ms|.
+  // `encoder_frame_lengths_ms` must contain `initial_frame_length_ms`.
   RTC_DCHECK(frame_length_ms_ != config_.encoder_frame_lengths_ms.end());
 }
 
@@ -71,7 +71,7 @@
 }
 
 void FrameLengthController::MakeDecision(AudioEncoderRuntimeConfig* config) {
-  // Decision on |frame_length_ms| should not have been made.
+  // Decision on `frame_length_ms` should not have been made.
   RTC_DCHECK(!config->frame_length_ms);
 
   if (FrameLengthIncreasingDecision(*config)) {
@@ -99,12 +99,12 @@
 bool FrameLengthController::FrameLengthIncreasingDecision(
     const AudioEncoderRuntimeConfig& config) {
   // Increase frame length if
-  // 1. |uplink_bandwidth_bps| is known to be smaller or equal than
-  //    |min_encoder_bitrate_bps| plus |prevent_overuse_margin_bps| plus the
+  // 1. `uplink_bandwidth_bps` is known to be smaller or equal than
+  //    `min_encoder_bitrate_bps` plus `prevent_overuse_margin_bps` plus the
   //    current overhead rate OR all the following:
   // 2. longer frame length is available AND
-  // 3. |uplink_bandwidth_bps| is known to be smaller than a threshold AND
-  // 4. |uplink_packet_loss_fraction| is known to be smaller than a threshold.
+  // 3. `uplink_bandwidth_bps` is known to be smaller than a threshold AND
+  // 4. `uplink_packet_loss_fraction` is known to be smaller than a threshold.
 
   // Find next frame length to which a criterion is defined to shift from
   // current frame length.
@@ -156,12 +156,12 @@
     const AudioEncoderRuntimeConfig& config) {
   // Decrease frame length if
   // 1. shorter frame length is available AND
-  // 2. |uplink_bandwidth_bps| is known to be bigger than
-  // |min_encoder_bitrate_bps| plus |prevent_overuse_margin_bps| plus the
+  // 2. `uplink_bandwidth_bps` is known to be bigger than
+  // `min_encoder_bitrate_bps` plus `prevent_overuse_margin_bps` plus the
   // overhead which would be produced with the shorter frame length AND
   // one or more of the followings:
-  // 3. |uplink_bandwidth_bps| is known to be larger than a threshold,
-  // 4. |uplink_packet_loss_fraction| is known to be larger than a threshold,
+  // 3. `uplink_bandwidth_bps` is known to be larger than a threshold,
+  // 4. `uplink_packet_loss_fraction` is known to be larger than a threshold,
 
   // Find next frame length to which a criterion is defined to shift from
   // current frame length.
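A condensed sketch of the increase rule spelled out in the comments above (the decrease rule mirrors it with the inequalities reversed); parameter names are illustrative and the per-frame-length thresholds are collapsed into single values:

```cpp
#include <optional>

struct Metrics {
  std::optional<int> uplink_bandwidth_bps;
  std::optional<float> uplink_packet_loss_fraction;
};

bool ShouldIncreaseFrameLength(const Metrics& m,
                               bool longer_length_available,
                               int min_encoder_bitrate_bps,
                               int prevent_overuse_margin_bps,
                               int current_overhead_bps,
                               int bandwidth_threshold_bps,
                               float loss_threshold) {
  if (m.uplink_bandwidth_bps &&
      *m.uplink_bandwidth_bps <= min_encoder_bitrate_bps +
                                     prevent_overuse_margin_bps +
                                     current_overhead_bps) {
    return true;  // Rule 1: bandwidth cannot even cover the minimum bitrate.
  }
  // Rules 2-4: a longer frame length exists and both metrics are known to be
  // below their thresholds.
  return longer_length_available && m.uplink_bandwidth_bps &&
         *m.uplink_bandwidth_bps < bandwidth_threshold_bps &&
         m.uplink_packet_loss_fraction &&
         *m.uplink_packet_loss_fraction < loss_threshold;
}
```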
diff --git a/modules/audio_coding/audio_network_adaptor/frame_length_controller_unittest.cc b/modules/audio_coding/audio_network_adaptor/frame_length_controller_unittest.cc
index 0ffa54a..2312393 100644
--- a/modules/audio_coding/audio_network_adaptor/frame_length_controller_unittest.cc
+++ b/modules/audio_coding/audio_network_adaptor/frame_length_controller_unittest.cc
@@ -184,8 +184,8 @@
 
 TEST(FrameLengthControllerTest, IncreaseTo40MsOnMultipleConditions) {
   // Increase to 40ms frame length if
-  // 1. |uplink_bandwidth_bps| is known to be smaller than a threshold AND
-  // 2. |uplink_packet_loss_fraction| is known to be smaller than a threshold
+  // 1. `uplink_bandwidth_bps` is known to be smaller than a threshold AND
+  // 2. `uplink_packet_loss_fraction` is known to be smaller than a threshold
   //    AND
   // 3. FEC is not decided or OFF.
   auto controller = CreateController(CreateChangeCriteriaFor20msAnd40ms(),
@@ -206,8 +206,8 @@
 
 TEST(FrameLengthControllerTest, Maintain60MsOnMultipleConditions) {
   // Maintain 60ms frame length if
-  // 1. |uplink_bandwidth_bps| is at medium level,
-  // 2. |uplink_packet_loss_fraction| is at medium,
+  // 1. `uplink_bandwidth_bps` is at medium level,
+  // 2. `uplink_packet_loss_fraction` is at medium,
   // 3. FEC is not decided ON.
   auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(),
                                      kDefaultEncoderFrameLengthsMs, 60);
@@ -218,8 +218,8 @@
 
 TEST(FrameLengthControllerTest, IncreaseTo60MsOnMultipleConditions) {
   // Increase to 60ms frame length if
-  // 1. |uplink_bandwidth_bps| is known to be smaller than a threshold AND
-  // 2. |uplink_packet_loss_fraction| is known to be smaller than a threshold
+  // 1. `uplink_bandwidth_bps` is known to be smaller than a threshold AND
+  // 2. `uplink_packet_loss_fraction` is known to be smaller than a threshold
   //    AND
   // 3. FEC is not decided or OFF.
   auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(),
@@ -365,8 +365,8 @@
 
 TEST(FrameLengthControllerTest, From20MsTo120MsOnMultipleConditions) {
   // Increase to 120ms frame length if
-  // 1. |uplink_bandwidth_bps| is known to be smaller than a threshold AND
-  // 2. |uplink_packet_loss_fraction| is known to be smaller than a threshold.
+  // 1. `uplink_bandwidth_bps` is known to be smaller than a threshold AND
+  // 2. `uplink_packet_loss_fraction` is known to be smaller than a threshold.
   auto controller = CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(),
                                      kDefaultEncoderFrameLengthsMs, 20);
   // It takes two steps for frame length to go from 20ms to 120ms.
diff --git a/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h b/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h
index 94e8ed9..bd16292 100644
--- a/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h
+++ b/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h
@@ -32,7 +32,7 @@
   absl::optional<bool> enable_dtx;
 
   // Some encoders can encode fewer channels than the actual input to make
-  // better use of the bandwidth. |num_channels| sets the number of channels
+  // better use of the bandwidth. `num_channels` sets the number of channels
   // to encode.
   absl::optional<size_t> num_channels;
 
diff --git a/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc b/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
index 547fedd..a34c563 100644
--- a/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
+++ b/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
@@ -92,8 +92,8 @@
     timestamp_ += static_cast<uint32_t>(num_audio_samples_10ms_);
   }
 
-  // Expect |num_calls| calls to the encoder, all successful. The last call
-  // claims to have encoded |kMockReturnEncodedBytes| bytes, and all the
+  // Expect `num_calls` calls to the encoder, all successful. The last call
+  // claims to have encoded `kMockReturnEncodedBytes` bytes, and all the
   // preceding ones 0 bytes.
   void ExpectEncodeCalls(size_t num_calls) {
     InSequence s;
@@ -108,7 +108,7 @@
   }
 
   // Verifies that the cng_ object waits until it has collected
-  // |blocks_per_frame| blocks of audio, and then dispatches all of them to
+  // `blocks_per_frame` blocks of audio, and then dispatches all of them to
   // the underlying codec (speech or cng).
   void CheckBlockGrouping(size_t blocks_per_frame, bool active_speech) {
     EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
@@ -169,7 +169,7 @@
           .WillOnce(Return(Vad::kPassive));
     }
 
-    // With this call to Encode(), |mock_vad_| should be called according to the
+    // With this call to Encode(), `mock_vad_` should be called according to the
     // above expectations.
     Encode();
   }
@@ -201,7 +201,7 @@
   std::unique_ptr<AudioEncoder> cng_;
   std::unique_ptr<MockAudioEncoder> mock_encoder_owner_;
   MockAudioEncoder* mock_encoder_;
-  MockVad* mock_vad_;  // Ownership is transferred to |cng_|.
+  MockVad* mock_vad_;  // Ownership is transferred to `cng_`.
   uint32_t timestamp_;
   int16_t audio_[kMaxNumSamples];
   size_t num_audio_samples_10ms_;
@@ -294,7 +294,7 @@
   for (size_t i = 0; i < 100; ++i) {
     Encode();
     // Check if it was time to call the cng encoder. This is done once every
-    // |kBlocksPerFrame| calls.
+    // `kBlocksPerFrame` calls.
     if ((i + 1) % kBlocksPerFrame == 0) {
       // Now check if a SID interval has elapsed.
       if ((i % (sid_frame_interval_ms / 10)) < kBlocksPerFrame) {
@@ -334,7 +334,7 @@
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kPassive, Vad::kActive));
   EXPECT_TRUE(encoded_info_.speech);
 
-  // All of the frame is passive speech. Expect no calls to |mock_encoder_|.
+  // All of the frame is passive speech. Expect no calls to `mock_encoder_`.
   EXPECT_FALSE(CheckMixedActivePassive(Vad::kPassive, Vad::kPassive));
   EXPECT_FALSE(encoded_info_.speech);
 }
@@ -442,7 +442,7 @@
   }
 
   // Override AudioEncoderCngTest::TearDown, since that one expects a call to
-  // the destructor of |mock_vad_|. In this case, that object is already
+  // the destructor of `mock_vad_`. In this case, that object is already
   // deleted.
   void TearDown() override { cng_.reset(); }
 
diff --git a/modules/audio_coding/codecs/cng/webrtc_cng.cc b/modules/audio_coding/codecs/cng/webrtc_cng.cc
index 2acaf2b..bfe77c7 100644
--- a/modules/audio_coding/codecs/cng/webrtc_cng.cc
+++ b/modules/audio_coding/codecs/cng/webrtc_cng.cc
@@ -193,10 +193,10 @@
   WebRtcSpl_ScaleVector(excitation, excitation, dec_used_scale_factor_,
                         num_samples, 13);
 
-  /* |lpPoly| - Coefficients in Q12.
-   * |excitation| - Speech samples.
+  /* `lpPoly` - Coefficients in Q12.
+   * `excitation` - Speech samples.
    * |nst->dec_filtstate| - State preservation.
-   * |out_data| - Filtered speech samples. */
+   * `out_data` - Filtered speech samples. */
   WebRtcSpl_FilterAR(lpPoly, WEBRTC_CNG_MAX_LPC_ORDER + 1, excitation,
                      num_samples, dec_filtstate_, WEBRTC_CNG_MAX_LPC_ORDER,
                      dec_filtstateLow_, WEBRTC_CNG_MAX_LPC_ORDER,
@@ -395,7 +395,7 @@
 }
 
 namespace {
-/* Values in |k| are Q15, and |a| Q12. */
+/* Values in `k` are Q15, and `a` Q12. */
 void WebRtcCng_K2a16(int16_t* k, int useOrder, int16_t* a) {
   int16_t any[WEBRTC_SPL_MAX_LPC_ORDER + 1];
   int16_t* aptr;
diff --git a/modules/audio_coding/codecs/cng/webrtc_cng.h b/modules/audio_coding/codecs/cng/webrtc_cng.h
index 563f676..7afd243 100644
--- a/modules/audio_coding/codecs/cng/webrtc_cng.h
+++ b/modules/audio_coding/codecs/cng/webrtc_cng.h
@@ -33,13 +33,13 @@
   void Reset();
 
   // Updates the CN state when a new SID packet arrives.
-  // |sid| is a view of the SID packet without the headers.
+  // `sid` is a view of the SID packet without the headers.
   void UpdateSid(rtc::ArrayView<const uint8_t> sid);
 
   // Generates comfort noise.
-  // |out_data| will be filled with samples - its size determines the number of
-  // samples generated. When |new_period| is true, CNG history will be reset
-  // before any audio is generated.  Returns |false| if outData is too large -
+  // `out_data` will be filled with samples - its size determines the number of
+  // samples generated. When `new_period` is true, CNG history will be reset
+  // before any audio is generated.  Returns `false` if outData is too large -
   // currently 640 bytes (equalling 10ms at 64kHz).
   // TODO(ossu): Specify better limits for the size of out_data. Either let it
   //             be unbounded or limit to 10ms in the current sample rate.
@@ -61,9 +61,9 @@
 class ComfortNoiseEncoder {
  public:
   // Creates a comfort noise encoder.
-  // |fs| selects sample rate: 8000 for narrowband or 16000 for wideband.
-  // |interval| sets the interval at which to generate SID data (in ms).
-  // |quality| selects the number of refl. coeffs. Maximum allowed is 12.
+  // `fs` selects sample rate: 8000 for narrowband or 16000 for wideband.
+  // `interval` sets the interval at which to generate SID data (in ms).
+  // `quality` selects the number of refl. coeffs. Maximum allowed is 12.
   ComfortNoiseEncoder(int fs, int interval, int quality);
   ~ComfortNoiseEncoder() = default;
 
@@ -74,8 +74,8 @@
   // Parameters are set as during construction.
   void Reset(int fs, int interval, int quality);
 
-  // Analyzes background noise from |speech| and appends coefficients to
-  // |output|.  Returns the number of coefficients generated.  If |force_sid| is
+  // Analyzes background noise from `speech` and appends coefficients to
+  // `output`.  Returns the number of coefficients generated.  If `force_sid` is
   // true, a SID frame is forced and the internal sid interval counter is reset.
   // Will fail if the input size is too large (> 640 samples, see
   // ComfortNoiseDecoder::Generate).
diff --git a/modules/audio_coding/codecs/g722/audio_decoder_g722.h b/modules/audio_coding/codecs/g722/audio_decoder_g722.h
index 6911e0b..eeca139 100644
--- a/modules/audio_coding/codecs/g722/audio_decoder_g722.h
+++ b/modules/audio_coding/codecs/g722/audio_decoder_g722.h
@@ -60,11 +60,11 @@
                      SpeechType* speech_type) override;
 
  private:
-  // Splits the stereo-interleaved payload in |encoded| into separate payloads
+  // Splits the stereo-interleaved payload in `encoded` into separate payloads
   // for left and right channels. The separated payloads are written to
-  // |encoded_deinterleaved|, which must hold at least |encoded_len| samples.
+  // `encoded_deinterleaved`, which must hold at least `encoded_len` samples.
   // The left channel starts at offset 0, while the right channel starts at
-  // offset encoded_len / 2 into |encoded_deinterleaved|.
+  // offset encoded_len / 2 into `encoded_deinterleaved`.
   void SplitStereoPacket(const uint8_t* encoded,
                          size_t encoded_len,
                          uint8_t* encoded_deinterleaved);
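A minimal sketch of the de-interleaving that SplitStereoPacket describes, assuming the payload interleaves one byte per channel (L R L R ...); the real member function works on the decoder's own buffers, so this standalone helper is only an illustration:

```cpp
#include <cstddef>
#include <cstdint>

void SplitStereoPayload(const uint8_t* encoded,
                        size_t encoded_len,
                        uint8_t* encoded_deinterleaved) {
  const size_t half = encoded_len / 2;
  for (size_t i = 0; i < half; ++i) {
    encoded_deinterleaved[i] = encoded[2 * i];              // Left channel.
    encoded_deinterleaved[half + i] = encoded[2 * i + 1];   // Right channel.
  }
}
```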
diff --git a/modules/audio_coding/codecs/ilbc/create_augmented_vec.c b/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
index 8033c95..7e21fae 100644
--- a/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
+++ b/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
@@ -39,7 +39,7 @@
   const int16_t *ppo, *ppi;
   int16_t cbVecTmp[4];
   /* Interpolation starts 4 elements before cbVec+index, but must not start
-     outside |cbVec|; clamping interp_len to stay within |cbVec|.
+     outside `cbVec`; clamping interp_len to stay within `cbVec`.
    */
   size_t interp_len = WEBRTC_SPL_MIN(index, 4);
 
@@ -69,12 +69,12 @@
 
   /* copy the second noninterpolated part */
   ppo = buffer - index;
-  /* |tempbuff2| is declared in WebRtcIlbcfix_GetCbVec and is SUBL+5 elements
-     long. |buffer| points one element past the end of that vector, i.e., at
+  /* `tempbuff2` is declared in WebRtcIlbcfix_GetCbVec and is SUBL+5 elements
+     long. `buffer` points one element past the end of that vector, i.e., at
      tempbuff2+SUBL+5. Since ppo=buffer-index, we cannot read any more than
-     |index| elements from |ppo|.
+     `index` elements from `ppo`.
 
-     |cbVec| is declared to be SUBL elements long in WebRtcIlbcfix_CbConstruct.
+     `cbVec` is declared to be SUBL elements long in WebRtcIlbcfix_CbConstruct.
      Therefore, we can only write SUBL-index elements to cbVec+index.
 
      These two conditions limit the number of elements to copy.
diff --git a/modules/audio_coding/codecs/ilbc/get_cd_vec.c b/modules/audio_coding/codecs/ilbc/get_cd_vec.c
index 145cb96..e9cd200 100644
--- a/modules/audio_coding/codecs/ilbc/get_cd_vec.c
+++ b/modules/audio_coding/codecs/ilbc/get_cd_vec.c
@@ -99,7 +99,7 @@
         // We're going to fill in cbveclen + 5 elements of tempbuff2 in
         // WebRtcSpl_FilterMAFastQ12, less than the SUBL + 5 elements we'll be
         // using in WebRtcIlbcfix_CreateAugmentedVec. This error is caused by
-        // bad values in |index| (which come from the encoded stream). Tell the
+        // bad values in `index` (which come from the encoded stream). Tell the
         // caller that things went south, and that the decoder state is now
         // corrupt (because it's half-way through an update that we can't
         // complete).
diff --git a/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c b/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
index cb15445..842e77f 100644
--- a/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
+++ b/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
@@ -831,15 +831,15 @@
 two matrixes, multiply them, and write the results into an output buffer.
 
 Note that two factors (or, multipliers) determine the initialization values of
-the variable |matrix1_index| in the code. The relationship is
-|matrix1_index| = |matrix1_index_factor1| * |matrix1_index_factor2|, where
-|matrix1_index_factor1| is given by the argument while |matrix1_index_factor2|
-is determined by the value of argument |matrix1_index_init_case|;
-|matrix1_index_factor2| is the value of the outmost loop counter j (when
-|matrix1_index_init_case| is 0), or the value of the middle loop counter k (when
-|matrix1_index_init_case| is non-zero).
+the variable `matrix1_index` in the code. The relationship is
+`matrix1_index` = `matrix1_index_factor1` * `matrix1_index_factor2`, where
+`matrix1_index_factor1` is given by the argument while `matrix1_index_factor2`
+is determined by the value of argument `matrix1_index_init_case`;
+`matrix1_index_factor2` is the value of the outmost loop counter j (when
+`matrix1_index_init_case` is 0), or the value of the middle loop counter k (when
+`matrix1_index_init_case` is non-zero).
 
-|matrix0_index| is determined the same way.
+`matrix0_index` is determined the same way.
 
 Arguments:
   matrix0[]:                 matrix0 data in Q15 domain.
diff --git a/modules/audio_coding/codecs/isac/fix/source/filters.c b/modules/audio_coding/codecs/isac/fix/source/filters.c
index 85860f7..838ba4b 100644
--- a/modules/audio_coding/codecs/isac/fix/source/filters.c
+++ b/modules/audio_coding/codecs/isac/fix/source/filters.c
@@ -75,7 +75,7 @@
       a = WEBRTC_SPL_MUL_16_32_RSFT16(InOut16[n], APSectionFactors[j]); //Q0*Q31=Q31 shifted 16 gives Q15
       a <<= 1;  // Q15 -> Q16
       b = WebRtcSpl_AddSatW32(a, FilterState[j]);  //Q16+Q16=Q16
-      // |a| in Q15 (Q0*Q31=Q31 shifted 16 gives Q15).
+      // `a` in Q15 (Q0*Q31=Q31 shifted 16 gives Q15).
       a = WEBRTC_SPL_MUL_16_32_RSFT16(b >> 16, -APSectionFactors[j]);
       // FilterState[j]: Q15<<1 + Q0<<16 = Q16 + Q16 = Q16
       FilterState[j] = WebRtcSpl_AddSatW32(a << 1, (uint32_t)InOut16[n] << 16);
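As a worked example of the Q-format bookkeeping in the comment above (a standalone sketch using 64-bit intermediates instead of the WEBRTC_SPL macros): multiplying a Q0 integer by a Q31 coefficient gives a Q31 product, shifting right by 16 leaves Q15, and one further left shift gives Q16.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const int16_t x_q0 = 3;            // Plain integer (Q0).
  const int32_t c_q31 = 0x40000000;  // 0.5 in Q31.
  // Q0 * Q31 = Q31; >> 16 leaves Q15.
  const int32_t a_q15 =
      static_cast<int32_t>((static_cast<int64_t>(x_q0) * c_q31) >> 16);
  // 3 * 0.5 = 1.5 -> 1.5 * 2^15 = 49152 in Q15.
  std::printf("a (Q15) = %d, as float = %f\n", a_q15, a_q15 / 32768.0);
  const int32_t a_q16 = a_q15 << 1;  // Q15 -> Q16.
  std::printf("a (Q16) = %d, as float = %f\n", a_q16, a_q16 / 65536.0);
}
```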
diff --git a/modules/audio_coding/codecs/isac/fix/source/isacfix.c b/modules/audio_coding/codecs/isac/fix/source/isacfix.c
index 067d8f3..9a66591 100644
--- a/modules/audio_coding/codecs/isac/fix/source/isacfix.c
+++ b/modules/audio_coding/codecs/isac/fix/source/isacfix.c
@@ -34,7 +34,7 @@
 MatrixProduct1 WebRtcIsacfix_MatrixProduct1;
 MatrixProduct2 WebRtcIsacfix_MatrixProduct2;
 
-/* This method assumes that |stream_size_bytes| is in valid range,
+/* This method assumes that `stream_size_bytes` is in valid range,
  * i.e. >= 0 && <=  STREAM_MAXW16_60MS
  */
 static void InitializeDecoderBitstream(size_t stream_size_bytes,
@@ -294,8 +294,8 @@
   return statusInit;
 }
 
-/* Read the given number of bytes of big-endian 16-bit integers from |src| and
-   write them to |dest| in host endian. If |nbytes| is odd, the number of
+/* Read the given number of bytes of big-endian 16-bit integers from `src` and
+   write them to `dest` in host endian. If `nbytes` is odd, the number of
    output elements is rounded up, and the least significant byte of the last
    element is set to 0. */
 static void read_be16(const uint8_t* src, size_t nbytes, uint16_t* dest) {
@@ -306,8 +306,8 @@
     dest[nbytes / 2] = src[nbytes - 1] << 8;
 }
 
-/* Read the given number of bytes of host-endian 16-bit integers from |src| and
-   write them to |dest| in big endian. If |nbytes| is odd, the number of source
+/* Read the given number of bytes of host-endian 16-bit integers from `src` and
+   write them to `dest` in big endian. If `nbytes` is odd, the number of source
    elements is rounded up (but only the most significant byte of the last
    element is used), and the number of output bytes written will be
    nbytes + 1. */
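For reference, a completed standalone sketch of the big-endian read described above; only the odd-byte tail appears in the hunk, so the main loop here is reconstructed from the comment:

```cpp
#include <cstddef>
#include <cstdint>

static void ReadBe16(const uint8_t* src, size_t nbytes, uint16_t* dest) {
  // Combine byte pairs, most significant byte first.
  for (size_t i = 0; i + 1 < nbytes; i += 2) {
    dest[i / 2] = static_cast<uint16_t>((src[i] << 8) | src[i + 1]);
  }
  // Odd byte count: the last byte goes into the high half, low half is 0.
  if (nbytes % 2 == 1) {
    dest[nbytes / 2] = static_cast<uint16_t>(src[nbytes - 1] << 8);
  }
}
```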
diff --git a/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c
index b538085..f151cd1 100644
--- a/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c
+++ b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c
@@ -663,7 +663,7 @@
     /* less noise for lower frequencies, by filtering/scaling autocorrelation sequences */
 
     /* Calculate corrlo2[0] = tmpQQlo * corrlo[0] - 2.0*tmpQQlo * corrlo[1];*/
-    // |corrlo2QQ| in Q(QdomLO-5).
+    // `corrlo2QQ` in Q(QdomLO-5).
     corrlo2QQ[0] = (WEBRTC_SPL_MUL_16_32_RSFT16(tmpQQlo, corrloQQ[0]) >> 1) -
         (WEBRTC_SPL_MUL_16_32_RSFT16(aaQ14, corrloQQ[1]) >> 2);
 
@@ -721,12 +721,12 @@
           tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha, tmp);
         } else if ((sh-shMem)<7){
           tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], shMem); // Shift up CorrBufLoQQ as much as possible
-          // Shift |alpha| the number of times required to get |tmp| in QdomLO.
+          // Shift `alpha` the number of times required to get `tmp` in QdomLO.
           tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << (sh - shMem), tmp);
         } else {
           tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
-          // Shift |alpha| as much as possible without overflow the number of
-          // times required to get |tmp| in QdomLO.
+          // Shift `alpha` as much as possible without overflow the number of
+          // times required to get `tmp` in QdomLO.
           tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << 6, tmp);
           tmpCorr = corrloQQ[n] >> (sh - shMem - 6);
           tmp = tmp + tmpCorr;
@@ -774,7 +774,7 @@
           maskdata->CorrBufHiQdom[n] = QdomHI;
         } else if ((sh-shMem)<7) {
           tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
-          // Shift |alpha| the number of times required to get |tmp| in QdomHI.
+          // Shift `alpha` the number of times required to get `tmp` in QdomHI.
           tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << (sh - shMem), tmp);
           tmpCorr = corrhiQQ[n];
           tmp = tmp + tmpCorr;
@@ -782,8 +782,8 @@
           maskdata->CorrBufHiQdom[n] = QdomHI;
         } else {
           tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
-          // Shift |alpha| as much as possible without overflow the number of
-          // times required to get |tmp| in QdomHI.
+          // Shift `alpha` as much as possible without overflow the number of
+          // times required to get `tmp` in QdomHI.
           tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << 6, tmp);
           tmpCorr = corrhiQQ[n] >> (sh - shMem - 6);
           tmp = tmp + tmpCorr;
@@ -919,7 +919,7 @@
 
       tmp32a = varscaleQ14 >> 1;  // H_T_HQ19=65536 (16-17=-1)
 
-      ssh = sh_hi >> 1;  // |sqrt_nrg| is in Qssh.
+      ssh = sh_hi >> 1;  // `sqrt_nrg` is in Qssh.
       sh = ssh - 14;
       tmp32b = WEBRTC_SPL_SHIFT_W32(tmp32a, sh); // Q14->Qssh
       tmp32c = sqrt_nrg + tmp32b;  // Qssh  (denominator)
diff --git a/modules/audio_coding/codecs/isac/isac_webrtc_api_test.cc b/modules/audio_coding/codecs/isac/isac_webrtc_api_test.cc
index a2e1e08..40381d8 100644
--- a/modules/audio_coding/codecs/isac/isac_webrtc_api_test.cc
+++ b/modules/audio_coding/codecs/isac/isac_webrtc_api_test.cc
@@ -203,7 +203,7 @@
       e->Encode(/*rtp_timestamp=*/0, AudioFrameToView(in), &encoded);
       num_bytes += encoded.size();
     }
-    // Inverse of the duration of |kNumFrames| 10 ms frames (unit: seconds^-1).
+    // Inverse of the duration of `kNumFrames` 10 ms frames (unit: seconds^-1).
     constexpr float kAudioDurationInv = 100.f / kNumFrames;
     const int measured_bitrate_bps = 8 * num_bytes * kAudioDurationInv;
     EXPECT_LT(measured_bitrate_bps, bitrate_bps + 2000);  // Max 2 kbps extra.
diff --git a/modules/audio_coding/codecs/isac/main/include/isac.h b/modules/audio_coding/codecs/isac/main/include/isac.h
index 3d2caef..f45bbb3 100644
--- a/modules/audio_coding/codecs/isac/main/include/isac.h
+++ b/modules/audio_coding/codecs/isac/main/include/isac.h
@@ -606,7 +606,7 @@
                          int16_t* decoded,
                          int16_t* speechType);
 
-/* If |inst| is a decoder but not an encoder: tell it what sample rate the
+/* If `inst` is a decoder but not an encoder: tell it what sample rate the
    encoder is using, for bandwidth estimation purposes. */
 void WebRtcIsac_SetEncSampRateInDecoder(ISACStruct* inst, int sample_rate_hz);
 
diff --git a/modules/audio_coding/codecs/isac/main/source/entropy_coding.c b/modules/audio_coding/codecs/isac/main/source/entropy_coding.c
index 6692a51..188c8f6 100644
--- a/modules/audio_coding/codecs/isac/main/source/entropy_coding.c
+++ b/modules/audio_coding/codecs/isac/main/source/entropy_coding.c
@@ -1446,7 +1446,7 @@
     index[k] = WebRtcIsac_kQArRcInitIndex[k];
     // The safe-guards in following while conditions are to suppress gcc 4.8.3
     // warnings, Issue 2888. Otherwise, first and last elements of
-    // |WebRtcIsac_kQArBoundaryLevels| are such that the following search
+    // `WebRtcIsac_kQArBoundaryLevels` are such that the following search
     // *never* cause an out-of-boundary read.
     if (RCQ15[k] > WebRtcIsac_kQArBoundaryLevels[index[k]]) {
       while (index[k] + 1 < NUM_AR_RC_QUANT_BAUNDARY &&
diff --git a/modules/audio_coding/codecs/isac/main/source/pitch_filter.c b/modules/audio_coding/codecs/isac/main/source/pitch_filter.c
index 61cd533..899d842 100644
--- a/modules/audio_coding/codecs/isac/main/source/pitch_filter.c
+++ b/modules/audio_coding/codecs/isac/main/source/pitch_filter.c
@@ -25,8 +25,8 @@
  * Post-filtering:
  *   y(z) = x(z) - damper(z) * gain * (x(z) + y(z)) * z ^ (-lag);
  *
- * Note that |lag| is a floating number so we perform an interpolation to
- * obtain the correct |lag|.
+ * Note that `lag` is a floating number so we perform an interpolation to
+ * obtain the correct `lag`.
  *
  */
 
@@ -86,7 +86,7 @@
  * buffer           : a buffer where the sum of previous inputs and outputs
  *                    are stored.
  * damper_state     : the state of the damping filter. The filter is defined by
- *                    |kDampFilter|.
+ *                    `kDampFilter`.
  * interpol_coeff   : pointer to a set of coefficient which are used to utilize
  *                    fractional pitch by interpolation.
  * gain             : pitch-gain to be applied to the current segment of input.
@@ -353,7 +353,7 @@
 
   if ((mode == kPitchFilterPreGain) || (mode == kPitchFilterPreLa)) {
     /* Filter the lookahead segment, this is treated as the last sub-frame. So
-     * set |pf_param| to last sub-frame. */
+     * set `pf_param` to last sub-frame. */
     filter_parameters.sub_frame = PITCH_SUBFRAMES - 1;
     filter_parameters.num_samples = QLOOKAHEAD;
     FilterSegment(in_data, &filter_parameters, out_data, out_dg);
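The post-filter equation quoted at the top of this file, y(z) = x(z) - damper(z) * gain * (x(z) + y(z)) * z^(-lag), uses a fractional `lag`. A heavily simplified sketch that replaces the damping and interpolation filters with an identity and plain linear interpolation (an illustration of the recursion, not the iSAC implementation):

```cpp
#include <cmath>
#include <vector>

void PitchPostFilter(const std::vector<float>& x,
                     float lag,   // Fractional pitch lag in samples.
                     float gain,
                     std::vector<float>* y) {
  const int lag_int = static_cast<int>(std::floor(lag));
  const float frac = lag - lag_int;
  std::vector<float> sum(x.size(), 0.0f);  // Running x[n] + y[n].
  y->assign(x.size(), 0.0f);
  for (size_t n = 0; n < x.size(); ++n) {
    float delayed = 0.0f;
    if (static_cast<int>(n) - lag_int - 1 >= 0) {
      // Fractional delay of the (x + y) buffer by linear interpolation.
      delayed = (1.0f - frac) * sum[n - lag_int] + frac * sum[n - lag_int - 1];
    }
    (*y)[n] = x[n] - gain * delayed;
    sum[n] = x[n] + (*y)[n];
  }
}
```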
diff --git a/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc b/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc
index d9efc21..dacf325 100644
--- a/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc
+++ b/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc
@@ -59,7 +59,7 @@
         new LegacyEncodedAudioFrame(decoder, std::move(payload)));
     results.emplace_back(timestamp, 0, std::move(frame));
   } else {
-    // Reduce the split size by half as long as |split_size_bytes| is at least
+    // Reduce the split size by half as long as `split_size_bytes` is at least
     // twice the minimum chunk size (so that the resulting size is at least as
     // large as the minimum chunk size).
     while (split_size_bytes >= 2 * min_chunk_size) {
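The loop above halves the split size until a further halving would drop a chunk below the minimum size. A standalone sketch of that rule (the loop body is implied by the comment):

```cpp
#include <cstddef>

size_t ReduceSplitSize(size_t split_size_bytes, size_t min_chunk_size) {
  // Halve while the result would still be at least the minimum chunk size.
  while (split_size_bytes >= 2 * min_chunk_size) {
    split_size_bytes /= 2;
  }
  return split_size_bytes;
}
```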
diff --git a/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
index 7c62e98..e4d3b9e 100644
--- a/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
+++ b/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -564,9 +564,9 @@
 void AudioEncoderOpusImpl::SetReceiverFrameLengthRange(
     int min_frame_length_ms,
     int max_frame_length_ms) {
-  // Ensure that |SetReceiverFrameLengthRange| is called before
-  // |EnableAudioNetworkAdaptor|, otherwise we need to recreate
-  // |audio_network_adaptor_|, which is not a needed use case.
+  // Ensure that `SetReceiverFrameLengthRange` is called before
+  // `EnableAudioNetworkAdaptor`, otherwise we need to recreate
+  // `audio_network_adaptor_`, which is not a needed use case.
   RTC_DCHECK(!audio_network_adaptor_);
   FindSupportedFrameLengths(min_frame_length_ms, max_frame_length_ms,
                             &config_.supported_frame_lengths_ms);
diff --git a/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc b/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
index f1953ea..daca6aa 100644
--- a/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
+++ b/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
@@ -228,8 +228,8 @@
 
 TEST_P(AudioEncoderOpusTest, SetReceiverFrameLengthRange) {
   auto states = CreateCodec(sample_rate_hz_, 2);
-  // Before calling to |SetReceiverFrameLengthRange|,
-  // |supported_frame_lengths_ms| should contain only the frame length being
+  // Before calling to `SetReceiverFrameLengthRange`,
+  // `supported_frame_lengths_ms` should contain only the frame length being
   // used.
   using ::testing::ElementsAre;
   EXPECT_THAT(states->encoder->supported_frame_lengths_ms(),
@@ -348,7 +348,7 @@
   // will fail.
   constexpr float kPacketLossFraction_1 = 0.02f;
   constexpr float kPacketLossFraction_2 = 0.198f;
-  // |kSecondSampleTimeMs| is chosen to ease the calculation since
+  // `kSecondSampleTimeMs` is chosen to ease the calculation since
   // 0.9999 ^ 6931 = 0.5.
   constexpr int64_t kSecondSampleTimeMs = 6931;
 
@@ -380,7 +380,7 @@
   states->encoder->OnReceivedUplinkBandwidth(kDefaultOpusRate * 2,
                                              absl::nullopt);
 
-  // Since |OnReceivedOverhead| has not been called, the codec bitrate should
+  // Since `OnReceivedOverhead` has not been called, the codec bitrate should
   // not change.
   EXPECT_EQ(kDefaultOpusRate, states->encoder->GetTargetBitrate());
 }
diff --git a/modules/audio_coding/codecs/opus/opus_fec_test.cc b/modules/audio_coding/codecs/opus/opus_fec_test.cc
index 1923647..0636935 100644
--- a/modules/audio_coding/codecs/opus/opus_fec_test.cc
+++ b/modules/audio_coding/codecs/opus/opus_fec_test.cc
@@ -218,8 +218,8 @@
 
       time_now_ms += block_duration_ms_;
 
-      // |data_pointer_| is incremented and wrapped across
-      // |loop_length_samples_|.
+      // `data_pointer_` is incremented and wrapped across
+      // `loop_length_samples_`.
       data_pointer_ = (data_pointer_ + block_length_sample_ * channels_) %
                       loop_length_samples_;
     }
diff --git a/modules/audio_coding/codecs/opus/opus_interface.cc b/modules/audio_coding/codecs/opus/opus_interface.cc
index f684452..0337919 100644
--- a/modules/audio_coding/codecs/opus/opus_interface.cc
+++ b/modules/audio_coding/codecs/opus/opus_interface.cc
@@ -574,8 +574,8 @@
 
 /* For decoder to determine if it is to output speech or comfort noise. */
 static int16_t DetermineAudioType(OpusDecInst* inst, size_t encoded_bytes) {
-  // Audio type becomes comfort noise if |encoded_byte| is 1 and keeps
-  // to be so if the following |encoded_byte| are 0 or 1.
+  // Audio type becomes comfort noise if `encoded_byte` is 1 and keeps
+  // to be so if the following `encoded_byte` are 0 or 1.
   if (encoded_bytes == 0 && inst->in_dtx_mode) {
     return 2;  // Comfort noise.
   } else if (encoded_bytes == 1 || encoded_bytes == 2) {
@@ -595,7 +595,7 @@
   }
 }
 
-/* |frame_size| is set to maximum Opus frame size in the normal case, and
+/* `frame_size` is set to maximum Opus frame size in the normal case, and
  * is set to the number of samples needed for PLC in case of losses.
  * It is up to the caller to make sure the value is correct. */
 static int DecodeNative(OpusDecInst* inst,
@@ -632,9 +632,9 @@
       FrameSizePerChannel(kWebRtcOpusPlcFrameSizeMs, inst->sample_rate_hz);
 
   if (inst->plc_use_prev_decoded_samples) {
-    /* The number of samples we ask for is |number_of_lost_frames| times
-     * |prev_decoded_samples_|. Limit the number of samples to maximum
-     * |MaxFrameSizePerChannel()|. */
+    /* The number of samples we ask for is `number_of_lost_frames` times
+     * `prev_decoded_samples_`. Limit the number of samples to maximum
+     * `MaxFrameSizePerChannel()`. */
     plc_samples = inst->prev_decoded_samples;
     const int max_samples_per_channel =
         MaxFrameSizePerChannel(inst->sample_rate_hz);
@@ -729,9 +729,9 @@
 
 int WebRtcOpus_PlcDuration(OpusDecInst* inst) {
   if (inst->plc_use_prev_decoded_samples) {
-    /* The number of samples we ask for is |number_of_lost_frames| times
-     * |prev_decoded_samples_|. Limit the number of samples to maximum
-     * |MaxFrameSizePerChannel()|. */
+    /* The number of samples we ask for is `number_of_lost_frames` times
+     * `prev_decoded_samples_`. Limit the number of samples to maximum
+     * `MaxFrameSizePerChannel()`. */
     const int plc_samples = inst->prev_decoded_samples;
     const int max_samples_per_channel =
         MaxFrameSizePerChannel(inst->sample_rate_hz);
@@ -826,8 +826,8 @@
   // as binary values with uniform probability, they can be extracted directly
   // from the most significant bits of the first byte of compressed data.
   for (int n = 0; n < channels; n++) {
-    // The LBRR bit for channel 1 is on the (|silk_frames| + 1)-th bit, and
-    // that of channel 2 is on the |(|silk_frames| + 1) * 2 + 1|-th bit.
+    // The LBRR bit for channel 1 is on the (`silk_frames` + 1)-th bit, and
+    // that of channel 2 is on the ((`silk_frames` + 1) * 2 + 1)-th bit.
     if (frame_data[0][0] & (0x80 >> ((n + 1) * (silk_frames + 1) - 1)))
       return 1;
   }
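
The flag test in the loop above packs a lot into one expression; below is a small stand-alone sketch (not the library code) of the same per-channel LBRR test, with made-up values for the first compressed byte and the SILK frame count.

#include <cstdint>
#include <cstdio>

// Mirrors the test above: for 0-based channel `n`, the LBRR bit sits
// `(n + 1) * (silk_frames + 1) - 1` positions below the MSB of the first
// compressed byte. Stand-alone sketch; all values are illustrative.
bool AnyLbrrFlagSet(uint8_t first_byte, int silk_frames, int channels) {
  for (int n = 0; n < channels; ++n) {
    if (first_byte & (0x80 >> ((n + 1) * (silk_frames + 1) - 1)))
      return true;
  }
  return false;
}

int main() {
  // Stereo, 2 SILK frames per channel: channel 2's flag sits at 0x80 >> 5.
  printf("%d\n", AnyLbrrFlagSet(/*first_byte=*/0x04, /*silk_frames=*/2,
                                /*channels=*/2));  // prints 1
}
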
diff --git a/modules/audio_coding/codecs/opus/opus_unittest.cc b/modules/audio_coding/codecs/opus/opus_unittest.cc
index 80cab50..b507a32 100644
--- a/modules/audio_coding/codecs/opus/opus_unittest.cc
+++ b/modules/audio_coding/codecs/opus/opus_unittest.cc
@@ -115,10 +115,10 @@
 
   void TestCbrEffect(bool dtx, int block_length_ms);
 
-  // Prepare |speech_data_| for encoding, read from a hard-coded file.
+  // Prepare `speech_data_` for encoding, read from a hard-coded file.
   // After preparation, |speech_data_.GetNextBlock()| returns a pointer to a
-  // block of |block_length_ms| milliseconds. The data is looped every
-  // |loop_length_ms| milliseconds.
+  // block of `block_length_ms` milliseconds. The data is looped every
+  // `loop_length_ms` milliseconds.
   void PrepareSpeechData(int block_length_ms, int loop_length_ms);
 
   int EncodeDecode(WebRtcOpusEncInst* encoder,
@@ -310,24 +310,24 @@
   // one with an arbitrary size and the other of 1-byte, then stops sending for
   // a certain number of frames.
 
-  // |max_dtx_frames| is the maximum number of frames Opus can stay in DTX.
+  // `max_dtx_frames` is the maximum number of frames Opus can stay in DTX.
   // TODO(kwiberg): Why does this number depend on the encoding sample rate?
   const int max_dtx_frames =
       (encoder_sample_rate_hz_ == 16000 ? 800 : 400) / block_length_ms + 1;
 
-  // We run |kRunTimeMs| milliseconds of pure silence.
+  // We run `kRunTimeMs` milliseconds of pure silence.
   const int kRunTimeMs = 4500;
 
-  // We check that, after a |kCheckTimeMs| milliseconds (given that the CNG in
+  // We check that, after `kCheckTimeMs` milliseconds (given that the CNG in
   // Opus needs time to adapt), the absolute values of DTX decoded signal are
-  // bounded by |kOutputValueBound|.
+  // bounded by `kOutputValueBound`.
   const int kCheckTimeMs = 4000;
 
 #if defined(OPUS_FIXED_POINT)
   // Fixed-point Opus generates a random (comfort) noise, which has a less
   // predictable value bound than its floating-point Opus. This value depends on
   // input signal, and the time window for checking the output values (between
-  // |kCheckTimeMs| and |kRunTimeMs|).
+  // `kCheckTimeMs` and `kRunTimeMs`).
   const uint16_t kOutputValueBound = 30;
 
 #else
@@ -336,7 +336,7 @@
 
   int time = 0;
   while (time < kRunTimeMs) {
-    // DTX mode is maintained for maximum |max_dtx_frames| frames.
+    // DTX mode is maintained for maximum `max_dtx_frames` frames.
     int i = 0;
     for (; i < max_dtx_frames; ++i) {
       time += block_length_ms;
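
To put numbers on the `max_dtx_frames` comment above, a tiny stand-alone restatement of that formula, assuming a 20 ms block length (the block length is a test parameter, so 20 ms is only an example):

#include <cstdio>

int main() {
  const int block_length_ms = 20;  // assumed; the test parameterizes this
  const int rates_hz[] = {16000, 48000};
  for (int encoder_sample_rate_hz : rates_hz) {
    const int max_dtx_frames =
        (encoder_sample_rate_hz == 16000 ? 800 : 400) / block_length_ms + 1;
    printf("%d Hz -> at most %d frames in DTX\n", encoder_sample_rate_hz,
           max_dtx_frames);  // 41 frames at 16 kHz, 21 otherwise
  }
}
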
diff --git a/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h b/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h
index a89dfd8..a280ca2 100644
--- a/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h
+++ b/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h
@@ -29,11 +29,11 @@
   AudioRingBuffer(size_t channels, size_t max_frames);
   ~AudioRingBuffer();
 
-  // Copies |data| to the buffer and advances the write pointer. |channels| must
+  // Copies `data` to the buffer and advances the write pointer. `channels` must
   // be the same as at creation time.
   void Write(const float* const* data, size_t channels, size_t frames);
 
-  // Copies from the buffer to |data| and advances the read pointer. |channels|
+  // Copies from the buffer to `data` and advances the read pointer. `channels`
   // must be the same as at creation time.
   void Read(float* const* data, size_t channels, size_t frames);
 
diff --git a/modules/audio_coding/codecs/opus/test/blocker.cc b/modules/audio_coding/codecs/opus/test/blocker.cc
index 7f102b5..33406ce 100644
--- a/modules/audio_coding/codecs/opus/test/blocker.cc
+++ b/modules/audio_coding/codecs/opus/test/blocker.cc
@@ -16,7 +16,7 @@
 
 namespace {
 
-// Adds |a| and |b| frame by frame into |result| (basically matrix addition).
+// Adds `a` and `b` frame by frame into `result` (basically matrix addition).
 void AddFrames(const float* const* a,
                size_t a_start_index,
                const float* const* b,
@@ -33,7 +33,7 @@
   }
 }
 
-// Copies |src| into |dst| channel by channel.
+// Copies `src` into `dst` channel by channel.
 void CopyFrames(const float* const* src,
                 size_t src_start_index,
                 size_t num_frames,
@@ -46,7 +46,7 @@
   }
 }
 
-// Moves |src| into |dst| channel by channel.
+// Moves `src` into `dst` channel by channel.
 void MoveFrames(const float* const* src,
                 size_t src_start_index,
                 size_t num_frames,
@@ -69,8 +69,8 @@
   }
 }
 
-// Pointwise multiplies each channel of |frames| with |window|. Results are
-// stored in |frames|.
+// Pointwise multiplies each channel of `frames` with `window`. Results are
+// stored in `frames`.
 void ApplyWindow(const float* window,
                  size_t num_frames,
                  size_t num_channels,
@@ -134,7 +134,7 @@
 // On each call to ProcessChunk():
 // 1. New input gets read into sections _b_ and _c_ of the input buffer.
 // 2. We block starting from frame_offset.
-// 3. We block until we reach a block |bl| that doesn't contain any frames
+// 3. We block until we reach a block `bl` that doesn't contain any frames
 //    from sections _a_ or _b_ of the input buffer.
 // 4. We window the current block, fire the callback for processing, window
 //    again, and overlap/add to the output buffer.
@@ -142,7 +142,7 @@
 // 6. For both the input and the output buffers, we copy section _c_ into
 //    section _a_.
 // 7. We set the new frame_offset to be the difference between the first frame
-//    of |bl| and the border between sections _b_ and _c_.
+//    of `bl` and the border between sections _b_ and _c_.
 //
 // When block_size > chunk_size the input and output buffers look like this:
 //
@@ -153,13 +153,13 @@
 // On each call to ProcessChunk():
 // The procedure is the same as above, except for:
 // 1. New input gets read into section _c_ of the input buffer.
-// 3. We block until we reach a block |bl| that doesn't contain any frames
+// 3. We block until we reach a block `bl` that doesn't contain any frames
 //    from section _a_ of the input buffer.
 // 5. We copy section _a_ of the output buffer into output.
 // 6. For both the input and the output buffers, we copy sections _b_ and _c_
 //    into section _a_ and _b_.
 // 7. We set the new frame_offset to be the difference between the first frame
-//    of |bl| and the border between sections _a_ and _b_.
+//    of `bl` and the border between sections _a_ and _b_.
 //
 // * delay here refers to inintial_delay_
 //
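
As a rough single-channel illustration of the window / process / window-again / overlap-add step in point 4 above (not the Blocker implementation; the Hann window, half-block shift, and identity per-block processing are assumptions):

#include <cmath>
#include <cstdio>
#include <vector>

// Windows each block, applies an (identity) per-block process, windows
// again, and overlap-adds into the output, advancing by `shift` samples.
// Illustrative only; the real Blocker also buffers across chunks and
// drives a callback for multiple channels.
std::vector<float> WindowOverlapAdd(const std::vector<float>& input,
                                    size_t block_size) {
  const size_t shift = block_size / 2;  // e.g. half a block for a Hann window
  const float kPi = 3.14159265358979f;
  std::vector<float> window(block_size);
  for (size_t i = 0; i < block_size; ++i)
    window[i] = 0.5f - 0.5f * std::cos(2.0f * kPi * i / block_size);

  std::vector<float> output(input.size(), 0.0f);
  for (size_t start = 0; start + block_size <= input.size(); start += shift) {
    for (size_t i = 0; i < block_size; ++i) {
      const float processed = input[start + i] * window[i];  // pre-window
      output[start + i] += processed * window[i];            // post-window, add
    }
  }
  return output;
}

int main() {
  std::vector<float> in(1024, 1.0f);
  std::vector<float> out = WindowOverlapAdd(in, /*block_size=*/256);
  printf("%f\n", out[512]);  // interior samples carry the summed window gain
}
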
diff --git a/modules/audio_coding/codecs/opus/test/blocker.h b/modules/audio_coding/codecs/opus/test/blocker.h
index 26177bc..59b7e29 100644
--- a/modules/audio_coding/codecs/opus/test/blocker.h
+++ b/modules/audio_coding/codecs/opus/test/blocker.h
@@ -39,7 +39,7 @@
 // of audio, which is not a power of 2. Blocker allows us to specify the
 // transform and all other necessary processing via the Process() callback
 // function without any constraints on the transform-size
-// (read: |block_size_|) or received-audio-size (read: |chunk_size_|).
+// (read: `block_size_`) or received-audio-size (read: `chunk_size_`).
 // We handle this for the multichannel audio case, allowing for different
 // numbers of input and output channels (for example, beamforming takes 2 or
 // more input channels and returns 1 output channel). Audio signals are
@@ -53,8 +53,8 @@
 //   sending back a processed chunk
 //
 // To use blocker:
-// 1. Impelment a BlockerCallback object |bc|.
-// 2. Instantiate a Blocker object |b|, passing in |bc|.
+// 1. Implement a BlockerCallback object `bc`.
+// 2. Instantiate a Blocker object `b`, passing in `bc`.
 // 3. As you receive audio, call b.ProcessChunk() to get processed audio.
 //
 // A small amount of delay is added to the first received chunk to deal with
@@ -101,7 +101,7 @@
   // input and output buffers are responsible for saving those frames between
   // calls to ProcessChunk().
   //
-  // Both contain |initial delay| + |chunk_size| frames. The input is a fairly
+  // Both contain `initial delay` + `chunk_size` frames. The input is a fairly
   // standard FIFO, but due to the overlap-add it's harder to use an
   // AudioRingBuffer for the output.
   AudioRingBuffer input_buffer_;
@@ -116,7 +116,7 @@
   std::unique_ptr<float[]> window_;
 
   // The amount of frames between the start of contiguous blocks. For example,
-  // |shift_amount_| = |block_size_| / 2 for a Hann window.
+  // `shift_amount_` = `block_size_` / 2 for a Hann window.
   size_t shift_amount_;
 
   BlockerCallback* callback_;
diff --git a/modules/audio_coding/codecs/opus/test/lapped_transform.h b/modules/audio_coding/codecs/opus/test/lapped_transform.h
index 3620df3..bb25c34 100644
--- a/modules/audio_coding/codecs/opus/test/lapped_transform.h
+++ b/modules/audio_coding/codecs/opus/test/lapped_transform.h
@@ -84,11 +84,11 @@
                                    std::complex<float>* const* out_block) = 0;
   };
 
-  // Construct a transform instance. |chunk_length| is the number of samples in
-  // each channel. |window| defines the window, owned by the caller (a copy is
-  // made internally); |window| should have length equal to |block_length|.
-  // |block_length| defines the length of a block, in samples.
-  // |shift_amount| is in samples. |callback| is the caller-owned audio
+  // Construct a transform instance. `chunk_length` is the number of samples in
+  // each channel. `window` defines the window, owned by the caller (a copy is
+  // made internally); `window` should have length equal to `block_length`.
+  // `block_length` defines the length of a block, in samples.
+  // `shift_amount` is in samples. `callback` is the caller-owned audio
   // processing function called for each block of the input chunk.
   LappedTransform(size_t num_in_channels,
                   size_t num_out_channels,
@@ -99,10 +99,10 @@
                   Callback* callback);
   ~LappedTransform();
 
-  // Main audio processing helper method. Internally slices |in_chunk| into
+  // Main audio processing helper method. Internally slices `in_chunk` into
   // blocks, transforms them to frequency domain, calls the callback for each
   // block and returns a de-blocked time domain chunk of audio through
-  // |out_chunk|. Both buffers are caller-owned.
+  // `out_chunk`. Both buffers are caller-owned.
   void ProcessChunk(const float* const* in_chunk, float* const* out_chunk);
 
   // Get the chunk length.
@@ -132,8 +132,8 @@
 
   // Returns the initial delay.
   //
-  // This is the delay introduced by the |blocker_| to be able to get and return
-  // chunks of |chunk_length|, but process blocks of |block_length|.
+  // This is the delay introduced by the `blocker_` to be able to get and return
+  // chunks of `chunk_length`, but process blocks of `block_length`.
   size_t initial_delay() const { return blocker_.initial_delay(); }
 
  private:
diff --git a/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc b/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
index c72768e..1ca7a84 100644
--- a/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
+++ b/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
@@ -145,7 +145,7 @@
     info.redundant.push_back(it->first);
   }
 
-  // |info| will be implicitly cast to an EncodedInfoLeaf struct, effectively
+  // `info` will be implicitly cast to an EncodedInfoLeaf struct, effectively
   // discarding the (empty) vector of redundant information. This is
   // intentional.
   if (header_length_bytes > 0) {
diff --git a/modules/audio_coding/codecs/tools/audio_codec_speed_test.h b/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
index 59c2f16..c5f1d7c 100644
--- a/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
+++ b/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
@@ -31,9 +31,9 @@
   virtual void TearDown();
 
   // EncodeABlock(...) does the following:
-  // 1. encodes a block of audio, saved in |in_data|,
-  // 2. save the bit stream to |bit_stream| of |max_bytes| bytes in size,
-  // 3. assign |encoded_bytes| with the length of the bit stream (in bytes),
+  // 1. encodes a block of audio, saved in `in_data`,
+  // 2. save the bit stream to `bit_stream` of `max_bytes` bytes in size,
+  // 3. assign `encoded_bytes` with the length of the bit stream (in bytes),
   // 4. return the cost of time (in millisecond) spent on actual encoding.
   virtual float EncodeABlock(int16_t* in_data,
                              uint8_t* bit_stream,
@@ -41,15 +41,15 @@
                              size_t* encoded_bytes) = 0;
 
   // DecodeABlock(...) does the following:
-  // 1. decodes the bit stream in |bit_stream| with a length of |encoded_bytes|
+  // 1. decodes the bit stream in `bit_stream` with a length of `encoded_bytes`
   // (in bytes),
-  // 2. save the decoded audio in |out_data|,
+  // 2. save the decoded audio in `out_data`,
   // 3. return the cost of time (in millisecond) spent on actual decoding.
   virtual float DecodeABlock(const uint8_t* bit_stream,
                              size_t encoded_bytes,
                              int16_t* out_data) = 0;
 
-  // Encoding and decode an audio of |audio_duration| (in seconds) and
+  // Encode and decode audio of `audio_duration` (in seconds) and
   // record the runtime for encoding and decoding separately.
   void EncodeDecode(size_t audio_duration);
 
diff --git a/modules/audio_coding/include/audio_coding_module.h b/modules/audio_coding/include/audio_coding_module.h
index 102e2de..7551814 100644
--- a/modules/audio_coding/include/audio_coding_module.h
+++ b/modules/audio_coding/include/audio_coding_module.h
@@ -83,9 +83,9 @@
   //   Sender
   //
 
-  // |modifier| is called exactly once with one argument: a pointer to the
+  // `modifier` is called exactly once with one argument: a pointer to the
   // unique_ptr that holds the current encoder (which is null if there is no
-  // current encoder). For the duration of the call, |modifier| has exclusive
+  // current encoder). For the duration of the call, `modifier` has exclusive
   // access to the unique_ptr; it may call the encoder, steal the encoder and
   // replace it with another encoder or with nullptr, etc.
   virtual void ModifyEncoder(
diff --git a/modules/audio_coding/neteq/accelerate.cc b/modules/audio_coding/neteq/accelerate.cc
index 954b148..f4ef6cd 100644
--- a/modules/audio_coding/neteq/accelerate.cc
+++ b/modules/audio_coding/neteq/accelerate.cc
@@ -57,12 +57,12 @@
   if ((best_correlation > correlation_threshold) || !active_speech) {
     // Do accelerate operation by overlap add.
 
-    // Pre-calculate common multiplication with |fs_mult_|.
+    // Pre-calculate common multiplication with `fs_mult_`.
     // 120 corresponds to 15 ms.
     size_t fs_mult_120 = fs_mult_ * 120;
 
     if (fast_mode) {
-      // Fit as many multiples of |peak_index| as possible in fs_mult_120.
+      // Fit as many multiples of `peak_index` as possible in fs_mult_120.
       // TODO(henrik.lundin) Consider finding multiple correlation peaks and
       // pick the one with the longest correlation lag in this case.
       peak_index = (fs_mult_120 / peak_index) * peak_index;
@@ -72,11 +72,11 @@
     // Copy first part; 0 to 15 ms.
     output->PushBackInterleaved(
         rtc::ArrayView<const int16_t>(input, fs_mult_120 * num_channels_));
-    // Copy the |peak_index| starting at 15 ms to |temp_vector|.
+    // Copy the `peak_index` starting at 15 ms to `temp_vector`.
     AudioMultiVector temp_vector(num_channels_);
     temp_vector.PushBackInterleaved(rtc::ArrayView<const int16_t>(
         &input[fs_mult_120 * num_channels_], peak_index * num_channels_));
-    // Cross-fade |temp_vector| onto the end of |output|.
+    // Cross-fade `temp_vector` onto the end of `output`.
     output->CrossFade(temp_vector, peak_index);
     // Copy the last unmodified part, 15 ms + pitch period until the end.
     output->PushBackInterleaved(rtc::ArrayView<const int16_t>(
diff --git a/modules/audio_coding/neteq/accelerate.h b/modules/audio_coding/neteq/accelerate.h
index 124b633..e03f609 100644
--- a/modules/audio_coding/neteq/accelerate.h
+++ b/modules/audio_coding/neteq/accelerate.h
@@ -34,10 +34,10 @@
       : TimeStretch(sample_rate_hz, num_channels, background_noise) {}
 
   // This method performs the actual Accelerate operation. The samples are
-  // read from |input|, of length |input_length| elements, and are written to
-  // |output|. The number of samples removed through time-stretching is
-  // is provided in the output |length_change_samples|. The method returns
-  // the outcome of the operation as an enumerator value. If |fast_accelerate|
+  // read from `input`, of length `input_length` elements, and are written to
+  // `output`. The number of samples removed through time-stretching is
+  // provided in the output `length_change_samples`. The method returns
+  // the outcome of the operation as an enumerator value. If `fast_accelerate`
   // is true, the algorithm will relax the requirements on finding strong
   // correlations, and may remove multiple pitch periods if possible.
   ReturnCodes Process(const int16_t* input,
@@ -47,7 +47,7 @@
                       size_t* length_change_samples);
 
  protected:
-  // Sets the parameters |best_correlation| and |peak_index| to suitable
+  // Sets the parameters `best_correlation` and `peak_index` to suitable
   // values when the signal contains no active speech.
   void SetParametersForPassiveSpeech(size_t len,
                                      int16_t* best_correlation,
diff --git a/modules/audio_coding/neteq/audio_decoder_unittest.cc b/modules/audio_coding/neteq/audio_decoder_unittest.cc
index b13fe44..66b99b4 100644
--- a/modules/audio_coding/neteq/audio_decoder_unittest.cc
+++ b/modules/audio_coding/neteq/audio_decoder_unittest.cc
@@ -40,7 +40,7 @@
 constexpr int kOverheadBytesPerPacket = 50;
 
 // The absolute difference between the input and output (the first channel) is
-// compared vs |tolerance|. The parameter |delay| is used to correct for codec
+// compared vs `tolerance`. The parameter `delay` is used to correct for codec
 // delays.
 void CompareInputOutput(const std::vector<int16_t>& input,
                         const std::vector<int16_t>& output,
@@ -56,8 +56,8 @@
   }
 }
 
-// The absolute difference between the first two channels in |output| is
-// compared vs |tolerance|.
+// The absolute difference between the first two channels in `output` is
+// compared vs `tolerance`.
 void CompareTwoChannels(const std::vector<int16_t>& output,
                         size_t samples_per_channel,
                         size_t channels,
@@ -70,7 +70,7 @@
 }
 
 // Calculates mean-squared error between input and output (the first channel).
-// The parameter |delay| is used to correct for codec delays.
+// The parameter `delay` is used to correct for codec delays.
 double MseInputOutput(const std::vector<int16_t>& input,
                       const std::vector<int16_t>& output,
                       size_t num_samples,
@@ -152,10 +152,10 @@
   }
 
   // Encodes and decodes audio. The absolute difference between the input and
-  // output is compared vs |tolerance|, and the mean-squared error is compared
-  // with |mse|. The encoded stream should contain |expected_bytes|. For stereo
+  // output is compared vs `tolerance`, and the mean-squared error is compared
+  // with `mse`. The encoded stream should contain `expected_bytes`. For stereo
   // audio, the absolute difference between the two channels is compared vs
-  // |channel_diff_tolerance|.
+  // `channel_diff_tolerance`.
   void EncodeDecodeTest(size_t expected_bytes,
                         int tolerance,
                         double mse,
@@ -170,7 +170,7 @@
     std::vector<int16_t> input;
     std::vector<int16_t> decoded;
     while (processed_samples + frame_size_ <= data_length_) {
-      // Extend input vector with |frame_size_|.
+      // Extend input vector with `frame_size_`.
       input.resize(input.size() + frame_size_, 0);
       // Read from input file.
       ASSERT_GE(input.size() - processed_samples, frame_size_);
diff --git a/modules/audio_coding/neteq/audio_multi_vector.cc b/modules/audio_coding/neteq/audio_multi_vector.cc
index a3b5ce3..220d5a1 100644
--- a/modules/audio_coding/neteq/audio_multi_vector.cc
+++ b/modules/audio_coding/neteq/audio_multi_vector.cc
@@ -77,8 +77,8 @@
   size_t length_per_channel = append_this.size() / num_channels_;
   int16_t* temp_array = new int16_t[length_per_channel];  // Temporary storage.
   for (size_t channel = 0; channel < num_channels_; ++channel) {
-    // Copy elements to |temp_array|.
-    // Set |source_ptr| to first element of this channel.
+    // Copy elements to `temp_array`.
+    // Set `source_ptr` to first element of this channel.
     const int16_t* source_ptr = &append_this[channel];
     for (size_t i = 0; i < length_per_channel; ++i) {
       temp_array[i] = *source_ptr;
@@ -132,7 +132,7 @@
                                                   size_t length,
                                                   int16_t* destination) const {
   RTC_DCHECK(destination);
-  size_t index = 0;  // Number of elements written to |destination| so far.
+  size_t index = 0;  // Number of elements written to `destination` so far.
   RTC_DCHECK_LE(start_index, Size());
   start_index = std::min(start_index, Size());
   if (length + start_index > Size()) {
@@ -162,7 +162,7 @@
                                    size_t length,
                                    size_t position) {
   RTC_DCHECK_EQ(num_channels_, insert_this.num_channels_);
-  // Cap |length| at the length of |insert_this|.
+  // Cap `length` at the length of `insert_this`.
   RTC_DCHECK_LE(length, insert_this.Size());
   length = std::min(length, insert_this.Size());
   if (num_channels_ == insert_this.num_channels_) {
diff --git a/modules/audio_coding/neteq/audio_multi_vector.h b/modules/audio_coding/neteq/audio_multi_vector.h
index 0bb0b28..10179d7 100644
--- a/modules/audio_coding/neteq/audio_multi_vector.h
+++ b/modules/audio_coding/neteq/audio_multi_vector.h
@@ -24,12 +24,12 @@
 
 class AudioMultiVector {
  public:
-  // Creates an empty AudioMultiVector with |N| audio channels. |N| must be
+  // Creates an empty AudioMultiVector with `N` audio channels. `N` must be
   // larger than 0.
   explicit AudioMultiVector(size_t N);
 
-  // Creates an AudioMultiVector with |N| audio channels, each channel having
-  // an initial size. |N| must be larger than 0.
+  // Creates an AudioMultiVector with `N` audio channels, each channel having
+  // an initial size. `N` must be larger than 0.
   AudioMultiVector(size_t N, size_t initial_size);
 
   virtual ~AudioMultiVector();
@@ -37,47 +37,47 @@
   // Deletes all values and make the vector empty.
   virtual void Clear();
 
-  // Clears the vector and inserts |length| zeros into each channel.
+  // Clears the vector and inserts `length` zeros into each channel.
   virtual void Zeros(size_t length);
 
-  // Copies all values from this vector to |copy_to|. Any contents in |copy_to|
-  // are deleted. After the operation is done, |copy_to| will be an exact
+  // Copies all values from this vector to `copy_to`. Any contents in `copy_to`
+  // are deleted. After the operation is done, `copy_to` will be an exact
   // replica of this object. The source and the destination must have the same
   // number of channels.
   virtual void CopyTo(AudioMultiVector* copy_to) const;
 
-  // Appends the contents of |append_this| to the end of this object. The array
+  // Appends the contents of `append_this` to the end of this object. The array
   // is assumed to be channel-interleaved. The length must be an even multiple
   // of this object's number of channels. The length of this object is increased
   // with the length of the array divided by the number of channels.
   void PushBackInterleaved(rtc::ArrayView<const int16_t> append_this);
 
-  // Appends the contents of AudioMultiVector |append_this| to this object. The
-  // length of this object is increased with the length of |append_this|.
+  // Appends the contents of AudioMultiVector `append_this` to this object. The
+  // length of this object is increased with the length of `append_this`.
   virtual void PushBack(const AudioMultiVector& append_this);
 
-  // Appends the contents of AudioMultiVector |append_this| to this object,
-  // taken from |index| up until the end of |append_this|. The length of this
+  // Appends the contents of AudioMultiVector `append_this` to this object,
+  // taken from `index` up until the end of `append_this`. The length of this
   // object is increased.
   virtual void PushBackFromIndex(const AudioMultiVector& append_this,
                                  size_t index);
 
-  // Removes |length| elements from the beginning of this object, from each
+  // Removes `length` elements from the beginning of this object, from each
   // channel.
   virtual void PopFront(size_t length);
 
-  // Removes |length| elements from the end of this object, from each
+  // Removes `length` elements from the end of this object, from each
   // channel.
   virtual void PopBack(size_t length);
 
-  // Reads |length| samples from each channel and writes them interleaved to
-  // |destination|. The total number of elements written to |destination| is
-  // returned, i.e., |length| * number of channels. If the AudioMultiVector
-  // contains less than |length| samples per channel, this is reflected in the
+  // Reads `length` samples from each channel and writes them interleaved to
+  // `destination`. The total number of elements written to `destination` is
+  // returned, i.e., `length` * number of channels. If the AudioMultiVector
+  // contains less than `length` samples per channel, this is reflected in the
   // return value.
   virtual size_t ReadInterleaved(size_t length, int16_t* destination) const;
 
-  // Like ReadInterleaved() above, but reads from |start_index| instead of from
+  // Like ReadInterleaved() above, but reads from `start_index` instead of from
   // the beginning.
   virtual size_t ReadInterleavedFromIndex(size_t start_index,
                                           size_t length,
@@ -89,18 +89,18 @@
                                         int16_t* destination) const;
 
   // Overwrites each channel in this AudioMultiVector with values taken from
-  // |insert_this|. The values are taken from the beginning of |insert_this| and
-  // are inserted starting at |position|. |length| values are written into each
-  // channel. If |length| and |position| are selected such that the new data
+  // `insert_this`. The values are taken from the beginning of `insert_this` and
+  // are inserted starting at `position`. `length` values are written into each
+  // channel. If `length` and `position` are selected such that the new data
   // extends beyond the end of the current AudioVector, the vector is extended
-  // to accommodate the new data. |length| is limited to the length of
-  // |insert_this|.
+  // to accommodate the new data. `length` is limited to the length of
+  // `insert_this`.
   virtual void OverwriteAt(const AudioMultiVector& insert_this,
                            size_t length,
                            size_t position);
 
-  // Appends |append_this| to the end of the current vector. Lets the two
-  // vectors overlap by |fade_length| samples (per channel), and cross-fade
+  // Appends `append_this` to the end of the current vector. Lets the two
+  // vectors overlap by `fade_length` samples (per channel), and cross-fade
   // linearly in this region.
   virtual void CrossFade(const AudioMultiVector& append_this,
                          size_t fade_length);
@@ -111,14 +111,14 @@
   // Returns the number of elements per channel in this AudioMultiVector.
   virtual size_t Size() const;
 
-  // Verify that each channel can hold at least |required_size| elements. If
+  // Verify that each channel can hold at least `required_size` elements. If
   // not, extend accordingly.
   virtual void AssertSize(size_t required_size);
 
   virtual bool Empty() const;
 
   // Copies the data between two channels in the AudioMultiVector. The method
-  // does not add any new channel. Thus, |from_channel| and |to_channel| must
+  // does not add any new channel. Thus, `from_channel` and `to_channel` must
   // both be valid channel numbers.
   virtual void CopyChannel(size_t from_channel, size_t to_channel);
 
diff --git a/modules/audio_coding/neteq/audio_multi_vector_unittest.cc b/modules/audio_coding/neteq/audio_multi_vector_unittest.cc
index d1351d8..329377a 100644
--- a/modules/audio_coding/neteq/audio_multi_vector_unittest.cc
+++ b/modules/audio_coding/neteq/audio_multi_vector_unittest.cc
@@ -94,7 +94,7 @@
   AudioMultiVector vec(num_channels_);
   vec.PushBackInterleaved(array_interleaved_);
   AudioMultiVector vec_copy(num_channels_);
-  vec.CopyTo(&vec_copy);  // Copy from |vec| to |vec_copy|.
+  vec.CopyTo(&vec_copy);  // Copy from `vec` to `vec_copy`.
   ASSERT_EQ(num_channels_, vec.Channels());
   ASSERT_EQ(array_length(), vec.Size());
   ASSERT_EQ(num_channels_, vec_copy.Channels());
@@ -106,7 +106,7 @@
     }
   }
 
-  // Clear |vec| and verify that it is empty.
+  // Clear `vec` and verify that it is empty.
   vec.Clear();
   EXPECT_TRUE(vec.Empty());
 
@@ -208,7 +208,7 @@
   vec.PushBackInterleaved(array_interleaved_);
   vec.PopFront(1);  // Remove one element from each channel.
   ASSERT_EQ(array_length() - 1u, vec.Size());
-  // Let |ptr| point to the second element of the first channel in the
+  // Let `ptr` point to the second element of the first channel in the
   // interleaved array.
   int16_t* ptr = &array_interleaved_[num_channels_];
   for (size_t i = 0; i < array_length() - 1; ++i) {
@@ -227,7 +227,7 @@
   vec.PushBackInterleaved(array_interleaved_);
   vec.PopBack(1);  // Remove one element from each channel.
   ASSERT_EQ(array_length() - 1u, vec.Size());
-  // Let |ptr| point to the first element of the first channel in the
+  // Let `ptr` point to the first element of the first channel in the
   // interleaved array.
   int16_t* ptr = array_interleaved_.data();
   for (size_t i = 0; i < array_length() - 1; ++i) {
diff --git a/modules/audio_coding/neteq/audio_vector.cc b/modules/audio_coding/neteq/audio_vector.cc
index ce27a88..10e8936 100644
--- a/modules/audio_coding/neteq/audio_vector.cc
+++ b/modules/audio_coding/neteq/audio_vector.cc
@@ -245,14 +245,14 @@
 
 void AudioVector::CrossFade(const AudioVector& append_this,
                             size_t fade_length) {
-  // Fade length cannot be longer than the current vector or |append_this|.
+  // Fade length cannot be longer than the current vector or `append_this`.
   RTC_DCHECK_LE(fade_length, Size());
   RTC_DCHECK_LE(fade_length, append_this.Size());
   fade_length = std::min(fade_length, Size());
   fade_length = std::min(fade_length, append_this.Size());
   size_t position = Size() - fade_length + begin_index_;
   // Cross fade the overlapping regions.
-  // |alpha| is the mixing factor in Q14.
+  // `alpha` is the mixing factor in Q14.
   // TODO(hlundin): Consider skipping +1 in the denominator to produce a
   // smoother cross-fade, in particular at the end of the fade.
   int alpha_step = 16384 / (static_cast<int>(fade_length) + 1);
@@ -265,7 +265,7 @@
         14;
   }
   RTC_DCHECK_GE(alpha, 0);  // Verify that the slope was correct.
-  // Append what is left of |append_this|.
+  // Append what is left of `append_this`.
   size_t samples_to_push_back = append_this.Size() - fade_length;
   if (samples_to_push_back > 0)
     PushBack(append_this, samples_to_push_back, fade_length);
@@ -286,8 +286,8 @@
     return;
   const size_t length = Size();
   // Reserve one more sample to remove the ambiguity between empty vector and
-  // full vector. Therefore |begin_index_| == |end_index_| indicates empty
-  // vector, and |begin_index_| == (|end_index_| + 1) % capacity indicates
+  // full vector. Therefore `begin_index_` == `end_index_` indicates empty
+  // vector, and `begin_index_` == (`end_index_` + 1) % capacity indicates
   // full vector.
   std::unique_ptr<int16_t[]> temp_array(new int16_t[n + 1]);
   CopyTo(length, 0, temp_array.get());
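
A compact stand-alone sketch of the Q14 cross-fade the comments above describe, where 16384 represents 1.0; the rounding constant 8192 and the exact mixing order are illustrative rather than lifted from AudioVector::CrossFade.

#include <cstdint>
#include <cstdio>
#include <vector>

// Cross-fades `b` onto the tail of `a` over `fade_length` samples using a
// Q14 mixing factor that ramps linearly from ~1.0 down towards 0.0.
std::vector<int16_t> CrossFadeQ14(std::vector<int16_t> a,
                                  const std::vector<int16_t>& b,
                                  size_t fade_length) {
  const int alpha_step = 16384 / (static_cast<int>(fade_length) + 1);
  int alpha = 16384;
  const size_t start = a.size() - fade_length;
  for (size_t i = 0; i < fade_length; ++i) {
    alpha -= alpha_step;
    a[start + i] = static_cast<int16_t>(
        (alpha * a[start + i] + (16384 - alpha) * b[i] + 8192) >> 14);
  }
  // Append the part of `b` that did not take part in the fade.
  a.insert(a.end(), b.begin() + fade_length, b.end());
  return a;
}

int main() {
  std::vector<int16_t> a(20, 0), b(20, 100);
  std::vector<int16_t> out = CrossFadeQ14(a, b, /*fade_length=*/10);
  printf("%zu samples, value mid-fade: %d\n", out.size(), out[15]);
}
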
diff --git a/modules/audio_coding/neteq/audio_vector.h b/modules/audio_coding/neteq/audio_vector.h
index a257586..c722b56 100644
--- a/modules/audio_coding/neteq/audio_vector.h
+++ b/modules/audio_coding/neteq/audio_vector.h
@@ -34,27 +34,27 @@
   // Deletes all values and make the vector empty.
   virtual void Clear();
 
-  // Copies all values from this vector to |copy_to|. Any contents in |copy_to|
+  // Copies all values from this vector to `copy_to`. Any contents in `copy_to`
   // are deleted before the copy operation. After the operation is done,
-  // |copy_to| will be an exact replica of this object.
+  // `copy_to` will be an exact replica of this object.
   virtual void CopyTo(AudioVector* copy_to) const;
 
-  // Copies |length| values from |position| in this vector to |copy_to|.
+  // Copies `length` values from `position` in this vector to `copy_to`.
   virtual void CopyTo(size_t length, size_t position, int16_t* copy_to) const;
 
-  // Prepends the contents of AudioVector |prepend_this| to this object. The
-  // length of this object is increased with the length of |prepend_this|.
+  // Prepends the contents of AudioVector `prepend_this` to this object. The
+  // length of this object is increased with the length of `prepend_this`.
   virtual void PushFront(const AudioVector& prepend_this);
 
-  // Same as above, but with an array |prepend_this| with |length| elements as
+  // Same as above, but with an array `prepend_this` with `length` elements as
   // source.
   virtual void PushFront(const int16_t* prepend_this, size_t length);
 
   // Same as PushFront but will append to the end of this object.
   virtual void PushBack(const AudioVector& append_this);
 
-  // Appends a segment of |append_this| to the end of this object. The segment
-  // starts from |position| and has |length| samples.
+  // Appends a segment of `append_this` to the end of this object. The segment
+  // starts from `position` and has `length` samples.
   virtual void PushBack(const AudioVector& append_this,
                         size_t length,
                         size_t position);
@@ -62,47 +62,47 @@
   // Same as PushFront but will append to the end of this object.
   virtual void PushBack(const int16_t* append_this, size_t length);
 
-  // Removes |length| elements from the beginning of this object.
+  // Removes `length` elements from the beginning of this object.
   virtual void PopFront(size_t length);
 
-  // Removes |length| elements from the end of this object.
+  // Removes `length` elements from the end of this object.
   virtual void PopBack(size_t length);
 
-  // Extends this object with |extra_length| elements at the end. The new
+  // Extends this object with `extra_length` elements at the end. The new
   // elements are initialized to zero.
   virtual void Extend(size_t extra_length);
 
-  // Inserts |length| elements taken from the array |insert_this| and insert
-  // them at |position|. The length of the AudioVector is increased by |length|.
-  // |position| = 0 means that the new values are prepended to the vector.
-  // |position| = Size() means that the new values are appended to the vector.
+  // Inserts `length` elements taken from the array `insert_this` and inserts
+  // them at `position`. The length of the AudioVector is increased by `length`.
+  // `position` = 0 means that the new values are prepended to the vector.
+  // `position` = Size() means that the new values are appended to the vector.
   virtual void InsertAt(const int16_t* insert_this,
                         size_t length,
                         size_t position);
 
-  // Like InsertAt, but inserts |length| zero elements at |position|.
+  // Like InsertAt, but inserts `length` zero elements at `position`.
   virtual void InsertZerosAt(size_t length, size_t position);
 
-  // Overwrites |length| elements of this AudioVector starting from |position|
-  // with first values in |AudioVector|. The definition of |position|
-  // is the same as for InsertAt(). If |length| and |position| are selected
+  // Overwrites `length` elements of this AudioVector starting from `position`
+  // with the first values in `insert_this`. The definition of `position`
+  // is the same as for InsertAt(). If `length` and `position` are selected
   // such that the new data extends beyond the end of the current AudioVector,
   // the vector is extended to accommodate the new data.
   virtual void OverwriteAt(const AudioVector& insert_this,
                            size_t length,
                            size_t position);
 
-  // Overwrites |length| elements of this AudioVector with values taken from the
-  // array |insert_this|, starting at |position|. The definition of |position|
-  // is the same as for InsertAt(). If |length| and |position| are selected
+  // Overwrites `length` elements of this AudioVector with values taken from the
+  // array `insert_this`, starting at `position`. The definition of `position`
+  // is the same as for InsertAt(). If `length` and `position` are selected
   // such that the new data extends beyond the end of the current AudioVector,
   // the vector is extended to accommodate the new data.
   virtual void OverwriteAt(const int16_t* insert_this,
                            size_t length,
                            size_t position);
 
-  // Appends |append_this| to the end of the current vector. Lets the two
-  // vectors overlap by |fade_length| samples, and cross-fade linearly in this
+  // Appends `append_this` to the end of the current vector. Lets the two
+  // vectors overlap by `fade_length` samples, and cross-fade linearly in this
   // region.
   virtual void CrossFade(const AudioVector& append_this, size_t fade_length);
 
@@ -158,11 +158,11 @@
 
   size_t capacity_;  // Allocated number of samples in the array.
 
-  // The index of the first sample in |array_|, except when
+  // The index of the first sample in `array_`, except when
   // |begin_index_ == end_index_|, which indicates an empty buffer.
   size_t begin_index_;
 
-  // The index of the sample after the last sample in |array_|.
+  // The index of the sample after the last sample in `array_`.
   size_t end_index_;
 
   RTC_DISALLOW_COPY_AND_ASSIGN(AudioVector);
diff --git a/modules/audio_coding/neteq/audio_vector_unittest.cc b/modules/audio_coding/neteq/audio_vector_unittest.cc
index e39774c..ae9dd88 100644
--- a/modules/audio_coding/neteq/audio_vector_unittest.cc
+++ b/modules/audio_coding/neteq/audio_vector_unittest.cc
@@ -62,7 +62,7 @@
   AudioVector vec;
   AudioVector vec_copy;
   vec.PushBack(array_, array_length());
-  vec.CopyTo(&vec_copy);  // Copy from |vec| to |vec_copy|.
+  vec.CopyTo(&vec_copy);  // Copy from `vec` to `vec_copy`.
   ASSERT_EQ(array_length(), vec.Size());
   ASSERT_EQ(array_length(), vec_copy.Size());
   for (size_t i = 0; i < array_length(); ++i) {
@@ -70,7 +70,7 @@
     EXPECT_EQ(array_[i], vec_copy[i]);
   }
 
-  // Clear |vec| and verify that it is empty.
+  // Clear `vec` and verify that it is empty.
   vec.Clear();
   EXPECT_TRUE(vec.Empty());
 
@@ -178,8 +178,8 @@
   int insert_position = 5;
   vec.InsertAt(new_array, kNewLength, insert_position);
   // Verify that the vector looks as follows:
-  // {0, 1, ..., |insert_position| - 1, 100, 101, ..., 100 + kNewLength - 1,
-  //  |insert_position|, |insert_position| + 1, ..., kLength - 1}.
+  // {0, 1, ..., `insert_position` - 1, 100, 101, ..., 100 + kNewLength - 1,
+  //  `insert_position`, `insert_position` + 1, ..., kLength - 1}.
   size_t pos = 0;
   for (int i = 0; i < insert_position; ++i) {
     EXPECT_EQ(array_[i], vec[pos]);
@@ -309,8 +309,8 @@
   size_t insert_position = 2;
   vec.OverwriteAt(new_array, kNewLength, insert_position);
   // Verify that the vector looks as follows:
-  // {0, ..., |insert_position| - 1, 100, 101, ..., 100 + kNewLength - 1,
-  //  |insert_position|, |insert_position| + 1, ..., kLength - 1}.
+  // {0, ..., `insert_position` - 1, 100, 101, ..., 100 + kNewLength - 1,
+  //  `insert_position`, `insert_position` + 1, ..., kLength - 1}.
   size_t pos = 0;
   for (pos = 0; pos < insert_position; ++pos) {
     EXPECT_EQ(array_[pos], vec[pos]);
@@ -340,8 +340,8 @@
   vec.OverwriteAt(new_array, kNewLength, insert_position);
   ASSERT_EQ(array_length() - 2u + kNewLength, vec.Size());
   // Verify that the vector looks as follows:
-  // {0, ..., |insert_position| - 1, 100, 101, ..., 100 + kNewLength - 1,
-  //  |insert_position|, |insert_position| + 1, ..., kLength - 1}.
+  // {0, ..., `insert_position` - 1, 100, 101, ..., 100 + kNewLength - 1,
+  //  `insert_position`, `insert_position` + 1, ..., kLength - 1}.
   int pos = 0;
   for (pos = 0; pos < insert_position; ++pos) {
     EXPECT_EQ(array_[pos], vec[pos]);
@@ -350,7 +350,7 @@
     EXPECT_EQ(new_array[i], vec[pos]);
     ++pos;
   }
-  // Verify that we checked to the end of |vec|.
+  // Verify that we checked to the end of `vec`.
   EXPECT_EQ(vec.Size(), static_cast<size_t>(pos));
 }
 
@@ -359,7 +359,7 @@
   static const size_t kFadeLength = 10;
   AudioVector vec1(kLength);
   AudioVector vec2(kLength);
-  // Set all vector elements to 0 in |vec1| and 100 in |vec2|.
+  // Set all vector elements to 0 in `vec1` and 100 in `vec2`.
   for (size_t i = 0; i < kLength; ++i) {
     vec1[i] = 0;
     vec2[i] = 100;
diff --git a/modules/audio_coding/neteq/background_noise.cc b/modules/audio_coding/neteq/background_noise.cc
index 8f61598..2c95d3b 100644
--- a/modules/audio_coding/neteq/background_noise.cc
+++ b/modules/audio_coding/neteq/background_noise.cc
@@ -108,8 +108,8 @@
       if ((sample_energy > 0) &&
           (int64_t{5} * residual_energy >= int64_t{16} * sample_energy)) {
         // Spectrum is flat enough; save filter parameters.
-        // |temp_signal| + |kVecLen| - |kMaxLpcOrder| points at the first of the
-        // |kMaxLpcOrder| samples in the residual signal, which will form the
+        // `temp_signal` + `kVecLen` - `kMaxLpcOrder` points at the first of the
+        // `kMaxLpcOrder` samples in the residual signal, which will form the
         // filter state for the next noise generation.
         SaveParameters(channel_ix, lpc_coefficients,
                        temp_signal + kVecLen - kMaxLpcOrder, sample_energy,
@@ -117,7 +117,7 @@
         filter_params_saved = true;
       }
     } else {
-      // Will only happen if post-decode VAD is disabled and |sample_energy| is
+      // Will only happen if post-decode VAD is disabled and `sample_energy` is
       // not low enough. Increase the threshold for update so that it increases
       // by a factor 4 in 4 seconds.
       IncrementEnergyThreshold(channel_ix, sample_energy);
@@ -264,8 +264,8 @@
     parameters.max_energy = sample_energy;
   }
 
-  // Set |energy_update_threshold| to no less than 60 dB lower than
-  // |max_energy_|. Adding 524288 assures proper rounding.
+  // Set `energy_update_threshold` to no less than 60 dB lower than
+  // `max_energy_`. Adding 524288 assures proper rounding.
   int32_t energy_update_threshold = (parameters.max_energy + 524288) >> 20;
   if (energy_update_threshold > parameters.energy_update_threshold) {
     parameters.energy_update_threshold = energy_update_threshold;
@@ -297,9 +297,9 @@
 
   // Calculate scale and shift factor.
   parameters.scale = static_cast<int16_t>(WebRtcSpl_SqrtFloor(residual_energy));
-  // Add 13 to the |scale_shift_|, since the random numbers table is in
+  // Add 13 to the `scale_shift_`, since the random numbers table is in
   // Q13.
-  // TODO(hlundin): Move the "13" to where the |scale_shift_| is used?
+  // TODO(hlundin): Move the "13" to where the `scale_shift_` is used?
   parameters.scale_shift =
       static_cast<int16_t>(13 + ((kLogResidualLength + norm_shift) / 2));
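
A quick numeric check of the threshold comment above: shifting right by 20 divides by 2^20 (about 1.05e6), which is roughly 60.2 dB in power terms, and adding 524288 (2^19) rounds to nearest before the shift. The example energy below is arbitrary.

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  const int32_t max_energy = 1 << 28;  // arbitrary example energy
  const int32_t threshold = (max_energy + 524288) >> 20;  // round, then /2^20
  printf("threshold = %d\n", static_cast<int>(threshold));
  printf("attenuation = %.1f dB\n", 10.0 * std::log10(1 << 20));
}
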
 
diff --git a/modules/audio_coding/neteq/background_noise.h b/modules/audio_coding/neteq/background_noise.h
index 631db0d..005b376 100644
--- a/modules/audio_coding/neteq/background_noise.h
+++ b/modules/audio_coding/neteq/background_noise.h
@@ -37,12 +37,12 @@
   void Reset();
 
   // Updates the parameter estimates based on the signal currently in the
-  // |sync_buffer|, and on the latest decision in |vad| if it is running.
+  // `sync_buffer`, and on the latest decision in `vad` if it is running.
   // Returns true if the filter parameters are updated.
   bool Update(const AudioMultiVector& sync_buffer, const PostDecodeVad& vad);
 
   // Generates background noise given a random vector and writes the output to
-  // |buffer|.
+  // `buffer`.
   void GenerateBackgroundNoise(rtc::ArrayView<const int16_t> random_vector,
                                size_t channel,
                                int mute_slope,
@@ -50,29 +50,29 @@
                                size_t num_noise_samples,
                                int16_t* buffer);
 
-  // Returns |energy_| for |channel|.
+  // Returns `energy_` for `channel`.
   int32_t Energy(size_t channel) const;
 
-  // Sets the value of |mute_factor_| for |channel| to |value|.
+  // Sets the value of `mute_factor_` for `channel` to `value`.
   void SetMuteFactor(size_t channel, int16_t value);
 
-  // Returns |mute_factor_| for |channel|.
+  // Returns `mute_factor_` for `channel`.
   int16_t MuteFactor(size_t channel) const;
 
-  // Returns a pointer to |filter_| for |channel|.
+  // Returns a pointer to `filter_` for `channel`.
   const int16_t* Filter(size_t channel) const;
 
-  // Returns a pointer to |filter_state_| for |channel|.
+  // Returns a pointer to `filter_state_` for `channel`.
   const int16_t* FilterState(size_t channel) const;
 
-  // Copies |input| to the filter state. Will not copy more than |kMaxLpcOrder|
+  // Copies `input` to the filter state. Will not copy more than `kMaxLpcOrder`
   // elements.
   void SetFilterState(size_t channel, rtc::ArrayView<const int16_t> input);
 
-  // Returns |scale_| for |channel|.
+  // Returns `scale_` for `channel`.
   int16_t Scale(size_t channel) const;
 
-  // Returns |scale_shift_| for |channel|.
+  // Returns `scale_shift_` for `channel`.
   int16_t ScaleShift(size_t channel) const;
 
   // Accessors.
@@ -117,7 +117,7 @@
                                    size_t length,
                                    int32_t* auto_correlation) const;
 
-  // Increments the energy threshold by a factor 1 + |kThresholdIncrement|.
+  // Increments the energy threshold by a factor 1 + `kThresholdIncrement`.
   void IncrementEnergyThreshold(size_t channel, int32_t sample_energy);
 
   // Updates the filter parameters.
diff --git a/modules/audio_coding/neteq/buffer_level_filter.cc b/modules/audio_coding/neteq/buffer_level_filter.cc
index 8901c01..0ccc7bb 100644
--- a/modules/audio_coding/neteq/buffer_level_filter.cc
+++ b/modules/audio_coding/neteq/buffer_level_filter.cc
@@ -30,10 +30,10 @@
 void BufferLevelFilter::Update(size_t buffer_size_samples,
                                int time_stretched_samples) {
   // Filter:
-  // |filtered_current_level_| = |level_factor_| * |filtered_current_level_| +
-  //                            (1 - |level_factor_|) * |buffer_size_samples|
-  // |level_factor_| and |filtered_current_level_| are in Q8.
-  // |buffer_size_samples| is in Q0.
+  // `filtered_current_level_` = `level_factor_` * `filtered_current_level_` +
+  //                            (1 - `level_factor_`) * `buffer_size_samples`
+  // `level_factor_` and `filtered_current_level_` are in Q8.
+  // `buffer_size_samples` is in Q0.
   const int64_t filtered_current_level =
       (level_factor_ * int64_t{filtered_current_level_} >> 8) +
       (256 - level_factor_) * rtc::dchecked_cast<int64_t>(buffer_size_samples);
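
For readers unfamiliar with Q8 arithmetic, a stand-alone restatement of the smoothing step spelled out in the comment above; the factor 251 and the 960-sample input are example values, not constants taken from this class.

#include <cstdint>
#include <cstdio>

// One step of `filtered = factor * filtered + (1 - factor) * input`, with
// `factor_q8` and `filtered_q8` in Q8 and `input_samples` in plain samples.
int64_t UpdateQ8(int64_t filtered_q8, int factor_q8, int64_t input_samples) {
  return ((factor_q8 * filtered_q8) >> 8) + (256 - factor_q8) * input_samples;
}

int main() {
  int64_t level_q8 = 0;
  for (int i = 0; i < 10; ++i)
    level_q8 = UpdateQ8(level_q8, /*factor_q8=*/251, /*input_samples=*/960);
  // Convert back to samples by dropping the Q8 scaling; the level creeps
  // towards 960 as more updates accumulate.
  printf("filtered level ~= %lld samples\n",
         static_cast<long long>(level_q8 >> 8));
}
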
diff --git a/modules/audio_coding/neteq/buffer_level_filter.h b/modules/audio_coding/neteq/buffer_level_filter.h
index 218a142..94a3715 100644
--- a/modules/audio_coding/neteq/buffer_level_filter.h
+++ b/modules/audio_coding/neteq/buffer_level_filter.h
@@ -24,8 +24,8 @@
   virtual ~BufferLevelFilter() {}
   virtual void Reset();
 
-  // Updates the filter. Current buffer size is |buffer_size_samples|.
-  // |time_stretched_samples| is subtracted from the filtered value (thus
+  // Updates the filter. Current buffer size is `buffer_size_samples`.
+  // `time_stretched_samples` is subtracted from the filtered value (thus
   // bypassing the filter operation).
   virtual void Update(size_t buffer_size_samples, int time_stretched_samples);
 
diff --git a/modules/audio_coding/neteq/buffer_level_filter_unittest.cc b/modules/audio_coding/neteq/buffer_level_filter_unittest.cc
index 63fc83b..6773e96 100644
--- a/modules/audio_coding/neteq/buffer_level_filter_unittest.cc
+++ b/modules/audio_coding/neteq/buffer_level_filter_unittest.cc
@@ -38,7 +38,7 @@
         filter.Update(value, 0 /* time_stretched_samples */);
       }
       // Expect the filtered value to be (theoretically)
-      // (1 - (251/256) ^ |times|) * |value|.
+      // (1 - (251/256) ^ `times`) * `value`.
       double expected_value_double = (1 - pow(251.0 / 256.0, times)) * value;
       int expected_value = static_cast<int>(expected_value_double);
 
@@ -62,7 +62,7 @@
     filter.Update(kValue, 0 /* time_stretched_samples */);
   }
   // Expect the filtered value to be
-  // (1 - (252/256) ^ |kTimes|) * |kValue|.
+  // (1 - (252/256) ^ `kTimes`) * `kValue`.
   int expected_value = 15;
   EXPECT_EQ(expected_value, filter.filtered_current_level());
 
@@ -72,7 +72,7 @@
     filter.Update(kValue, 0 /* time_stretched_samples */);
   }
   // Expect the filtered value to be
-  // (1 - (253/256) ^ |kTimes|) * |kValue|.
+  // (1 - (253/256) ^ `kTimes`) * `kValue`.
   expected_value = 11;
   EXPECT_EQ(expected_value, filter.filtered_current_level());
 
@@ -82,7 +82,7 @@
     filter.Update(kValue, 0 /* time_stretched_samples */);
   }
   // Expect the filtered value to be
-  // (1 - (254/256) ^ |kTimes|) * |kValue|.
+  // (1 - (254/256) ^ `kTimes`) * `kValue`.
   expected_value = 8;
   EXPECT_EQ(expected_value, filter.filtered_current_level());
 }
@@ -98,13 +98,13 @@
     filter.Update(kValue, 0);
   }
   // Expect the filtered value to be
-  // (1 - (251/256) ^ |kTimes|) * |kValue|.
+  // (1 - (251/256) ^ `kTimes`) * `kValue`.
   const int kExpectedValue = 18;
   EXPECT_EQ(kExpectedValue, filter.filtered_current_level());
 
   // Update filter again, now with non-zero value for packet length.
   // Set the current filtered value to be the input, in order to isolate the
-  // impact of |kTimeStretchedSamples|.
+  // impact of `kTimeStretchedSamples`.
   filter.Update(filter.filtered_current_level(), kTimeStretchedSamples);
   EXPECT_EQ(kExpectedValue - kTimeStretchedSamples,
             filter.filtered_current_level());
diff --git a/modules/audio_coding/neteq/comfort_noise.cc b/modules/audio_coding/neteq/comfort_noise.cc
index 7169f06..a2ce888 100644
--- a/modules/audio_coding/neteq/comfort_noise.cc
+++ b/modules/audio_coding/neteq/comfort_noise.cc
@@ -119,8 +119,8 @@
       muting_window += muting_window_increment;
       unmuting_window += unmuting_window_increment;
     }
-    // Remove |overlap_length_| samples from the front of |output| since they
-    // were mixed into |sync_buffer_| above.
+    // Remove `overlap_length_` samples from the front of `output` since they
+    // were mixed into `sync_buffer_` above.
     output->PopFront(overlap_length_);
   }
   first_call_ = false;
diff --git a/modules/audio_coding/neteq/comfort_noise.h b/modules/audio_coding/neteq/comfort_noise.h
index f748772..6419d39 100644
--- a/modules/audio_coding/neteq/comfort_noise.h
+++ b/modules/audio_coding/neteq/comfort_noise.h
@@ -45,11 +45,11 @@
   // Resets the state. Should be called before each new comfort noise period.
   void Reset();
 
-  // Update the comfort noise generator with the parameters in |packet|.
+  // Update the comfort noise generator with the parameters in `packet`.
   int UpdateParameters(const Packet& packet);
 
-  // Generates |requested_length| samples of comfort noise and writes to
-  // |output|. If this is the first in call after Reset (or first after creating
+  // Generates `requested_length` samples of comfort noise and writes to
+  // `output`. If this is the first call after Reset (or first after creating
   // the object), it will also mix in comfort noise at the end of the
   // SyncBuffer object provided in the constructor.
   int Generate(size_t requested_length, AudioMultiVector* output);
diff --git a/modules/audio_coding/neteq/comfort_noise_unittest.cc b/modules/audio_coding/neteq/comfort_noise_unittest.cc
index b3fbb4e..b436800 100644
--- a/modules/audio_coding/neteq/comfort_noise_unittest.cc
+++ b/modules/audio_coding/neteq/comfort_noise_unittest.cc
@@ -23,7 +23,7 @@
   MockDecoderDatabase db;
   SyncBuffer sync_buffer(1, 1000);
   ComfortNoise cn(fs, &db, &sync_buffer);
-  EXPECT_CALL(db, Die());  // Called when |db| goes out of scope.
+  EXPECT_CALL(db, Die());  // Called when `db` goes out of scope.
 }
 
 // TODO(hlundin): Write more tests.
diff --git a/modules/audio_coding/neteq/cross_correlation.h b/modules/audio_coding/neteq/cross_correlation.h
index 9ce8be8..5082ce6 100644
--- a/modules/audio_coding/neteq/cross_correlation.h
+++ b/modules/audio_coding/neteq/cross_correlation.h
@@ -17,19 +17,19 @@
 namespace webrtc {
 
 // The function calculates the cross-correlation between two sequences
-// |sequence_1| and |sequence_2|. |sequence_1| is taken as reference, with
-// |sequence_1_length| as its length. |sequence_2| slides for the calculation of
-// cross-correlation. The result will be saved in |cross_correlation|.
-// |cross_correlation_length| correlation points are calculated.
+// `sequence_1` and `sequence_2`. `sequence_1` is taken as reference, with
+// `sequence_1_length` as its length. `sequence_2` slides for the calculation of
+// cross-correlation. The result will be saved in `cross_correlation`.
+// `cross_correlation_length` correlation points are calculated.
 // The corresponding lag starts from 0, and increases with a step of
-// |cross_correlation_step|. The result is without normalization. To avoid
+// `cross_correlation_step`. The result is without normalization. To avoid
 // overflow, the result will be right shifted. The amount of shifts will be
 // returned.
 //
 // Input:
 //     - sequence_1     : First sequence (reference).
 //     - sequence_2     : Second sequence (sliding during calculation).
-//     - sequence_1_length : Length of |sequence_1|.
+//     - sequence_1_length : Length of `sequence_1`.
 //     - cross_correlation_length : Number of cross-correlations to calculate.
 //     - cross_correlation_step : Step in the lag for the cross-correlation.
 //
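The contract described above (unnormalized correlation, lag stepping by `cross_correlation_step`, and a returned right-shift count) can be illustrated with a small, self-contained sketch. This is not the WebRtcSpl implementation; the shift heuristic below uses a conservative worst-case bound, and `CrossCorrelationSketch` is a hypothetical name.

```cpp
#include <cstddef>
#include <cstdint>

// Computes `cross_correlation_length` unnormalized correlation points between
// `sequence_1` (the fixed reference) and lagged versions of `sequence_2`,
// right-shifting every result by the same amount and returning that shift.
int CrossCorrelationSketch(int32_t* cross_correlation,
                           const int16_t* sequence_1,
                           const int16_t* sequence_2,
                           size_t sequence_1_length,
                           size_t cross_correlation_length,
                           int cross_correlation_step) {
  // Choose a shift so that the worst-case accumulator
  // (sequence_1_length * 2^30) fits in 32 bits. The real code derives the
  // shift from the actual signal, which is tighter than this bound.
  int shifts = 0;
  uint64_t worst_case = static_cast<uint64_t>(sequence_1_length) << 30;
  while ((worst_case >> shifts) > 0x7FFFFFFF) {
    ++shifts;
  }

  for (size_t i = 0; i < cross_correlation_length; ++i) {
    // Lag for this correlation point: i * cross_correlation_step.
    const int16_t* lagged = sequence_2 + i * cross_correlation_step;
    int64_t sum = 0;
    for (size_t n = 0; n < sequence_1_length; ++n) {
      sum += static_cast<int64_t>(sequence_1[n]) * lagged[n];
    }
    cross_correlation[i] = static_cast<int32_t>(sum >> shifts);
  }
  return shifts;  // Needed by the caller to interpret the scaled results.
}
```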
diff --git a/modules/audio_coding/neteq/decision_logic.cc b/modules/audio_coding/neteq/decision_logic.cc
index edefbe6..ceefe50 100644
--- a/modules/audio_coding/neteq/decision_logic.cc
+++ b/modules/audio_coding/neteq/decision_logic.cc
@@ -309,8 +309,8 @@
         std::max(target_level_samples * 3 / 4,
                  target_level_samples -
                      kDecelerationTargetLevelOffsetMs * samples_per_ms);
-    // |higher_limit| is equal to |target_level|, but should at
-    // least be 20 ms higher than |lower_limit|.
+    // `high_limit` is equal to `target_level_samples`, but should at
+    // least be 20 ms higher than `low_limit`.
     const int high_limit =
         std::max(target_level_samples, low_limit + 20 * samples_per_ms);
 
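A worked example of the window computed above, using assumed values (48 kHz, a 100 ms target level, and a hypothetical `kDecelerationTargetLevelOffsetMs` of 85 ms); the constant's real value may differ.

```cpp
#include <algorithm>
#include <cstdio>

int main() {
  const int samples_per_ms = 48;                           // 48 kHz
  const int target_level_samples = 100 * samples_per_ms;   // 100 ms target
  const int kDecelerationTargetLevelOffsetMs = 85;         // assumed value
  const int low_limit =
      std::max(target_level_samples * 3 / 4,
               target_level_samples -
                   kDecelerationTargetLevelOffsetMs * samples_per_ms);
  const int high_limit =
      std::max(target_level_samples, low_limit + 20 * samples_per_ms);
  // low_limit  = max(3600, 720)  = 3600 samples (75 ms)
  // high_limit = max(4800, 4560) = 4800 samples (100 ms)
  std::printf("low=%d high=%d\n", low_limit, high_limit);
  return 0;
}
```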
diff --git a/modules/audio_coding/neteq/decision_logic.h b/modules/audio_coding/neteq/decision_logic.h
index 8be4511..693f616 100644
--- a/modules/audio_coding/neteq/decision_logic.h
+++ b/modules/audio_coding/neteq/decision_logic.h
@@ -47,23 +47,23 @@
   void SetSampleRate(int fs_hz, size_t output_size_samples) override;
 
   // Given info about the latest received packet, and current jitter buffer
-  // status, returns the operation. |target_timestamp| and |expand_mutefactor|
-  // are provided for reference. |last_packet_samples| is the number of samples
+  // status, returns the operation. `target_timestamp` and `expand_mutefactor`
+  // are provided for reference. `last_packet_samples` is the number of samples
   // obtained from the last decoded frame. If there is a packet available, it
-  // should be supplied in |packet|; otherwise it should be NULL. The mode
+  // should be supplied in `packet`; otherwise it should be NULL. The mode
   // resulting from the last call to NetEqImpl::GetAudio is supplied in
-  // |last_mode|. If there is a DTMF event to play, |play_dtmf| should be set to
-  // true. The output variable |reset_decoder| will be set to true if a reset is
+  // `last_mode`. If there is a DTMF event to play, `play_dtmf` should be set to
+  // true. The output variable `reset_decoder` will be set to true if a reset is
   // required; otherwise it is left unchanged (i.e., it can remain true if it
   // was true before the call).
   NetEq::Operation GetDecision(const NetEqController::NetEqStatus& status,
                                bool* reset_decoder) override;
 
-  // These methods test the |cng_state_| for different conditions.
+  // These methods test the `cng_state_` for different conditions.
   bool CngRfc3389On() const override { return cng_state_ == kCngRfc3389On; }
   bool CngOff() const override { return cng_state_ == kCngOff; }
 
-  // Resets the |cng_state_| to kCngOff.
+  // Resets the `cng_state_` to kCngOff.
   void SetCngOff() override { cng_state_ = kCngOff; }
 
   // Reports back to DecisionLogic whether the decision to do expand remains or
@@ -72,7 +72,7 @@
   // sync buffer.
   void ExpandDecision(NetEq::Operation operation) override;
 
-  // Adds |value| to |sample_memory_|.
+  // Adds `value` to `sample_memory_`.
   void AddSampleMemory(int32_t value) override { sample_memory_ += value; }
 
   int TargetLevelMs() const override { return delay_manager_->TargetDelayMs(); }
@@ -120,8 +120,8 @@
 
   enum CngState { kCngOff, kCngRfc3389On, kCngInternalOn };
 
-  // Updates the |buffer_level_filter_| with the current buffer level
-  // |buffer_size_samples|.
+  // Updates the `buffer_level_filter_` with the current buffer level
+  // `buffer_size_samples`.
   void FilterBufferLevel(size_t buffer_size_samples);
 
   // Returns the operation given that the next available packet is a comfort
@@ -132,7 +132,7 @@
                                         size_t generated_noise_samples);
 
   // Returns the operation given that no packets are available (except maybe
-  // a DTMF event, flagged by setting |play_dtmf| true).
+  // a DTMF event, flagged by setting `play_dtmf` true).
   virtual NetEq::Operation NoPacket(bool play_dtmf);
 
   // Returns the operation to do given that the expected packet is available.
@@ -160,13 +160,13 @@
   // Checks if the current (filtered) buffer level is under the target level.
   bool UnderTargetLevel() const;
 
-  // Checks if |timestamp_leap| is so long into the future that a reset due
+  // Checks if `timestamp_leap` is so long into the future that a reset due
   // to exceeding kReinitAfterExpands will be done.
   bool ReinitAfterExpands(uint32_t timestamp_leap) const;
 
   // Checks if we still have not done enough expands to cover the distance from
   // the last decoded packet to the next available packet, the distance being
-  // conveyed in |timestamp_leap|.
+  // conveyed in `timestamp_leap`.
   bool PacketTooEarly(uint32_t timestamp_leap) const;
 
   // Checks if num_consecutive_expands_ >= kMaxWaitForPacket.
diff --git a/modules/audio_coding/neteq/decoder_database.cc b/modules/audio_coding/neteq/decoder_database.cc
index e755e7b..e9176f4 100644
--- a/modules/audio_coding/neteq/decoder_database.cc
+++ b/modules/audio_coding/neteq/decoder_database.cc
@@ -161,7 +161,7 @@
       rtp_payload_type,
       DecoderInfo(audio_format, codec_pair_id_, decoder_factory_.get())));
   if (ret.second == false) {
-    // Database already contains a decoder with type |rtp_payload_type|.
+    // Database already contains a decoder with type `rtp_payload_type`.
     return kDecoderExists;
   }
   return kOK;
@@ -169,7 +169,7 @@
 
 int DecoderDatabase::Remove(uint8_t rtp_payload_type) {
   if (decoders_.erase(rtp_payload_type) == 0) {
-    // No decoder with that |rtp_payload_type|.
+    // No decoder with that `rtp_payload_type`.
     return kDecoderNotFound;
   }
   if (active_decoder_type_ == rtp_payload_type) {
@@ -199,7 +199,7 @@
 
 int DecoderDatabase::SetActiveDecoder(uint8_t rtp_payload_type,
                                       bool* new_decoder) {
-  // Check that |rtp_payload_type| exists in the database.
+  // Check that `rtp_payload_type` exists in the database.
   const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
   if (!info) {
     // Decoder not found.
@@ -231,7 +231,7 @@
 }
 
 int DecoderDatabase::SetActiveCngDecoder(uint8_t rtp_payload_type) {
-  // Check that |rtp_payload_type| exists in the database.
+  // Check that `rtp_payload_type` exists in the database.
   const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
   if (!info) {
     // Decoder not found.
diff --git a/modules/audio_coding/neteq/decoder_database.h b/modules/audio_coding/neteq/decoder_database.h
index e0a3fe3..a63a9cf 100644
--- a/modules/audio_coding/neteq/decoder_database.h
+++ b/modules/audio_coding/neteq/decoder_database.h
@@ -80,15 +80,15 @@
     // Returns true if the decoder's format is RED.
     bool IsRed() const { return subtype_ == Subtype::kRed; }
 
-    // Returns true if the decoder's format is named |name|.
+    // Returns true if the decoder's format is named `name`.
     bool IsType(const char* name) const;
-    // Returns true if the decoder's format is named |name|.
+    // Returns true if the decoder's format is named `name`.
     bool IsType(const std::string& name) const;
 
     const std::string& get_name() const { return name_; }
 
    private:
-    // TODO(ossu): |name_| is kept here while we retain the old external
+    // TODO(ossu): `name_` is kept here while we retain the old external
     //             decoder interface. Remove this once using an
     //             AudioDecoderFactory has supplanted the old functionality.
     const std::string name_;
@@ -143,26 +143,26 @@
   virtual int RegisterPayload(int rtp_payload_type,
                               const SdpAudioFormat& audio_format);
 
-  // Removes the entry for |rtp_payload_type| from the database.
+  // Removes the entry for `rtp_payload_type` from the database.
   // Returns kDecoderNotFound or kOK depending on the outcome of the operation.
   virtual int Remove(uint8_t rtp_payload_type);
 
   // Remove all entries.
   virtual void RemoveAll();
 
-  // Returns a pointer to the DecoderInfo struct for |rtp_payload_type|. If
-  // no decoder is registered with that |rtp_payload_type|, NULL is returned.
+  // Returns a pointer to the DecoderInfo struct for `rtp_payload_type`. If
+  // no decoder is registered with that `rtp_payload_type`, NULL is returned.
   virtual const DecoderInfo* GetDecoderInfo(uint8_t rtp_payload_type) const;
 
-  // Sets the active decoder to be |rtp_payload_type|. If this call results in a
-  // change of active decoder, |new_decoder| is set to true. The previous active
+  // Sets the active decoder to be `rtp_payload_type`. If this call results in a
+  // change of active decoder, `new_decoder` is set to true. The previous active
   // decoder's AudioDecoder object is deleted.
   virtual int SetActiveDecoder(uint8_t rtp_payload_type, bool* new_decoder);
 
   // Returns the current active decoder, or NULL if no active decoder exists.
   virtual AudioDecoder* GetActiveDecoder() const;
 
-  // Sets the active comfort noise decoder to be |rtp_payload_type|. If this
+  // Sets the active comfort noise decoder to be `rtp_payload_type`. If this
   // call results in a change of active comfort noise decoder, the previous
   // active decoder's AudioDecoder object is deleted.
   virtual int SetActiveCngDecoder(uint8_t rtp_payload_type);
@@ -176,26 +176,26 @@
   // exists.
 
   // Returns a pointer to the AudioDecoder object associated with
-  // |rtp_payload_type|, or NULL if none is registered. If the AudioDecoder
+  // `rtp_payload_type`, or NULL if none is registered. If the AudioDecoder
   // object does not exist for that decoder, the object is created.
   AudioDecoder* GetDecoder(uint8_t rtp_payload_type) const;
 
-  // Returns if |rtp_payload_type| is registered with a format named |name|.
+  // Returns if `rtp_payload_type` is registered with a format named `name`.
   bool IsType(uint8_t rtp_payload_type, const char* name) const;
 
-  // Returns if |rtp_payload_type| is registered with a format named |name|.
+  // Returns if `rtp_payload_type` is registered with a format named `name`.
   bool IsType(uint8_t rtp_payload_type, const std::string& name) const;
 
-  // Returns true if |rtp_payload_type| is registered as comfort noise.
+  // Returns true if `rtp_payload_type` is registered as comfort noise.
   bool IsComfortNoise(uint8_t rtp_payload_type) const;
 
-  // Returns true if |rtp_payload_type| is registered as DTMF.
+  // Returns true if `rtp_payload_type` is registered as DTMF.
   bool IsDtmf(uint8_t rtp_payload_type) const;
 
-  // Returns true if |rtp_payload_type| is registered as RED.
+  // Returns true if `rtp_payload_type` is registered as RED.
   bool IsRed(uint8_t rtp_payload_type) const;
 
-  // Returns kOK if all packets in |packet_list| carry payload types that are
+  // Returns kOK if all packets in `packet_list` carry payload types that are
   // registered in the database. Otherwise, returns kDecoderNotFound.
   int CheckPayloadTypes(const PacketList& packet_list) const;
 
diff --git a/modules/audio_coding/neteq/decoder_database_unittest.cc b/modules/audio_coding/neteq/decoder_database_unittest.cc
index 33bee8d..f28a0fd 100644
--- a/modules/audio_coding/neteq/decoder_database_unittest.cc
+++ b/modules/audio_coding/neteq/decoder_database_unittest.cc
@@ -148,7 +148,7 @@
   }
   PacketList packet_list;
   for (int i = 0; i < kNumPayloads + 1; ++i) {
-    // Create packet with payload type |i|. The last packet will have a payload
+    // Create packet with payload type `i`. The last packet will have a payload
     // type that is not registered in the decoder database.
     Packet packet;
     packet.payload_type = i;
diff --git a/modules/audio_coding/neteq/delay_manager.cc b/modules/audio_coding/neteq/delay_manager.cc
index 41de274..c244902 100644
--- a/modules/audio_coding/neteq/delay_manager.cc
+++ b/modules/audio_coding/neteq/delay_manager.cc
@@ -191,7 +191,7 @@
     }
   }
 
-  // Calculate new |target_level_ms_| based on updated statistics.
+  // Calculate new `target_level_ms_` based on updated statistics.
   int bucket_index = histogram_->Quantile(histogram_quantile_);
   target_level_ms_ = (1 + bucket_index) * kBucketSizeMs;
   target_level_ms_ = std::max(target_level_ms_, effective_minimum_delay_ms_);
@@ -293,7 +293,7 @@
 }
 
 bool DelayManager::SetMaximumDelay(int delay_ms) {
-  // If |delay_ms| is zero then it unsets the maximum delay and target level is
+  // If `delay_ms` is zero then it unsets the maximum delay and target level is
   // unconstrained by maximum delay.
   if (delay_ms != 0 &&
       (delay_ms < minimum_delay_ms_ || delay_ms < packet_len_ms_)) {
@@ -321,7 +321,7 @@
 }
 
 void DelayManager::UpdateEffectiveMinimumDelay() {
-  // Clamp |base_minimum_delay_ms_| into the range which can be effectively
+  // Clamp `base_minimum_delay_ms_` into the range which can be effectively
   // used.
   const int base_minimum_delay_ms =
       rtc::SafeClamp(base_minimum_delay_ms_, 0, MinimumDelayUpperBound());
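A minimal sketch of how the pieces touched in this file fit together: the histogram quantile selects a bucket, the bucket index maps to milliseconds, and the result is clamped by the effective minimum and (when set) maximum delay. The function name and the 20 ms bucket width are assumptions for illustration, not the DelayManager API.

```cpp
#include <algorithm>

// `bucket_index` would come from Histogram::Quantile(); the other arguments
// mirror the member variables referenced in the comments above.
int TargetLevelMsSketch(int bucket_index,
                        int effective_minimum_delay_ms,
                        int maximum_delay_ms /* 0 means "no maximum" */) {
  constexpr int kBucketSizeMs = 20;  // assumed bucket width
  int target_level_ms = (1 + bucket_index) * kBucketSizeMs;
  // Never go below the effective minimum delay...
  target_level_ms = std::max(target_level_ms, effective_minimum_delay_ms);
  // ...and never above the maximum delay, if one is set.
  if (maximum_delay_ms > 0) {
    target_level_ms = std::min(target_level_ms, maximum_delay_ms);
  }
  return target_level_ms;
}
```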
diff --git a/modules/audio_coding/neteq/delay_manager.h b/modules/audio_coding/neteq/delay_manager.h
index 9832ced..cb35274 100644
--- a/modules/audio_coding/neteq/delay_manager.h
+++ b/modules/audio_coding/neteq/delay_manager.h
@@ -34,9 +34,9 @@
                std::unique_ptr<Histogram> histogram);
 
   // Create a DelayManager object. Notify the delay manager that the packet
-  // buffer can hold no more than |max_packets_in_buffer| packets (i.e., this
+  // buffer can hold no more than `max_packets_in_buffer` packets (i.e., this
   // is the number of packet slots in the buffer) and that the target delay
-  // should be greater than or equal to |base_minimum_delay_ms|. Supply a
+  // should be greater than or equal to `base_minimum_delay_ms`. Supply a
   // PeakDetector object to the DelayManager.
   static std::unique_ptr<DelayManager> Create(int max_packets_in_buffer,
                                               int base_minimum_delay_ms,
@@ -44,10 +44,10 @@
 
   virtual ~DelayManager();
 
-  // Updates the delay manager with a new incoming packet, with |timestamp| from
+  // Updates the delay manager with a new incoming packet, with `timestamp` from
   // the RTP header. This updates the statistics and a new target buffer level
   // is calculated. Returns the relative delay if it can be calculated. If
-  // |reset| is true, restarts the relative arrival delay calculation from this
+  // `reset` is true, restarts the relative arrival delay calculation from this
   // packet.
   virtual absl::optional<int> Update(uint32_t timestamp,
                                      int sample_rate_hz,
@@ -63,7 +63,7 @@
   virtual int SetPacketAudioLength(int length_ms);
 
   // Accessors and mutators.
+  // Assuming `delay_ms` is in the valid range.
+  // Assuming `delay` is in valid range.
   virtual bool SetMinimumDelay(int delay_ms);
   virtual bool SetMaximumDelay(int delay_ms);
   virtual bool SetBaseMinimumDelay(int delay_ms);
@@ -78,25 +78,25 @@
 
  private:
   // Provides value which minimum delay can't exceed based on current buffer
-  // size and given |maximum_delay_ms_|. Lower bound is a constant 0.
+  // size and given `maximum_delay_ms_`. Lower bound is a constant 0.
   int MinimumDelayUpperBound() const;
 
-  // Updates |delay_history_|.
+  // Updates `delay_history_`.
   void UpdateDelayHistory(int iat_delay_ms,
                           uint32_t timestamp,
                           int sample_rate_hz);
 
-  // Calculate relative packet arrival delay from |delay_history_|.
+  // Calculate relative packet arrival delay from `delay_history_`.
   int CalculateRelativePacketArrivalDelay() const;
 
-  // Updates |effective_minimum_delay_ms_| delay based on current
-  // |minimum_delay_ms_|, |base_minimum_delay_ms_| and |maximum_delay_ms_|
+  // Updates `effective_minimum_delay_ms_` delay based on current
+  // `minimum_delay_ms_`, `base_minimum_delay_ms_` and `maximum_delay_ms_`
   // and buffer size.
   void UpdateEffectiveMinimumDelay();
 
-  // Makes sure that |delay_ms| is less than maximum delay, if any maximum
-  // is set. Also, if possible check |delay_ms| to be less than 75% of
-  // |max_packets_in_buffer_|.
+  // Makes sure that `delay_ms` is less than the maximum delay, if any maximum
+  // is set. Also, if possible, checks that `delay_ms` is less than 75% of
+  // `max_packets_in_buffer_`.
   bool IsValidMinimumDelay(int delay_ms) const;
 
   bool IsValidBaseMinimumDelay(int delay_ms) const;
diff --git a/modules/audio_coding/neteq/dsp_helper.cc b/modules/audio_coding/neteq/dsp_helper.cc
index 2b1518e..54ec556 100644
--- a/modules/audio_coding/neteq/dsp_helper.cc
+++ b/modules/audio_coding/neteq/dsp_helper.cc
@@ -94,7 +94,7 @@
     return factor;
   }
   int end_factor = 0;
-  // Loop over the channels, starting at the same |factor| each time.
+  // Loop over the channels, starting at the same `factor` each time.
   for (size_t channel = 0; channel < signal->Channels(); ++channel) {
     end_factor =
         RampSignal(&(*signal)[channel], start_index, length, factor, increment);
@@ -116,7 +116,7 @@
       // Single peak.  The parabola fit assumes that an extra point is
       // available; worst case it gets a zero on the high end of the signal.
       // TODO(hlundin): This can potentially get much worse. It breaks the
-      // API contract, that the length of |data| is |data_length|.
+      // API contract, that the length of `data` is `data_length`.
       data_length++;
     }
 
diff --git a/modules/audio_coding/neteq/dsp_helper.h b/modules/audio_coding/neteq/dsp_helper.h
index 82fe14e..7bdeba6 100644
--- a/modules/audio_coding/neteq/dsp_helper.h
+++ b/modules/audio_coding/neteq/dsp_helper.h
@@ -51,8 +51,8 @@
   static const int kUnmuteFactorIncrement48kHz = 1057;
 
   // Multiplies the signal with a gradually changing factor.
-  // The first sample is multiplied with |factor| (in Q14). For each sample,
-  // |factor| is increased (additive) by the |increment| (in Q20), which can
+  // The first sample is multiplied with `factor` (in Q14). For each sample,
+  // `factor` is increased (additive) by the `increment` (in Q20), which can
   // be negative. Returns the scale factor after the last increment.
   static int RampSignal(const int16_t* input,
                         size_t length,
@@ -60,14 +60,14 @@
                         int increment,
                         int16_t* output);
 
-  // Same as above, but with the samples of |signal| being modified in-place.
+  // Same as above, but with the samples of `signal` being modified in-place.
   static int RampSignal(int16_t* signal,
                         size_t length,
                         int factor,
                         int increment);
 
-  // Same as above, but processes |length| samples from |signal|, starting at
-  // |start_index|.
+  // Same as above, but processes `length` samples from `signal`, starting at
+  // `start_index`.
   static int RampSignal(AudioVector* signal,
                         size_t start_index,
                         size_t length,
@@ -81,10 +81,10 @@
                         int factor,
                         int increment);
 
-  // Peak detection with parabolic fit. Looks for |num_peaks| maxima in |data|,
-  // having length |data_length| and sample rate multiplier |fs_mult|. The peak
-  // locations and values are written to the arrays |peak_index| and
-  // |peak_value|, respectively. Both arrays must hold at least |num_peaks|
+  // Peak detection with parabolic fit. Looks for `num_peaks` maxima in `data`,
+  // having length `data_length` and sample rate multiplier `fs_mult`. The peak
+  // locations and values are written to the arrays `peak_index` and
+  // `peak_value`, respectively. Both arrays must hold at least `num_peaks`
   // elements.
   static void PeakDetection(int16_t* data,
                             size_t data_length,
@@ -94,30 +94,30 @@
                             int16_t* peak_value);
 
   // Estimates the height and location of a maximum. The three values in the
-  // array |signal_points| are used as basis for a parabolic fit, which is then
-  // used to find the maximum in an interpolated signal. The |signal_points| are
+  // array `signal_points` are used as basis for a parabolic fit, which is then
+  // used to find the maximum in an interpolated signal. The `signal_points` are
   // assumed to be from a 4 kHz signal, while the maximum, written to
-  // |peak_index| and |peak_value| is given in the full sample rate, as
-  // indicated by the sample rate multiplier |fs_mult|.
+  // `peak_index` and `peak_value` is given in the full sample rate, as
+  // indicated by the sample rate multiplier `fs_mult`.
   static void ParabolicFit(int16_t* signal_points,
                            int fs_mult,
                            size_t* peak_index,
                            int16_t* peak_value);
 
-  // Calculates the sum-abs-diff for |signal| when compared to a displaced
+  // Calculates the sum-abs-diff for `signal` when compared to a displaced
   // version of itself. Returns the displacement lag that results in the minimum
-  // distortion. The resulting distortion is written to |distortion_value|.
-  // The values of |min_lag| and |max_lag| are boundaries for the search.
+  // distortion. The resulting distortion is written to `distortion_value`.
+  // The values of `min_lag` and `max_lag` are boundaries for the search.
   static size_t MinDistortion(const int16_t* signal,
                               size_t min_lag,
                               size_t max_lag,
                               size_t length,
                               int32_t* distortion_value);
 
-  // Mixes |length| samples from |input1| and |input2| together and writes the
-  // result to |output|. The gain for |input1| starts at |mix_factor| (Q14) and
-  // is decreased by |factor_decrement| (Q14) for each sample. The gain for
-  // |input2| is the complement 16384 - mix_factor.
+  // Mixes `length` samples from `input1` and `input2` together and writes the
+  // result to `output`. The gain for `input1` starts at `mix_factor` (Q14) and
+  // is decreased by `factor_decrement` (Q14) for each sample. The gain for
+  // `input2` is the complement 16384 - mix_factor.
   static void CrossFade(const int16_t* input1,
                         const int16_t* input2,
                         size_t length,
@@ -125,24 +125,24 @@
                         int16_t factor_decrement,
                         int16_t* output);
 
-  // Scales |input| with an increasing gain. Applies |factor| (Q14) to the first
-  // sample and increases the gain by |increment| (Q20) for each sample. The
-  // result is written to |output|. |length| samples are processed.
+  // Scales `input` with an increasing gain. Applies `factor` (Q14) to the first
+  // sample and increases the gain by `increment` (Q20) for each sample. The
+  // result is written to `output`. `length` samples are processed.
   static void UnmuteSignal(const int16_t* input,
                            size_t length,
                            int16_t* factor,
                            int increment,
                            int16_t* output);
 
-  // Starts at unity gain and gradually fades out |signal|. For each sample,
-  // the gain is reduced by |mute_slope| (Q14). |length| samples are processed.
+  // Starts at unity gain and gradually fades out `signal`. For each sample,
+  // the gain is reduced by `mute_slope` (Q14). `length` samples are processed.
   static void MuteSignal(int16_t* signal, int mute_slope, size_t length);
 
-  // Downsamples |input| from |sample_rate_hz| to 4 kHz sample rate. The input
-  // has |input_length| samples, and the method will write |output_length|
-  // samples to |output|. Compensates for the phase delay of the downsampling
-  // filters if |compensate_delay| is true. Returns -1 if the input is too short
-  // to produce |output_length| samples, otherwise 0.
+  // Downsamples `input` from `sample_rate_hz` to 4 kHz sample rate. The input
+  // has `input_length` samples, and the method will write `output_length`
+  // samples to `output`. Compensates for the phase delay of the downsampling
+  // filters if `compensate_delay` is true. Returns -1 if the input is too short
+  // to produce `output_length` samples, otherwise 0.
   static int DownsampleTo4kHz(const int16_t* input,
                               size_t input_length,
                               size_t output_length,
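A hedged sketch of the Q14/Q20 ramp described for RampSignal above, written as a free function with hypothetical names; the real DspHelper routine may differ in rounding details.

```cpp
#include <cstddef>
#include <cstdint>

// Scales each input sample by `factor` (Q14) and advances `factor` by
// `increment` (Q20, may be negative) between samples. Returns the gain after
// the last increment, in Q14.
int RampSignalSketch(const int16_t* input,
                     size_t length,
                     int factor,     // Q14 start gain
                     int increment,  // Q20 per-sample change
                     int16_t* output) {
  int factor_q20 = factor << 6;  // Work in Q20 so the increment is not lost.
  for (size_t i = 0; i < length; ++i) {
    // (sample * factor_q14) >> 14 gives the scaled sample.
    output[i] = static_cast<int16_t>(
        (static_cast<int32_t>(input[i]) * (factor_q20 >> 6)) >> 14);
    factor_q20 += increment;
  }
  return factor_q20 >> 6;  // Back in Q14.
}
```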
diff --git a/modules/audio_coding/neteq/dsp_helper_unittest.cc b/modules/audio_coding/neteq/dsp_helper_unittest.cc
index ec434a4..0924741 100644
--- a/modules/audio_coding/neteq/dsp_helper_unittest.cc
+++ b/modules/audio_coding/neteq/dsp_helper_unittest.cc
@@ -24,7 +24,7 @@
     input[i] = 1000;
   }
   int start_factor = 0;
-  // Ramp from 0 to 1 (in Q14) over the array. Note that |increment| is in Q20,
+  // Ramp from 0 to 1 (in Q14) over the array. Note that `increment` is in Q20,
   // while the factor is in Q14, hence the shift by 6.
   int increment = (16384 << 6) / kLen;
 
@@ -36,7 +36,7 @@
     EXPECT_EQ(1000 * i / kLen, output[i]);
   }
 
-  // Test second method. (Note that this modifies |input|.)
+  // Test second method. (Note that this modifies `input`.)
   stop_factor = DspHelper::RampSignal(input, kLen, start_factor, increment);
   EXPECT_EQ(16383, stop_factor);  // Almost reach 1 in Q14.
   for (int i = 0; i < kLen; ++i) {
@@ -54,31 +54,31 @@
       input[channel][i] = 1000;
     }
   }
-  // We want to start ramping at |start_index| and keep ramping for |kLen|
+  // We want to start ramping at `start_index` and keep ramping for `kLen`
   // samples.
   int start_index = kLen;
   int start_factor = 0;
-  // Ramp from 0 to 1 (in Q14) in |kLen| samples. Note that |increment| is in
+  // Ramp from 0 to 1 (in Q14) in `kLen` samples. Note that `increment` is in
   // Q20, while the factor is in Q14, hence the shift by 6.
   int increment = (16384 << 6) / kLen;
 
   int stop_factor =
       DspHelper::RampSignal(&input, start_index, kLen, start_factor, increment);
   EXPECT_EQ(16383, stop_factor);  // Almost reach 1 in Q14.
-  // Verify that the first |kLen| samples are left untouched.
+  // Verify that the first `kLen` samples are left untouched.
   int i;
   for (i = 0; i < kLen; ++i) {
     for (int channel = 0; channel < kChannels; ++channel) {
       EXPECT_EQ(1000, input[channel][i]);
     }
   }
-  // Verify that the next block of |kLen| samples are ramped.
+  // Verify that the next block of `kLen` samples are ramped.
   for (; i < 2 * kLen; ++i) {
     for (int channel = 0; channel < kChannels; ++channel) {
       EXPECT_EQ(1000 * (i - kLen) / kLen, input[channel][i]);
     }
   }
-  // Verify the last |kLen| samples are left untouched.
+  // Verify the last `kLen` samples are left untouched.
   for (; i < 3 * kLen; ++i) {
     for (int channel = 0; channel < kChannels; ++channel) {
       EXPECT_EQ(1000, input[channel][i]);
diff --git a/modules/audio_coding/neteq/dtmf_buffer.cc b/modules/audio_coding/neteq/dtmf_buffer.cc
index f81036b..9f78aca 100644
--- a/modules/audio_coding/neteq/dtmf_buffer.cc
+++ b/modules/audio_coding/neteq/dtmf_buffer.cc
@@ -32,7 +32,7 @@
   buffer_.clear();
 }
 
-// The ParseEvent method parses 4 bytes from |payload| according to this format
+// The ParseEvent method parses 4 bytes from `payload` according to this format
 // from RFC 4733:
 //
 //  0                   1                   2                   3
@@ -119,8 +119,8 @@
 bool DtmfBuffer::GetEvent(uint32_t current_timestamp, DtmfEvent* event) {
   DtmfList::iterator it = buffer_.begin();
   while (it != buffer_.end()) {
-    // |event_end| is an estimate of where the current event ends. If the end
-    // bit is set, we know that the event ends at |timestamp| + |duration|.
+    // `event_end` is an estimate of where the current event ends. If the end
+    // bit is set, we know that the event ends at `timestamp` + `duration`.
     uint32_t event_end = it->timestamp + it->duration;
 #ifdef LEGACY_BITEXACT
     bool next_available = false;
@@ -226,7 +226,7 @@
   }
 }
 
-// Returns true if |a| goes before |b| in the sorting order ("|a| < |b|").
+// Returns true if `a` goes before `b` in the sorting order ("`a` < `b`").
 // The events are ranked using their start timestamp (taking wrap-around into
 // account). In the unlikely situation that two events share the same start
 // timestamp, the event number is used to rank the two. Note that packets
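For reference, a self-contained sketch of the RFC 4733 byte layout that ParseEvent reads: byte 0 carries the event number, byte 1 the end bit (MSB), a reserved bit and the 6-bit volume, and bytes 2-3 the big-endian duration. The struct and function names are local stand-ins, not NetEq's DtmfEvent/ParseEvent.

```cpp
#include <cstddef>
#include <cstdint>

struct ParsedDtmf {
  uint32_t rtp_timestamp;
  int event_no;  // 0..255 (DTMF digits use 0..15)
  bool end_bit;
  int volume;    // 0..63, attenuation in dBm0
  int duration;  // in RTP timestamp units
};

bool ParseDtmfPayload(uint32_t rtp_timestamp,
                      const uint8_t* payload,
                      size_t payload_length_bytes,
                      ParsedDtmf* out) {
  if (payload_length_bytes < 4 || out == nullptr) return false;
  out->rtp_timestamp = rtp_timestamp;  // Copied as-is, like ParseEvent does.
  out->event_no = payload[0];
  out->end_bit = (payload[1] & 0x80) != 0;
  out->volume = payload[1] & 0x3F;
  out->duration = (payload[2] << 8) | payload[3];
  return true;
}
```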
diff --git a/modules/audio_coding/neteq/dtmf_buffer.h b/modules/audio_coding/neteq/dtmf_buffer.h
index 6bf75e1..9209cae 100644
--- a/modules/audio_coding/neteq/dtmf_buffer.h
+++ b/modules/audio_coding/neteq/dtmf_buffer.h
@@ -45,7 +45,7 @@
     kInvalidSampleRate
   };
 
-  // Set up the buffer for use at sample rate |fs_hz|.
+  // Set up the buffer for use at sample rate `fs_hz`.
   explicit DtmfBuffer(int fs_hz);
 
   virtual ~DtmfBuffer();
@@ -53,21 +53,21 @@
   // Flushes the buffer.
   virtual void Flush();
 
-  // Static method to parse 4 bytes from |payload| as a DTMF event (RFC 4733)
-  // and write the parsed information into the struct |event|. Input variable
-  // |rtp_timestamp| is simply copied into the struct.
+  // Static method to parse 4 bytes from `payload` as a DTMF event (RFC 4733)
+  // and write the parsed information into the struct `event`. Input variable
+  // `rtp_timestamp` is simply copied into the struct.
   static int ParseEvent(uint32_t rtp_timestamp,
                         const uint8_t* payload,
                         size_t payload_length_bytes,
                         DtmfEvent* event);
 
-  // Inserts |event| into the buffer. The method looks for a matching event and
+  // Inserts `event` into the buffer. The method looks for a matching event and
   // merges the two if a match is found.
   virtual int InsertEvent(const DtmfEvent& event);
 
-  // Checks if a DTMF event should be played at time |current_timestamp|. If so,
+  // Checks if a DTMF event should be played at time `current_timestamp`. If so,
   // the method returns true; otherwise false. The parameters of the event to
-  // play will be written to |event|.
+  // play will be written to `event`.
   virtual bool GetEvent(uint32_t current_timestamp, DtmfEvent* event);
 
   // Number of events in the buffer.
@@ -87,7 +87,7 @@
   // Compares two events and returns true if they are the same.
   static bool SameEvent(const DtmfEvent& a, const DtmfEvent& b);
 
-  // Merges |event| to the event pointed out by |it|. The method checks that
+  // Merges `event` into the event pointed to by `it`. The method checks that
   // the two events are the same (using the SameEvent method), and merges them
   // if that was the case, returning true. If the events are not the same, false
   // is returned.
diff --git a/modules/audio_coding/neteq/dtmf_buffer_unittest.cc b/modules/audio_coding/neteq/dtmf_buffer_unittest.cc
index 607a5ec..83745b6 100644
--- a/modules/audio_coding/neteq/dtmf_buffer_unittest.cc
+++ b/modules/audio_coding/neteq/dtmf_buffer_unittest.cc
@@ -208,12 +208,12 @@
   DtmfEvent event2(timestamp, event_no, volume, duration, end_bit);
   EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event2));
   EXPECT_EQ(2u, buffer.Length());
-  // Now we expect to get the new event when supplying |timestamp_now|.
+  // Now we expect to get the new event when supplying `timestamp_now`.
   EXPECT_TRUE(buffer.GetEvent(timestamp_now, &out_event));
   EXPECT_TRUE(EqualEvents(event2, out_event));
   // Expect the first event to be erased now.
   EXPECT_EQ(1u, buffer.Length());
-  // Move |timestamp_now| to more than 560 samples after the end of the second
+  // Move `timestamp_now` to more than 560 samples after the end of the second
   // event. Expect that event to be erased.
   timestamp_now = timestamp + duration + 600;
 #ifdef LEGACY_BITEXACT
diff --git a/modules/audio_coding/neteq/dtmf_tone_generator.cc b/modules/audio_coding/neteq/dtmf_tone_generator.cc
index 6c412e3..49cbf8f 100644
--- a/modules/audio_coding/neteq/dtmf_tone_generator.cc
+++ b/modules/audio_coding/neteq/dtmf_tone_generator.cc
@@ -167,7 +167,7 @@
   initialized_ = false;
 }
 
-// Generate num_samples of DTMF signal and write to |output|.
+// Generates `num_samples` samples of DTMF signal and writes them to `output`.
 int DtmfToneGenerator::Generate(size_t num_samples, AudioMultiVector* output) {
   if (!initialized_) {
     return kNotInitialized;
diff --git a/modules/audio_coding/neteq/expand.cc b/modules/audio_coding/neteq/expand.cc
index 37a08d6..9c32746 100644
--- a/modules/audio_coding/neteq/expand.cc
+++ b/modules/audio_coding/neteq/expand.cc
@@ -167,7 +167,7 @@
     }
 
     // Smooth the expanded if it has not been muted to a low amplitude and
-    // |current_voice_mix_factor| is larger than 0.5.
+    // `current_voice_mix_factor` is larger than 0.5.
     if ((parameters.mute_factor > 819) &&
         (parameters.current_voice_mix_factor > 8192)) {
       size_t start_ix = sync_buffer_->Size() - overlap_length_;
@@ -197,7 +197,7 @@
     }
 
     // Unvoiced part.
-    // Filter |scaled_random_vector| through |ar_filter_|.
+    // Filter `scaled_random_vector` through `ar_filter_`.
     memcpy(unvoiced_vector - kUnvoicedLpcOrder, parameters.ar_filter_state,
            sizeof(int16_t) * kUnvoicedLpcOrder);
     int32_t add_constant = 0;
@@ -402,7 +402,7 @@
 
   // Calculate correlation in downsampled domain (4 kHz sample rate).
   size_t correlation_length = 51;  // TODO(hlundin): Legacy bit-exactness.
-  // If it is decided to break bit-exactness |correlation_length| should be
+  // If it is decided to break bit-exactness `correlation_length` should be
   // initialized to the return value of Correlation().
   Correlation(audio_history.get(), signal_length, correlation_vector);
 
@@ -417,7 +417,7 @@
   best_correlation_index[1] += fs_mult_20;
   best_correlation_index[2] += fs_mult_20;
 
-  // Calculate distortion around the |kNumCorrelationCandidates| best lags.
+  // Calculate distortion around the `kNumCorrelationCandidates` best lags.
   int distortion_scale = 0;
   for (size_t i = 0; i < kNumCorrelationCandidates; i++) {
     size_t min_index =
@@ -434,7 +434,7 @@
   WebRtcSpl_VectorBitShiftW32ToW16(best_distortion, kNumCorrelationCandidates,
                                    best_distortion_w32, distortion_scale);
 
-  // Find the maximizing index |i| of the cost function
+  // Find the maximizing index `i` of the cost function
   // f[i] = best_correlation[i] / best_distortion[i].
   int32_t best_ratio = std::numeric_limits<int32_t>::min();
   size_t best_index = std::numeric_limits<size_t>::max();
@@ -458,7 +458,7 @@
   max_lag_ = std::max(distortion_lag, correlation_lag);
 
   // Calculate the exact best correlation in the range between
-  // |correlation_lag| and |distortion_lag|.
+  // `correlation_lag` and `distortion_lag`.
   correlation_length = std::max(std::min(distortion_lag + 10, fs_mult_120),
                                 static_cast<size_t>(60 * fs_mult));
 
@@ -487,7 +487,7 @@
         (31 - WebRtcSpl_NormW32(static_cast<int32_t>(correlation_length))) - 31;
     correlation_scale = std::max(0, correlation_scale);
 
-    // Calculate the correlation, store in |correlation_vector2|.
+    // Calculate the correlation, store in `correlation_vector2`.
     WebRtcSpl_CrossCorrelation(
         correlation_vector2,
         &(audio_history[signal_length - correlation_length]),
@@ -537,7 +537,7 @@
     }
 
     // Extract the two vectors expand_vector0 and expand_vector1 from
-    // |audio_history|.
+    // `audio_history`.
     size_t expansion_length = max_lag_ + overlap_length_;
     const int16_t* vector1 = &(audio_history[signal_length - expansion_length]);
     const int16_t* vector2 = vector1 - distortion_lag;
@@ -594,13 +594,13 @@
       expand_lags_[1] = distortion_lag;
       expand_lags_[2] = distortion_lag;
     } else {
-      // |distortion_lag| and |correlation_lag| are not equal; use different
+      // `distortion_lag` and `correlation_lag` are not equal; use different
       // combinations of the two.
-      // First lag is |distortion_lag| only.
+      // First lag is `distortion_lag` only.
       expand_lags_[0] = distortion_lag;
       // Second lag is the average of the two.
       expand_lags_[1] = (distortion_lag + correlation_lag) / 2;
-      // Third lag is the average again, but rounding towards |correlation_lag|.
+      // Third lag is the average again, but rounding towards `correlation_lag`.
       if (distortion_lag > correlation_lag) {
         expand_lags_[2] = (distortion_lag + correlation_lag - 1) / 2;
       } else {
@@ -638,7 +638,7 @@
       if (stability != 1) {
         // Set first coefficient to 4096 (1.0 in Q12).
         parameters.ar_filter[0] = 4096;
-        // Set remaining |kUnvoicedLpcOrder| coefficients to zero.
+        // Set remaining `kUnvoicedLpcOrder` coefficients to zero.
         WebRtcSpl_MemSetW16(parameters.ar_filter + 1, 0, kUnvoicedLpcOrder);
       }
     }
@@ -656,7 +656,7 @@
                sizeof(int16_t) * noise_length);
       } else {
         // Only applies to SWB where length could be larger than
-        // |kRandomTableSize|.
+        // `kRandomTableSize`.
         memcpy(random_vector, RandomVector::kRandomTable,
                sizeof(int16_t) * RandomVector::kRandomTableSize);
         RTC_DCHECK_LE(noise_length, kMaxSampleRate / 8000 * 120 + 30);
@@ -694,7 +694,7 @@
     int32_t unvoiced_energy = WebRtcSpl_DotProductWithScale(
         unvoiced_vector, unvoiced_vector, 128, unvoiced_prescale);
 
-    // Normalize |unvoiced_energy| to 28 or 29 bits to preserve sqrt() accuracy.
+    // Normalize `unvoiced_energy` to 28 or 29 bits to preserve sqrt() accuracy.
     int16_t unvoiced_scale = WebRtcSpl_NormW32(unvoiced_energy) - 3;
     // Make sure we do an odd number of shifts since we already have 7 shifts
     // from dividing with 128 earlier. This will make the total scale factor
@@ -715,7 +715,7 @@
     //   voice_mix_factor = 0;
     if (corr_coefficient > 7875) {
       int16_t x1, x2, x3;
-      // |corr_coefficient| is in Q14.
+      // `corr_coefficient` is in Q14.
       x1 = static_cast<int16_t>(corr_coefficient);
       x2 = (x1 * x1) >> 14;  // Shift 14 to keep result in Q14.
       x3 = (x1 * x2) >> 14;
@@ -733,13 +733,13 @@
     }
 
     // Calculate muting slope. Reuse value from earlier scaling of
-    // |expand_vector0| and |expand_vector1|.
+    // `expand_vector0` and `expand_vector1`.
     int16_t slope = amplitude_ratio;
     if (slope > 12288) {
       // slope > 1.5.
       // Calculate (1 - (1 / slope)) / distortion_lag =
       // (slope - 1) / (distortion_lag * slope).
-      // |slope| is in Q13, so 1 corresponds to 8192. Shift up to Q25 before
+      // `slope` is in Q13, so 1 corresponds to 8192. Shift up to Q25 before
       // the division.
       // Shift the denominator from Q13 to Q5 before the division. The result of
       // the division will then be in Q20.
@@ -757,7 +757,7 @@
       parameters.onset = true;
     } else {
       // Calculate (1 - slope) / distortion_lag.
-      // Shift |slope| by 7 to Q20 before the division. The result is in Q20.
+      // Shift `slope` by 7 to Q20 before the division. The result is in Q20.
       parameters.mute_slope = WebRtcSpl_DivW32W16(
           (8192 - slope) * 128, static_cast<int16_t>(distortion_lag));
       if (parameters.voice_mix_factor <= 13107) {
@@ -826,7 +826,7 @@
       kDownsampledLength, filter_coefficients, num_coefficients,
       downsampling_factor, kFilterDelay);
 
-  // Normalize |downsampled_input| to using all 16 bits.
+  // Normalize `downsampled_input` to using all 16 bits.
   int16_t max_value =
       WebRtcSpl_MaxAbsValueW16(downsampled_input, kDownsampledLength);
   int16_t norm_shift = 16 - WebRtcSpl_NormW32(max_value);
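The candidate-lag selection commented above (maximize correlation divided by distortion) reduces to the following floating-point sketch; the actual code performs the same search in fixed point with explicit scale factors, and the names here are hypothetical.

```cpp
#include <cstddef>
#include <limits>

// Returns the candidate lag whose correlation/distortion ratio is largest.
size_t BestLagByRatio(const int* correlation,  // per-candidate correlation
                      const int* distortion,   // per-candidate distortion
                      const size_t* lags,      // candidate lags
                      size_t num_candidates) {
  double best_ratio = -std::numeric_limits<double>::infinity();
  size_t best_lag = 0;
  for (size_t i = 0; i < num_candidates; ++i) {
    // Guard against zero distortion; the real code handles this around its
    // fixed-point division instead.
    const double ratio =
        distortion[i] != 0
            ? static_cast<double>(correlation[i]) / distortion[i]
            : std::numeric_limits<double>::infinity();
    if (ratio > best_ratio) {
      best_ratio = ratio;
      best_lag = lags[i];
    }
  }
  return best_lag;
}
```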
diff --git a/modules/audio_coding/neteq/expand.h b/modules/audio_coding/neteq/expand.h
index 35dee65..2d22b11 100644
--- a/modules/audio_coding/neteq/expand.h
+++ b/modules/audio_coding/neteq/expand.h
@@ -45,7 +45,7 @@
   virtual void Reset();
 
   // The main method to produce concealment data. The data is appended to the
-  // end of |output|.
+  // end of `output`.
   virtual int Process(AudioMultiVector* output);
 
   // Prepare the object to do extra expansion during normal operation following
@@ -56,7 +56,7 @@
   // a period of expands.
   virtual void SetParametersForMergeAfterExpand();
 
-  // Returns the mute factor for |channel|.
+  // Returns the mute factor for `channel`.
   int16_t MuteFactor(size_t channel) const {
     RTC_DCHECK_LT(channel, num_channels_);
     return channel_parameters_[channel].mute_factor;
@@ -81,7 +81,7 @@
 
   bool TooManyExpands();
 
-  // Analyzes the signal history in |sync_buffer_|, and set up all parameters
+  // Analyzes the signal history in `sync_buffer_`, and sets up all parameters
   // necessary to produce concealment data.
   void AnalyzeSignal(int16_t* random_vector);
 
@@ -115,9 +115,9 @@
     int mute_slope; /* Q20 */
   };
 
-  // Calculate the auto-correlation of |input|, with length |input_length|
+  // Calculate the auto-correlation of `input`, with length `input_length`
   // samples. The correlation is calculated from a downsampled version of
-  // |input|, and is written to |output|.
+  // `input`, and is written to `output`.
   void Correlation(const int16_t* input,
                    size_t input_length,
                    int16_t* output) const;
diff --git a/modules/audio_coding/neteq/expand_unittest.cc b/modules/audio_coding/neteq/expand_unittest.cc
index 55a8866..9c3264f 100644
--- a/modules/audio_coding/neteq/expand_unittest.cc
+++ b/modules/audio_coding/neteq/expand_unittest.cc
@@ -124,7 +124,7 @@
     EXPECT_EQ(0, statistics_.last_outage_duration_samples());
   }
   expand_.SetParametersForNormalAfterExpand();
-  // Convert |sum_output_len_samples| to milliseconds.
+  // Convert `sum_output_len_samples` to milliseconds.
   EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples),
             statistics_.last_outage_duration_samples());
 }
@@ -164,7 +164,7 @@
     EXPECT_EQ(0, statistics_.last_outage_duration_samples());
   }
   expand_.SetParametersForNormalAfterExpand();
-  // Convert |sum_output_len_samples| to milliseconds.
+  // Convert `sum_output_len_samples` to milliseconds.
   EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples),
             statistics_.last_outage_duration_samples());
 }
diff --git a/modules/audio_coding/neteq/histogram.cc b/modules/audio_coding/neteq/histogram.cc
index 15a2394..e4b7f10 100644
--- a/modules/audio_coding/neteq/histogram.cc
+++ b/modules/audio_coding/neteq/histogram.cc
@@ -34,42 +34,42 @@
 Histogram::~Histogram() {}
 
 // Each element in the vector is first multiplied by the forgetting factor
-// |forget_factor_|. Then the vector element indicated by |iat_packets| is then
-// increased (additive) by 1 - |forget_factor_|. This way, the probability of
-// |value| is slightly increased, while the sum of the histogram remains
+// `forget_factor_`. Then the vector element indicated by `value` is
+// increased (additive) by 1 - `forget_factor_`. This way, the probability of
+// `value` is slightly increased, while the sum of the histogram remains
 // constant (=1).
 // Due to inaccuracies in the fixed-point arithmetic, the histogram may no
 // longer sum up to 1 (in Q30) after the update. To correct this, a correction
 // term is added or subtracted from the first element (or elements) of the
 // vector.
-// The forgetting factor |forget_factor_| is also updated. When the DelayManager
+// The forgetting factor `forget_factor_` is also updated. When the DelayManager
 // is reset, the factor is set to 0 to facilitate rapid convergence in the
 // beginning. With each update of the histogram, the factor is increased towards
-// the steady-state value |base_forget_factor_|.
+// the steady-state value `base_forget_factor_`.
 void Histogram::Add(int value) {
   RTC_DCHECK(value >= 0);
   RTC_DCHECK(value < static_cast<int>(buckets_.size()));
   int vector_sum = 0;  // Sum up the vector elements as they are processed.
-  // Multiply each element in |buckets_| with |forget_factor_|.
+  // Multiply each element in `buckets_` with `forget_factor_`.
   for (int& bucket : buckets_) {
     bucket = (static_cast<int64_t>(bucket) * forget_factor_) >> 15;
     vector_sum += bucket;
   }
 
   // Increase the probability for the currently observed inter-arrival time
-  // by 1 - |forget_factor_|. The factor is in Q15, |buckets_| in Q30.
+  // by 1 - `forget_factor_`. The factor is in Q15, `buckets_` in Q30.
   // Thus, left-shift 15 steps to obtain result in Q30.
   buckets_[value] += (32768 - forget_factor_) << 15;
   vector_sum += (32768 - forget_factor_) << 15;  // Add to vector sum.
 
-  // |buckets_| should sum up to 1 (in Q30), but it may not due to
+  // `buckets_` should sum up to 1 (in Q30), but it may not due to
   // fixed-point rounding errors.
   vector_sum -= 1 << 30;  // Should be zero. Compensate if not.
   if (vector_sum != 0) {
-    // Modify a few values early in |buckets_|.
+    // Modify a few values early in `buckets_`.
     int flip_sign = vector_sum > 0 ? -1 : 1;
     for (int& bucket : buckets_) {
-      // Add/subtract 1/16 of the element, but not more than |vector_sum|.
+      // Add/subtract 1/16 of the element, but not more than `vector_sum`.
       int correction = flip_sign * std::min(std::abs(vector_sum), bucket >> 4);
       bucket += correction;
       vector_sum += correction;
@@ -82,8 +82,8 @@
 
   ++add_count_;
 
-  // Update |forget_factor_| (changes only during the first seconds after a
-  // reset). The factor converges to |base_forget_factor_|.
+  // Update `forget_factor_` (changes only during the first seconds after a
+  // reset). The factor converges to `base_forget_factor_`.
   if (start_forget_weight_) {
     if (forget_factor_ != base_forget_factor_) {
       int old_forget_factor = forget_factor_;
@@ -92,7 +92,7 @@
       forget_factor_ =
           std::max(0, std::min(base_forget_factor_, forget_factor));
       // The histogram is updated recursively by forgetting the old histogram
-      // with |forget_factor_| and adding a new sample multiplied by |1 -
+      // with `forget_factor_` and adding a new sample multiplied by `1 -
       // forget_factor_`. We need to make sure that the effective weight on the
       // new sample is no smaller than those on the old samples, i.e., to
       // satisfy the following DCHECK.
@@ -106,21 +106,21 @@
 
 int Histogram::Quantile(int probability) {
   // Find the bucket for which the probability of observing an
-  // inter-arrival time larger than or equal to |index| is larger than or
-  // equal to |probability|. The sought probability is estimated using
+  // inter-arrival time larger than or equal to `index` is larger than or
+  // equal to `probability`. The sought probability is estimated using
   // the histogram as the reverse cumulant PDF, i.e., the sum of elements from
-  // the end up until |index|. Now, since the sum of all elements is 1
+  // the end up until `index`. Now, since the sum of all elements is 1
   // (in Q30) by definition, and since the solution is often a low value for
-  // |iat_index|, it is more efficient to start with |sum| = 1 and subtract
+  // `index`, it is more efficient to start with `sum` = 1 and subtract
   // elements from the start of the histogram.
   int inverse_probability = (1 << 30) - probability;
-  size_t index = 0;        // Start from the beginning of |buckets_|.
+  size_t index = 0;        // Start from the beginning of `buckets_`.
   int sum = 1 << 30;       // Assign to 1 in Q30.
   sum -= buckets_[index];
 
   while ((sum > inverse_probability) && (index < buckets_.size() - 1)) {
     // Subtract the probabilities one by one until the sum is no longer greater
-    // than |inverse_probability|.
+    // than `inverse_probability`.
     ++index;
     sum -= buckets_[index];
   }
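A floating-point sketch of the two operations documented in this file: the forgetting update in Add() and the front-subtraction search in Quantile(). The real Histogram works in Q30/Q15 fixed point and also adapts `forget_factor_` over time; this class is only illustrative.

```cpp
#include <cstddef>
#include <vector>

class HistogramSketch {
 public:
  HistogramSketch(size_t num_buckets, double forget_factor)
      : buckets_(num_buckets, 1.0 / num_buckets),
        forget_factor_(forget_factor) {}

  // Forget the old distribution slightly and add the new observation, so the
  // buckets keep summing to 1.
  void Add(size_t value) {
    for (double& bucket : buckets_) bucket *= forget_factor_;
    buckets_[value] += 1.0 - forget_factor_;
  }

  // Finds the smallest index such that the cumulative probability of buckets
  // 0..index reaches `probability`, by starting from a sum of 1 and
  // subtracting buckets from the front, as in Histogram::Quantile.
  size_t Quantile(double probability) const {
    double sum = 1.0;
    size_t index = 0;
    sum -= buckets_[index];
    while (sum > 1.0 - probability && index < buckets_.size() - 1) {
      ++index;
      sum -= buckets_[index];
    }
    return index;
  }

 private:
  std::vector<double> buckets_;
  double forget_factor_;
};
```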
diff --git a/modules/audio_coding/neteq/histogram.h b/modules/audio_coding/neteq/histogram.h
index 0567e3f..5b2f2b1 100644
--- a/modules/audio_coding/neteq/histogram.h
+++ b/modules/audio_coding/neteq/histogram.h
@@ -21,7 +21,7 @@
 
 class Histogram {
  public:
-  // Creates histogram with capacity |num_buckets| and |forget_factor| in Q15.
+  // Creates histogram with capacity `num_buckets` and `forget_factor` in Q15.
   Histogram(size_t num_buckets,
             int forget_factor,
             absl::optional<double> start_forget_weight = absl::nullopt);
@@ -31,10 +31,10 @@
   // Resets the histogram to the default start distribution.
   virtual void Reset();
 
-  // Add entry in bucket |index|.
+  // Add entry in bucket `index`.
   virtual void Add(int index);
 
-  // Calculates the quantile at |probability| (in Q30) of the histogram
+  // Calculates the quantile at `probability` (in Q30) of the histogram
   // distribution.
   virtual int Quantile(int probability);
 
diff --git a/modules/audio_coding/neteq/merge.cc b/modules/audio_coding/neteq/merge.cc
index 07d8722..ca5ec22 100644
--- a/modules/audio_coding/neteq/merge.cc
+++ b/modules/audio_coding/neteq/merge.cc
@@ -149,13 +149,13 @@
     (*output)[channel].OverwriteAt(temp_data_.data(), output_length, 0);
   }
 
-  // Copy back the first part of the data to |sync_buffer_| and remove it from
-  // |output|.
+  // Copy back the first part of the data to `sync_buffer_` and remove it from
+  // `output`.
   sync_buffer_->ReplaceAtIndex(*output, old_length, sync_buffer_->next_index());
   output->PopFront(old_length);
 
-  // Return new added length. |old_length| samples were borrowed from
-  // |sync_buffer_|.
+  // Return new added length. `old_length` samples were borrowed from
+  // `sync_buffer_`.
   RTC_DCHECK_GE(output_length, old_length);
   return output_length - old_length;
 }
@@ -200,7 +200,7 @@
       // Append one more pitch period each time.
       expanded_.PushBack(expanded_temp);
     }
-    // Trim the length to exactly |required_length|.
+    // Trim the length to exactly `required_length`.
     expanded_.PopBack(expanded_.Size() - required_length);
   }
   RTC_DCHECK_GE(expanded_.Size(), required_length);
@@ -240,17 +240,17 @@
   // Calculate muting factor to use for new frame.
   int16_t mute_factor;
   if (energy_input > energy_expanded) {
-    // Normalize |energy_input| to 14 bits.
+    // Normalize `energy_input` to 14 bits.
     int16_t temp_shift = WebRtcSpl_NormW32(energy_input) - 17;
     energy_input = WEBRTC_SPL_SHIFT_W32(energy_input, temp_shift);
-    // Put |energy_expanded| in a domain 14 higher, so that
+    // Put `energy_expanded` in a domain 14 higher, so that
     // energy_expanded / energy_input is in Q14.
     energy_expanded = WEBRTC_SPL_SHIFT_W32(energy_expanded, temp_shift + 14);
     // Calculate sqrt(energy_expanded / energy_input) in Q14.
     mute_factor = static_cast<int16_t>(
         WebRtcSpl_SqrtFloor((energy_expanded / energy_input) << 14));
   } else {
-    // Set to 1 (in Q14) when |expanded| has higher energy than |input|.
+    // Set to 1 (in Q14) when `expanded` has higher energy than `input`.
     mute_factor = 16384;
   }
 
@@ -295,7 +295,7 @@
     // there is not much we can do.
     const size_t temp_len =
         input_length > signal_offset ? input_length - signal_offset : 0;
-    // TODO(hlundin): Should |downsamp_temp_len| be corrected for round-off
+    // TODO(hlundin): Should `downsamp_temp_len` be corrected for round-off
     // errors? I.e., (temp_len + decimation_factor - 1) / decimation_factor?
     size_t downsamp_temp_len = temp_len / decimation_factor;
     if (downsamp_temp_len > 0) {
@@ -351,8 +351,8 @@
   // Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.)
   size_t start_index_downsamp = start_index / (fs_mult_ * 2);
 
-  // Calculate a modified |stop_position_downsamp| to account for the increased
-  // start index |start_index_downsamp| and the effective array length.
+  // Calculate a modified `stop_position_downsamp` to account for the increased
+  // start index `start_index_downsamp` and the effective array length.
   size_t modified_stop_pos =
       std::min(stop_position_downsamp,
                kMaxCorrelationLength + pad_length - start_index_downsamp);
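What the scaling above amounts to, in floating point: a Q14 gain that matches the new input's level to the expanded signal, capped at unity. `MuteFactorQ14` is a hypothetical helper, not part of Merge.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

int16_t MuteFactorQ14(double energy_expanded, double energy_input) {
  if (energy_input <= energy_expanded || energy_input <= 0.0) {
    return 16384;  // 1.0 in Q14: the expanded signal is at least as strong.
  }
  // The input is louder; attenuate it by sqrt(energy ratio), < 1.0 here.
  const double gain = std::sqrt(energy_expanded / energy_input);
  return static_cast<int16_t>(std::min(16384.0, gain * 16384.0));
}
```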
diff --git a/modules/audio_coding/neteq/merge.h b/modules/audio_coding/neteq/merge.h
index a062a95..13aa31d 100644
--- a/modules/audio_coding/neteq/merge.h
+++ b/modules/audio_coding/neteq/merge.h
@@ -37,10 +37,10 @@
   virtual ~Merge();
 
   // The main method to produce the audio data. The decoded data is supplied in
-  // |input|, having |input_length| samples in total for all channels
-  // (interleaved). The result is written to |output|. The number of channels
-  // allocated in |output| defines the number of channels that will be used when
-  // de-interleaving |input|.
+  // `input`, having `input_length` samples in total for all channels
+  // (interleaved). The result is written to `output`. The number of channels
+  // allocated in `output` defines the number of channels that will be used when
+  // de-interleaving `input`.
   virtual size_t Process(int16_t* input,
                          size_t input_length,
                          AudioMultiVector* output);
@@ -57,29 +57,29 @@
   static const size_t kInputDownsampLength = 40;
   static const size_t kMaxCorrelationLength = 60;
 
-  // Calls |expand_| to get more expansion data to merge with. The data is
-  // written to |expanded_signal_|. Returns the length of the expanded data,
-  // while |expand_period| will be the number of samples in one expansion period
-  // (typically one pitch period). The value of |old_length| will be the number
-  // of samples that were taken from the |sync_buffer_|.
+  // Calls `expand_` to get more expansion data to merge with. The data is
+  // written to `expanded_signal_`. Returns the length of the expanded data,
+  // while `expand_period` will be the number of samples in one expansion period
+  // (typically one pitch period). The value of `old_length` will be the number
+  // of samples that were taken from the `sync_buffer_`.
   size_t GetExpandedSignal(size_t* old_length, size_t* expand_period);
 
-  // Analyzes |input| and |expanded_signal| and returns muting factor (Q14) to
+  // Analyzes `input` and `expanded_signal` and returns muting factor (Q14) to
   // be used on the new data.
   int16_t SignalScaling(const int16_t* input,
                         size_t input_length,
                         const int16_t* expanded_signal) const;
 
-  // Downsamples |input| (|input_length| samples) and |expanded_signal| to
+  // Downsamples `input` (`input_length` samples) and `expanded_signal` to
   // 4 kHz sample rate. The downsampled signals are written to
-  // |input_downsampled_| and |expanded_downsampled_|, respectively.
+  // `input_downsampled_` and `expanded_downsampled_`, respectively.
   void Downsample(const int16_t* input,
                   size_t input_length,
                   const int16_t* expanded_signal,
                   size_t expanded_length);
 
-  // Calculates cross-correlation between |input_downsampled_| and
-  // |expanded_downsampled_|, and finds the correlation maximum. The maximizing
+  // Calculates cross-correlation between `input_downsampled_` and
+  // `expanded_downsampled_`, and finds the correlation maximum. The maximizing
   // lag is returned.
   size_t CorrelateAndPeakSearch(size_t start_position,
                                 size_t input_length,
diff --git a/modules/audio_coding/neteq/nack_tracker.cc b/modules/audio_coding/neteq/nack_tracker.cc
index 8d94306..9f04534 100644
--- a/modules/audio_coding/neteq/nack_tracker.cc
+++ b/modules/audio_coding/neteq/nack_tracker.cc
@@ -123,7 +123,7 @@
              IsNewerSequenceNumber(sequence_number_current_received_rtp,
                                    sequence_num_last_decoded_rtp_));
 
-  // Packets with sequence numbers older than |upper_bound_missing| are
+  // Packets with sequence numbers older than `upper_bound_missing` are
   // considered missing, and the rest are considered late.
   uint16_t upper_bound_missing =
       sequence_number_current_received_rtp - nack_threshold_packets_;
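A sketch of the missing/late split described above, ignoring 16-bit sequence-number wrap-around for brevity (the real code compares with IsNewerSequenceNumber); the names are hypothetical.

```cpp
#include <cstdint>

enum class GapKind { kMissing, kLate };

GapKind ClassifyGap(uint16_t gap_sequence_number,
                    uint16_t current_received_sequence_number,
                    int nack_threshold_packets) {
  const uint16_t upper_bound_missing =
      current_received_sequence_number - nack_threshold_packets;
  // Older than the bound: assume it is lost and should be NACKed; otherwise
  // it is merely late and may still arrive on its own.
  return gap_sequence_number < upper_bound_missing ? GapKind::kMissing
                                                   : GapKind::kLate;
}
```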
diff --git a/modules/audio_coding/neteq/nack_tracker.h b/modules/audio_coding/neteq/nack_tracker.h
index 5a56734..ac0a77f 100644
--- a/modules/audio_coding/neteq/nack_tracker.h
+++ b/modules/audio_coding/neteq/nack_tracker.h
@@ -63,9 +63,9 @@
 
   // Set a maximum for the size of the NACK list. If the last received packet
   // has sequence number of N, then NACK list will not contain any element
-  // with sequence number earlier than N - |max_nack_list_size|.
+  // with sequence number earlier than N - `max_nack_list_size`.
   //
-  // The largest maximum size is defined by |kNackListSizeLimit|
+  // The largest maximum size is defined by `kNackListSizeLimit`
   void SetMaxNackListSize(size_t max_nack_list_size);
 
   // Set the sampling rate.
@@ -90,7 +90,7 @@
   std::vector<uint16_t> GetNackList(int64_t round_trip_time_ms) const;
 
   // Reset to default values. The NACK list is cleared.
-  // |nack_threshold_packets_| & |max_nack_list_size_| preserve their values.
+  // `nack_threshold_packets_` & `max_nack_list_size_` preserve their values.
   void Reset();
 
  private:
@@ -110,7 +110,7 @@
     int64_t time_to_play_ms;
 
     // A guess about the timestamp of the missing packet, it is used for
-    // estimation of |time_to_play_ms|. The estimate might be slightly wrong if
+    // estimation of `time_to_play_ms`. The estimate might be slightly wrong if
     // there has been frame-size change since the last received packet and the
     // missing packet. However, the risk of this is low, and in case of such
     // errors, there will be a minor misestimation in time-to-play of missing
@@ -139,7 +139,7 @@
   // computed correctly.
   NackList GetNackList() const;
 
-  // Given the |sequence_number_current_received_rtp| of currently received RTP,
+  // Given the `sequence_number_current_received_rtp` of currently received RTP,
   // recognize packets which have not arrived and add them to the list.
   void AddToList(uint16_t sequence_number_current_received_rtp);
 
@@ -147,23 +147,23 @@
   // This is called when 10 ms elapsed with no new RTP packet decoded.
   void UpdateEstimatedPlayoutTimeBy10ms();
 
-  // Given the |sequence_number_current_received_rtp| and
-  // |timestamp_current_received_rtp| of currently received RTP update number
+  // Given the `sequence_number_current_received_rtp` and
+  // `timestamp_current_received_rtp` of the currently received RTP, update the number
   // of samples per packet.
   void UpdateSamplesPerPacket(uint16_t sequence_number_current_received_rtp,
                               uint32_t timestamp_current_received_rtp);
 
-  // Given the |sequence_number_current_received_rtp| of currently received RTP
+  // Given the `sequence_number_current_received_rtp` of currently received RTP
   // update the list. That is; some packets will change from late to missing,
   // some packets are inserted as missing and some inserted as late.
   void UpdateList(uint16_t sequence_number_current_received_rtp);
 
   // Packets which are considered late for too long (according to
-  // |nack_threshold_packets_|) are flagged as missing.
+  // `nack_threshold_packets_`) are flagged as missing.
   void ChangeFromLateToMissing(uint16_t sequence_number_current_received_rtp);
 
   // Packets which have sequence number older that
-  // |sequence_num_last_received_rtp_| - |max_nack_list_size_| are removed
+  // `sequence_num_last_received_rtp_` - `max_nack_list_size_` are removed
   // from the NACK list.
   void LimitNackListSize();
 
@@ -173,9 +173,9 @@
   // Compute time-to-play given a timestamp.
   int64_t TimeToPlay(uint32_t timestamp) const;
 
-  // If packet N is arrived, any packet prior to N - |nack_threshold_packets_|
+  // If packet N has arrived, any packet prior to N - `nack_threshold_packets_`
   // which has not arrived is considered missing, and should be in the NACK list.
-  // Also any packet in the range of N-1 and N - |nack_threshold_packets_|,
+  // Also, any packet in the range of N-1 and N - `nack_threshold_packets_`,
   // exclusive, which has not arrived is considered late, and should be
   // in the list of late packets.
   const int nack_threshold_packets_;
@@ -202,7 +202,7 @@
   NackList nack_list_;
 
   // NACK list will not keep track of missing packets prior to
-  // |sequence_num_last_received_rtp_| - |max_nack_list_size_|.
+  // `sequence_num_last_received_rtp_` - `max_nack_list_size_`.
   size_t max_nack_list_size_;
 };
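
To make the late/missing classification described in the NackTracker comments above concrete, here is a minimal, self-contained C++ sketch (hypothetical names, not the actual NackTracker code) of how a sequence number can be classified relative to the newest received packet N and `nack_threshold_packets_`, with 16-bit wrap-around handled by unsigned arithmetic:

#include <cstdint>
#include <iostream>

enum class PacketState { kReceivedOrFuture, kLate, kMissing };

// Classify `seq_num` relative to the newest received sequence number.
// Packets more than `nack_threshold_packets` behind are missing; packets
// behind by at most the threshold are late.
PacketState Classify(uint16_t seq_num,
                     uint16_t newest_received,
                     uint16_t nack_threshold_packets) {
  const uint16_t distance = static_cast<uint16_t>(newest_received - seq_num);
  if (distance == 0 || distance > 0x7fff) {
    return PacketState::kReceivedOrFuture;  // Not older than `newest_received`.
  }
  return distance > nack_threshold_packets ? PacketState::kMissing
                                           : PacketState::kLate;
}

int main() {
  // Newest received packet is 5 after wrapping around from 65533; threshold 3.
  std::cout << (Classify(65534, 5, 3) == PacketState::kMissing) << "\n";  // 1
  std::cout << (Classify(3, 5, 3) == PacketState::kLate) << "\n";         // 1
}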
 
diff --git a/modules/audio_coding/neteq/nack_tracker_unittest.cc b/modules/audio_coding/neteq/nack_tracker_unittest.cc
index a44f41b..3f5a05b 100644
--- a/modules/audio_coding/neteq/nack_tracker_unittest.cc
+++ b/modules/audio_coding/neteq/nack_tracker_unittest.cc
@@ -215,10 +215,10 @@
     std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
     nack->UpdateSampleRate(kSampleRateHz);
 
-    // Sequence number wrap around if |k| is 2 or 3;
+    // Sequence number wrap around if `k` is 2 or 3;
     int seq_num_offset = (k < 2) ? 0 : 65531;
 
-    // Timestamp wrap around if |k| is 1 or 3.
+    // Timestamp wrap around if `k` is 1 or 3.
     uint32_t timestamp_offset =
         (k & 0x1) ? static_cast<uint32_t>(0xffffffff) - 6 : 0;
 
@@ -283,7 +283,7 @@
 TEST(NackTrackerTest,
      MissingPacketsPriorToLastDecodedRtpShouldNotBeInNackList) {
   for (int m = 0; m < 2; ++m) {
-    uint16_t seq_num_offset = (m == 0) ? 0 : 65531;  // Wrap around if |m| is 1.
+    uint16_t seq_num_offset = (m == 0) ? 0 : 65531;  // Wrap around if `m` is 1.
     std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
     nack->UpdateSampleRate(kSampleRateHz);
 
@@ -361,7 +361,7 @@
 TEST(NackTrackerTest, ListSizeAppliedFromBeginning) {
   const size_t kNackListSize = 10;
   for (int m = 0; m < 2; ++m) {
-    uint16_t seq_num_offset = (m == 0) ? 0 : 65525;  // Wrap around if |m| is 1.
+    uint16_t seq_num_offset = (m == 0) ? 0 : 65525;  // Wrap around if `m` is 1.
     std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
     nack->UpdateSampleRate(kSampleRateHz);
     nack->SetMaxNackListSize(kNackListSize);
@@ -385,7 +385,7 @@
 TEST(NackTrackerTest, ChangeOfListSizeAppliedAndOldElementsRemoved) {
   const size_t kNackListSize = 10;
   for (int m = 0; m < 2; ++m) {
-    uint16_t seq_num_offset = (m == 0) ? 0 : 65525;  // Wrap around if |m| is 1.
+    uint16_t seq_num_offset = (m == 0) ? 0 : 65525;  // Wrap around if `m` is 1.
     std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
     nack->UpdateSampleRate(kSampleRateHz);
 
diff --git a/modules/audio_coding/neteq/neteq_impl.cc b/modules/audio_coding/neteq/neteq_impl.cc
index 7225227..fce857f 100644
--- a/modules/audio_coding/neteq/neteq_impl.cc
+++ b/modules/audio_coding/neteq/neteq_impl.cc
@@ -608,7 +608,7 @@
 
   // Reinitialize NetEq if it's needed (changed SSRC or first call).
   if (update_sample_rate_and_channels) {
-    // Note: |first_packet_| will be cleared further down in this method, once
+    // Note: `first_packet_` will be cleared further down in this method, once
     // the packet has been successfully inserted into the packet buffer.
 
     // Flush the packet buffer and DTMF buffer.
@@ -784,8 +784,8 @@
   }
 
   if (update_sample_rate_and_channels && !packet_buffer_->Empty()) {
-    // We do not use |current_rtp_payload_type_| to |set payload_type|, but
-    // get the next RTP header from |packet_buffer_| to obtain the payload type.
+    // We do not use `current_rtp_payload_type_` to set `payload_type`, but
+    // get the next RTP header from `packet_buffer_` to obtain the payload type.
     // The reason for it is the following corner case. If NetEq receives a
     // CNG packet with a sample rate different than the current CNG then it
     // flushes its buffer, assuming send codec must have been changed. However,
@@ -978,18 +978,18 @@
     comfort_noise_->Reset();
   }
 
-  // We treat it as if all packets referenced to by |last_decoded_packet_infos_|
-  // were mashed together when creating the samples in |algorithm_buffer_|.
+  // We treat it as if all packets referred to by `last_decoded_packet_infos_`
+  // were mashed together when creating the samples in `algorithm_buffer_`.
   RtpPacketInfos packet_infos(last_decoded_packet_infos_);
 
-  // Copy samples from |algorithm_buffer_| to |sync_buffer_|.
+  // Copy samples from `algorithm_buffer_` to `sync_buffer_`.
   //
   // TODO(bugs.webrtc.org/10757):
-  //   We would in the future also like to pass |packet_infos| so that we can do
-  //   sample-perfect tracking of that information across |sync_buffer_|.
+  //   We would in the future also like to pass `packet_infos` so that we can do
+  //   sample-perfect tracking of that information across `sync_buffer_`.
   sync_buffer_->PushBack(*algorithm_buffer_);
 
-  // Extract data from |sync_buffer_| to |output|.
+  // Extract data from `sync_buffer_` to `output`.
   size_t num_output_samples_per_channel = output_size_samples_;
   size_t num_output_samples = output_size_samples_ * sync_buffer_->Channels();
   if (num_output_samples > AudioFrame::kMaxDataSizeSamples) {
@@ -1006,14 +1006,14 @@
   audio_frame->sample_rate_hz_ = fs_hz_;
   // TODO(bugs.webrtc.org/10757):
   //   We don't have the ability to properly track individual packets once their
-  //   audio samples have entered |sync_buffer_|. So for now, treat it as if
-  //   |packet_infos| from packets decoded by the current |GetAudioInternal()|
+  //   audio samples have entered `sync_buffer_`. So for now, treat it as if
+  //   `packet_infos` from packets decoded by the current `GetAudioInternal()`
   //   call were all consumed assembling the current audio frame and the current
   //   audio frame only.
   audio_frame->packet_infos_ = std::move(packet_infos);
   if (sync_buffer_->FutureLength() < expand_->overlap_length()) {
-    // The sync buffer should always contain |overlap_length| samples, but now
-    // too many samples have been extracted. Reinstall the |overlap_length|
+    // The sync buffer should always contain `overlap_length` samples, but now
+    // too many samples have been extracted. Reinstall the `overlap_length`
     // lookahead by moving the index.
     const size_t missing_lookahead_samples =
         expand_->overlap_length() - sync_buffer_->FutureLength();
@@ -1031,7 +1031,7 @@
     return kSampleUnderrun;
   }
 
-  // Should always have overlap samples left in the |sync_buffer_|.
+  // Should always have overlap samples left in the `sync_buffer_`.
   RTC_DCHECK_GE(sync_buffer_->FutureLength(), expand_->overlap_length());
 
   // TODO(yujo): For muted frames, this can be a copy rather than an addition.
@@ -1041,7 +1041,7 @@
   }
 
   // Update the background noise parameters if last operation wrote data
-  // straight from the decoder to the |sync_buffer_|. That is, none of the
+  // straight from the decoder to the `sync_buffer_`. That is, none of the
   // operations that modify the signal can be followed by a parameter update.
   if ((last_mode_ == Mode::kNormal) || (last_mode_ == Mode::kAccelerateFail) ||
       (last_mode_ == Mode::kPreemptiveExpandFail) ||
@@ -1051,14 +1051,14 @@
   }
 
   if (operation == Operation::kDtmf) {
-    // DTMF data was written the end of |sync_buffer_|.
-    // Update index to end of DTMF data in |sync_buffer_|.
+    // DTMF data was written to the end of `sync_buffer_`.
+    // Update index to end of DTMF data in `sync_buffer_`.
     sync_buffer_->set_dtmf_index(sync_buffer_->Size());
   }
 
   if (last_mode_ != Mode::kExpand && last_mode_ != Mode::kCodecPlc) {
-    // If last operation was not expand, calculate the |playout_timestamp_| from
-    // the |sync_buffer_|. However, do not update the |playout_timestamp_| if it
+    // If last operation was not expand, calculate the `playout_timestamp_` from
+    // the `sync_buffer_`. However, do not update the `playout_timestamp_` if it
     // would be moved "backwards".
     uint32_t temp_timestamp =
         sync_buffer_->end_timestamp() -
@@ -1067,7 +1067,7 @@
       playout_timestamp_ = temp_timestamp;
     }
   } else {
-    // Use dead reckoning to estimate the |playout_timestamp_|.
+    // Use dead reckoning to estimate the `playout_timestamp_`.
     playout_timestamp_ += static_cast<uint32_t>(output_size_samples_);
   }
   // Set the timestamp in the audio frame to zero before the first packet has
@@ -1206,7 +1206,7 @@
     // Use the provided action instead of the decision NetEq decided on.
     *operation = *action_override;
   }
-  // Check if we already have enough samples in the |sync_buffer_|. If so,
+  // Check if we already have enough samples in the `sync_buffer_`. If so,
   // change decision to normal, unless the decision was merge, accelerate, or
   // preemptive expand.
   if (samples_left >= rtc::dchecked_cast<int>(output_size_samples_) &&
@@ -1245,7 +1245,7 @@
         *operation = Operation::kNormal;
       }
     }
-    // Adjust |sync_buffer_| timestamp before setting |end_timestamp| to the
+    // Adjust `sync_buffer_` timestamp before setting `end_timestamp` to the
     // new value.
     sync_buffer_->IncreaseEndTimestamp(timestamp_ - end_timestamp);
     end_timestamp = timestamp_;
@@ -1535,7 +1535,7 @@
   while (!packet_list->empty() && !decoder_database_->IsComfortNoise(
                                       packet_list->front().payload_type)) {
     RTC_DCHECK(decoder);  // At this point, we must have a decoder object.
-    // The number of channels in the |sync_buffer_| should be the same as the
+    // The number of channels in the `sync_buffer_` should be the same as the
     // number decoder channels.
     RTC_DCHECK_EQ(sync_buffer_->Channels(), decoder->Channels());
     RTC_DCHECK_GE(decoded_buffer_length_, kMaxFrameSize * decoder->Channels());
@@ -1557,7 +1557,7 @@
       *speech_type = result.speech_type;
       if (result.num_decoded_samples > 0) {
         *decoded_length += rtc::dchecked_cast<int>(result.num_decoded_samples);
-        // Update |decoder_frame_length_| with number of samples per channel.
+        // Update `decoder_frame_length_` with number of samples per channel.
         decoder_frame_length_ =
             result.num_decoded_samples / decoder->Channels();
       }
@@ -1733,7 +1733,7 @@
   size_t num_channels = algorithm_buffer_->Channels();
   size_t decoded_length_per_channel = decoded_length / num_channels;
   if (decoded_length_per_channel < required_samples) {
-    // Must move data from the |sync_buffer_| in order to get 30 ms.
+    // Must move data from the `sync_buffer_` in order to get 30 ms.
     borrowed_samples_per_channel =
         static_cast<int>(required_samples - decoded_length_per_channel);
     memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
@@ -1765,7 +1765,7 @@
   }
 
   if (borrowed_samples_per_channel > 0) {
-    // Copy borrowed samples back to the |sync_buffer_|.
+    // Copy borrowed samples back to the `sync_buffer_`.
     size_t length = algorithm_buffer_->Size();
     if (length < borrowed_samples_per_channel) {
       // This destroys the beginning of the buffer, but will not cause any
@@ -1806,7 +1806,7 @@
   size_t old_borrowed_samples_per_channel = 0;
   size_t decoded_length_per_channel = decoded_length / num_channels;
   if (decoded_length_per_channel < required_samples) {
-    // Must move data from the |sync_buffer_| in order to get 30 ms.
+    // Must move data from the `sync_buffer_` in order to get 30 ms.
     borrowed_samples_per_channel =
         required_samples - decoded_length_per_channel;
     // Calculate how many of these were already played out.
@@ -1843,7 +1843,7 @@
   }
 
   if (borrowed_samples_per_channel > 0) {
-    // Copy borrowed samples back to the |sync_buffer_|.
+    // Copy borrowed samples back to the `sync_buffer_`.
     sync_buffer_->ReplaceAtIndex(
         *algorithm_buffer_, borrowed_samples_per_channel,
         sync_buffer_->Size() - borrowed_samples_per_channel);
@@ -1903,10 +1903,10 @@
 }
 
 int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf) {
-  // This block of the code and the block further down, handling |dtmf_switch|
+  // This block of the code and the block further down, handling `dtmf_switch`
   // are commented out. Otherwise playing out-of-band DTMF would fail in VoE
   // test, DtmfTest.ManualSuccessfullySendsOutOfBandTelephoneEvents. This is
-  // equivalent to |dtmf_switch| always be false.
+  // equivalent to `dtmf_switch` always be false.
   //
   // See http://webrtc-codereview.appspot.com/1195004/ for discussion
   // On this issue. This change might cause some glitches at the point of
@@ -1916,7 +1916,7 @@
   //  if ((last_mode_ != Modes::kDtmf) &&
   //      dtmf_tone_generator_->initialized()) {
   //    // Special case; see below.
-  //    // We must catch this before calling Generate, since |initialized| is
+  //    // We must catch this before calling Generate, since `initialized` is
   //    // modified in that call.
   //    dtmf_switch = true;
   //  }
@@ -1948,7 +1948,7 @@
   //    // TODO(hlundin): This code seems incorrect. (Legacy.) Write test and
   //    // verify correct operation.
   //    RTC_NOTREACHED();
-  //    // Must generate enough data to replace all of the |sync_buffer_|
+  //    // Must generate enough data to replace all of the `sync_buffer_`
   //    // "future".
   //    int required_length = sync_buffer_->FutureLength();
   //    RTC_DCHECK(dtmf_tone_generator_->initialized());
@@ -2033,7 +2033,7 @@
   do {
     timestamp_ = next_packet->timestamp;
     absl::optional<Packet> packet = packet_buffer_->GetNextPacket();
-    // |next_packet| may be invalid after the |packet_buffer_| operation.
+    // `next_packet` may be invalid after the `packet_buffer_` operation.
     next_packet = nullptr;
     if (!packet) {
       RTC_LOG(LS_ERROR) << "Should always be able to extract a packet here";
@@ -2180,7 +2180,7 @@
   comfort_noise_.reset(
       new ComfortNoise(fs_hz, decoder_database_.get(), sync_buffer_.get()));
 
-  // Verify that |decoded_buffer_| is long enough.
+  // Verify that `decoded_buffer_` is long enough.
   if (decoded_buffer_length_ < kMaxFrameSize * channels) {
     // Reallocate to larger size.
     decoded_buffer_length_ = kMaxFrameSize * channels;
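
The playout-timestamp bookkeeping described in the comments above (derive the timestamp from the sync buffer after a normal decode, never move it backwards, dead-reckon after expand) can be sketched as follows; this is a simplified illustration with assumed names, not the actual NetEqImpl code:

#include <cstdint>
#include <iostream>

// True if `timestamp` is strictly newer than `prev_timestamp` in modular
// 32-bit RTP timestamp arithmetic.
bool IsNewer(uint32_t timestamp, uint32_t prev_timestamp) {
  return timestamp != prev_timestamp &&
         static_cast<uint32_t>(timestamp - prev_timestamp) < 0x80000000u;
}

struct PlayoutClock {
  uint32_t playout_timestamp = 0;

  // After a normal decode: the playout position is the end of the sync buffer
  // minus the samples still waiting to be played, but it never moves backwards.
  void UpdateFromSyncBuffer(uint32_t sync_buffer_end_timestamp,
                            uint32_t future_length_samples) {
    const uint32_t candidate = sync_buffer_end_timestamp - future_length_samples;
    if (IsNewer(candidate, playout_timestamp)) {
      playout_timestamp = candidate;
    }
  }

  // After expand (no new packet data): advance by dead reckoning, i.e. by the
  // number of output samples produced.
  void DeadReckon(uint32_t output_size_samples) {
    playout_timestamp += output_size_samples;
  }
};

int main() {
  PlayoutClock clock;
  clock.UpdateFromSyncBuffer(/*sync_buffer_end_timestamp=*/960,
                             /*future_length_samples=*/480);
  clock.DeadReckon(/*output_size_samples=*/480);
  std::cout << clock.playout_timestamp << "\n";  // 960
}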
diff --git a/modules/audio_coding/neteq/neteq_impl.h b/modules/audio_coding/neteq/neteq_impl.h
index 88da6dc..e3d84b3 100644
--- a/modules/audio_coding/neteq/neteq_impl.h
+++ b/modules/audio_coding/neteq/neteq_impl.h
@@ -141,7 +141,7 @@
   bool RegisterPayloadType(int rtp_payload_type,
                            const SdpAudioFormat& audio_format) override;
 
-  // Removes |rtp_payload_type| from the codec database. Returns 0 on success,
+  // Removes `rtp_payload_type` from the codec database. Returns 0 on success,
   // -1 on failure.
   int RemovePayloadType(uint8_t rtp_payload_type) override;
 
@@ -159,7 +159,7 @@
 
   int FilteredCurrentDelayMs() const override;
 
-  // Writes the current network statistics to |stats|. The statistics are reset
+  // Writes the current network statistics to `stats`. The statistics are reset
   // after the call.
   int NetworkStatistics(NetEqNetworkStatistics* stats) override;
 
@@ -215,7 +215,7 @@
                            rtc::ArrayView<const uint8_t> payload)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
-  // Delivers 10 ms of audio data. The data is written to |audio_frame|.
+  // Delivers 10 ms of audio data. The data is written to `audio_frame`.
   // Returns 0 on success, otherwise an error code.
   int GetAudioInternal(AudioFrame* audio_frame,
                        bool* muted,
@@ -223,9 +223,9 @@
       RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
   // Provides a decision to the GetAudioInternal method. The decision what to
-  // do is written to |operation|. Packets to decode are written to
-  // |packet_list|, and a DTMF event to play is written to |dtmf_event|. When
-  // DTMF should be played, |play_dtmf| is set to true by the method.
+  // do is written to `operation`. Packets to decode are written to
+  // `packet_list`, and a DTMF event to play is written to `dtmf_event`. When
+  // DTMF should be played, `play_dtmf` is set to true by the method.
   // Returns 0 on success, otherwise an error code.
   int GetDecision(Operation* operation,
                   PacketList* packet_list,
@@ -234,11 +234,11 @@
                   absl::optional<Operation> action_override)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
-  // Decodes the speech packets in |packet_list|, and writes the results to
-  // |decoded_buffer|, which is allocated to hold |decoded_buffer_length|
-  // elements. The length of the decoded data is written to |decoded_length|.
+  // Decodes the speech packets in `packet_list`, and writes the results to
+  // `decoded_buffer`, which is allocated to hold `decoded_buffer_length`
+  // elements. The length of the decoded data is written to `decoded_length`.
   // The speech type -- speech or (codec-internal) comfort noise -- is written
-  // to |speech_type|. If |packet_list| contains any SID frames for RFC 3389
+  // to `speech_type`. If `packet_list` contains any SID frames for RFC 3389
   // comfort noise, those are not decoded.
   int Decode(PacketList* packet_list,
              Operation* operation,
@@ -293,7 +293,7 @@
                          bool play_dtmf) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
   // Sub-method which calls the ComfortNoise class to generate RFC 3389 comfort
-  // noise. |packet_list| can either contain one SID frame to update the
+  // noise. `packet_list` can either contain one SID frame to update the
   // noise parameters, or no payload at all, in which case the previously
   // received parameters are used.
   int DoRfc3389Cng(PacketList* packet_list, bool play_dtmf)
@@ -308,20 +308,20 @@
   int DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
-  // Overdub DTMF on top of |output|.
+  // Overdub DTMF on top of `output`.
   int DtmfOverdub(const DtmfEvent& dtmf_event,
                   size_t num_channels,
                   int16_t* output) const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
-  // Extracts packets from |packet_buffer_| to produce at least
-  // |required_samples| samples. The packets are inserted into |packet_list|.
+  // Extracts packets from `packet_buffer_` to produce at least
+  // `required_samples` samples. The packets are inserted into `packet_list`.
   // Returns the number of samples that the packets in the list will produce, or
   // -1 in case of an error.
   int ExtractPackets(size_t required_samples, PacketList* packet_list)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
   // Resets various variables and objects to new values based on the sample rate
-  // |fs_hz| and |channels| number audio channels.
+  // `fs_hz` and the number of audio channels `channels`.
   void SetSampleRateAndChannels(int fs_hz, size_t channels)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
 
diff --git a/modules/audio_coding/neteq/neteq_impl_unittest.cc b/modules/audio_coding/neteq/neteq_impl_unittest.cc
index 53b4dae..875e62c 100644
--- a/modules/audio_coding/neteq/neteq_impl_unittest.cc
+++ b/modules/audio_coding/neteq/neteq_impl_unittest.cc
@@ -207,8 +207,8 @@
     EXPECT_EQ(1u, output.num_channels_);
     EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
 
-    // DTMF packets are immediately consumed by |InsertPacket()| and won't be
-    // returned by |GetAudio()|.
+    // DTMF packets are immediately consumed by `InsertPacket()` and won't be
+    // returned by `GetAudio()`.
     EXPECT_THAT(output.packet_infos_, IsEmpty());
 
     // Verify first 64 samples of actual output.
@@ -461,7 +461,7 @@
    public:
     CountingSamplesDecoder() : next_value_(1) {}
 
-    // Produce as many samples as input bytes (|encoded_len|).
+    // Produce as many samples as input bytes (`encoded_len`).
     int DecodeInternal(const uint8_t* encoded,
                        size_t encoded_len,
                        int /* sample_rate_hz */,
@@ -578,7 +578,7 @@
       .WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
   int16_t dummy_output[kPayloadLengthSamples] = {0};
   // The below expectation will make the mock decoder write
-  // |kPayloadLengthSamples| zeros to the output array, and mark it as speech.
+  // `kPayloadLengthSamples` zeros to the output array, and mark it as speech.
   EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(0), kPayloadLengthBytes,
                                            kSampleRateHz, _, _))
       .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
@@ -1284,7 +1284,7 @@
       .WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
   int16_t dummy_output[kPayloadLengthSamples] = {0};
   // The below expectation will make the mock decoder write
-  // |kPayloadLengthSamples| - 5 zeros to the output array, and mark it as
+  // `kPayloadLengthSamples` - 5 zeros to the output array, and mark it as
   // speech. That is, the decoded length is 5 samples shorter than the expected.
   EXPECT_CALL(mock_decoder,
               DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
diff --git a/modules/audio_coding/neteq/neteq_network_stats_unittest.cc b/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
index 8f72734..862edaf 100644
--- a/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
+++ b/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
@@ -188,11 +188,11 @@
                            : 0xffffffff);
   }
 
-  // |stats_ref|
+  // `stats_ref`
   // expects.x = -1, do not care
-  // expects.x = 0, 'x' in current stats should equal 'x' in |stats_ref|
-  // expects.x = 1, 'x' in current stats should < 'x' in |stats_ref|
-  // expects.x = 2, 'x' in current stats should > 'x' in |stats_ref|
+  // expects.x = 0, 'x' in current stats should equal 'x' in `stats_ref`
+  // expects.x = 1, 'x' in current stats should < 'x' in `stats_ref`
+  // expects.x = 2, 'x' in current stats should > 'x' in `stats_ref`
   void CheckNetworkStatistics(NetEqNetworkStatsCheck expects) {
     NetEqNetworkStatistics stats;
     neteq_->NetworkStatistics(&stats);
@@ -229,7 +229,7 @@
     uint32_t time_now;
     uint32_t next_send_time;
 
-    // Initiate |last_lost_time_|.
+    // Initialize `last_lost_time_`.
     time_now = next_send_time = last_lost_time_ = rtp_generator_->GetRtpHeader(
         kPayloadType, frame_size_samples_, &rtp_header_);
     for (int k = 0; k < num_loops; ++k) {
diff --git a/modules/audio_coding/neteq/neteq_unittest.cc b/modules/audio_coding/neteq/neteq_unittest.cc
index bdd90e9..5ce6b89 100644
--- a/modules/audio_coding/neteq/neteq_unittest.cc
+++ b/modules/audio_coding/neteq/neteq_unittest.cc
@@ -305,7 +305,7 @@
   PopulateRtpInfo(0, 0, &rtp_info);
   rtp_info.payloadType = 103;  // iSAC, but the payload is invalid.
   EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload));
-  // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
+  // Set all of `out_data_` to 1, and verify that it was set to 0 by the call
   // to GetAudio.
   int16_t* out_frame_data = out_frame_.mutable_data();
   for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
@@ -327,7 +327,7 @@
 }
 
 TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
-  // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
+  // Set all of `out_data_` to 1, and verify that it was set to 0 by the call
   // to GetAudio.
   int16_t* out_frame_data = out_frame_.mutable_data();
   for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
@@ -371,7 +371,7 @@
     AudioFrame output;
     test::AudioLoop input;
     // We are using the same 32 kHz input file for all tests, regardless of
-    // |sampling_rate_hz|. The output may sound weird, but the test is still
+    // `sampling_rate_hz`. The output may sound weird, but the test is still
     // valid.
     ASSERT_TRUE(input.Init(
         webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
@@ -534,7 +534,7 @@
   ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
                                                   payload, payload_len)));
 
-  // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
+  // Pull audio until we have played `kCngPeriodMs` of CNG. Start at 10 ms since
   // we have already pulled out CNG once.
   for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
     ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
diff --git a/modules/audio_coding/neteq/normal.cc b/modules/audio_coding/neteq/normal.cc
index 3ed0e26..6ffae097 100644
--- a/modules/audio_coding/neteq/normal.cc
+++ b/modules/audio_coding/neteq/normal.cc
@@ -45,7 +45,7 @@
   const int fs_mult = fs_hz_ / 8000;
   RTC_DCHECK_GT(fs_mult, 0);
   // fs_shift = log2(fs_mult), rounded down.
-  // Note that |fs_shift| is not "exact" for 48 kHz.
+  // Note that `fs_shift` is not "exact" for 48 kHz.
   // TODO(hlundin): Investigate this further.
   const int fs_shift = 30 - WebRtcSpl_NormW32(fs_mult);
 
@@ -83,7 +83,7 @@
       size_t energy_length =
           std::min(static_cast<size_t>(fs_mult * 64), length_per_channel);
       int scaling = 6 + fs_shift - WebRtcSpl_NormW32(decoded_max * decoded_max);
-      scaling = std::max(scaling, 0);  // |scaling| should always be >= 0.
+      scaling = std::max(scaling, 0);  // `scaling` should always be >= 0.
       int32_t energy = WebRtcSpl_DotProductWithScale(signal.get(), signal.get(),
                                                      energy_length, scaling);
       int32_t scaled_energy_length =
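
The `fs_shift` comment above amounts to floor(log2(fs_mult)); a tiny standalone illustration (not the SPL routine itself) of why the value is exact for 8/16/32 kHz but rounds down for 48 kHz, where fs_mult = 6:

#include <iostream>

// floor(log2(fs_mult)) for positive `fs_mult`, i.e. the value the comment
// above attributes to 30 - WebRtcSpl_NormW32(fs_mult).
int FsShift(int fs_mult) {
  int shift = 0;
  while (fs_mult > 1) {
    fs_mult >>= 1;
    ++shift;
  }
  return shift;
}

int main() {
  // fs_mult = 1, 2, 4, 6 for 8, 16, 32, 48 kHz.
  std::cout << FsShift(1) << " " << FsShift(2) << " " << FsShift(4) << " "
            << FsShift(6) << "\n";  // 0 1 2 2 (not "exact" for 48 kHz)
}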
diff --git a/modules/audio_coding/neteq/normal.h b/modules/audio_coding/neteq/normal.h
index d6dc84a..3607208 100644
--- a/modules/audio_coding/neteq/normal.h
+++ b/modules/audio_coding/neteq/normal.h
@@ -49,11 +49,11 @@
 
   virtual ~Normal() {}
 
-  // Performs the "Normal" operation. The decoder data is supplied in |input|,
-  // having |length| samples in total for all channels (interleaved). The
-  // result is written to |output|. The number of channels allocated in
-  // |output| defines the number of channels that will be used when
-  // de-interleaving |input|. |last_mode| contains the mode used in the previous
+  // Performs the "Normal" operation. The decoder data is supplied in `input`,
+  // having `length` samples in total for all channels (interleaved). The
+  // result is written to `output`. The number of channels allocated in
+  // `output` defines the number of channels that will be used when
+  // de-interleaving `input`. `last_mode` contains the mode used in the previous
   // GetAudio call (i.e., not the current one).
   int Process(const int16_t* input,
               size_t length,
diff --git a/modules/audio_coding/neteq/normal_unittest.cc b/modules/audio_coding/neteq/normal_unittest.cc
index 7e533bb..4554d79 100644
--- a/modules/audio_coding/neteq/normal_unittest.cc
+++ b/modules/audio_coding/neteq/normal_unittest.cc
@@ -51,7 +51,7 @@
   StatisticsCalculator statistics;
   Expand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs, channels);
   Normal normal(fs, &db, bgn, &expand, &statistics);
-  EXPECT_CALL(db, Die());  // Called when |db| goes out of scope.
+  EXPECT_CALL(db, Die());  // Called when `db` goes out of scope.
 }
 
 TEST(Normal, AvoidDivideByZero) {
@@ -85,8 +85,8 @@
   EXPECT_EQ(input_size_samples, normal.Process(input, input_size_samples,
                                                NetEq::Mode::kExpand, &output));
 
-  EXPECT_CALL(db, Die());      // Called when |db| goes out of scope.
-  EXPECT_CALL(expand, Die());  // Called when |expand| goes out of scope.
+  EXPECT_CALL(db, Die());      // Called when `db` goes out of scope.
+  EXPECT_CALL(expand, Die());  // Called when `expand` goes out of scope.
 }
 
 TEST(Normal, InputLengthAndChannelsDoNotMatch) {
@@ -109,8 +109,8 @@
   EXPECT_EQ(0, normal.Process(input, input_len, NetEq::Mode::kExpand, &output));
   EXPECT_EQ(0u, output.Size());
 
-  EXPECT_CALL(db, Die());      // Called when |db| goes out of scope.
-  EXPECT_CALL(expand, Die());  // Called when |expand| goes out of scope.
+  EXPECT_CALL(db, Die());      // Called when `db` goes out of scope.
+  EXPECT_CALL(expand, Die());  // Called when `expand` goes out of scope.
 }
 
 TEST(Normal, LastModeExpand120msPacket) {
@@ -138,8 +138,8 @@
 
   EXPECT_EQ(kPacketsizeBytes, output.Size());
 
-  EXPECT_CALL(db, Die());      // Called when |db| goes out of scope.
-  EXPECT_CALL(expand, Die());  // Called when |expand| goes out of scope.
+  EXPECT_CALL(db, Die());      // Called when `db` goes out of scope.
+  EXPECT_CALL(expand, Die());  // Called when `expand` goes out of scope.
 }
 
 // TODO(hlundin): Write more tests.
diff --git a/modules/audio_coding/neteq/packet.h b/modules/audio_coding/neteq/packet.h
index 4455494..0c6f204 100644
--- a/modules/audio_coding/neteq/packet.h
+++ b/modules/audio_coding/neteq/packet.h
@@ -84,8 +84,8 @@
 
   // Packets should generally be moved around but sometimes it's useful to make
   // a copy, for example for testing purposes. NOTE: Will only work for
-  // un-parsed packets, i.e. |frame| must be unset. The payload will, however,
-  // be copied. |waiting_time| will also not be copied.
+  // un-parsed packets, i.e. `frame` must be unset. The payload will, however,
+  // be copied. `waiting_time` will also not be copied.
   Packet Clone() const;
 
   Packet& operator=(Packet&& b);
diff --git a/modules/audio_coding/neteq/packet_buffer.cc b/modules/audio_coding/neteq/packet_buffer.cc
index 86ae847..f6b5a47 100644
--- a/modules/audio_coding/neteq/packet_buffer.cc
+++ b/modules/audio_coding/neteq/packet_buffer.cc
@@ -33,7 +33,7 @@
 namespace webrtc {
 namespace {
 // Predicate used when inserting packets in the buffer list.
-// Operator() returns true when |packet| goes before |new_packet|.
+// Operator() returns true when `packet` goes before `new_packet`.
 class NewTimestampIsLarger {
  public:
   explicit NewTimestampIsLarger(const Packet& new_packet)
@@ -183,16 +183,16 @@
   PacketList::reverse_iterator rit = std::find_if(
       buffer_.rbegin(), buffer_.rend(), NewTimestampIsLarger(packet));
 
-  // The new packet is to be inserted to the right of |rit|. If it has the same
-  // timestamp as |rit|, which has a higher priority, do not insert the new
+  // The new packet is to be inserted to the right of `rit`. If it has the same
+  // timestamp as `rit`, which has a higher priority, do not insert the new
   // packet to list.
   if (rit != buffer_.rend() && packet.timestamp == rit->timestamp) {
     LogPacketDiscarded(packet.priority.codec_level, stats);
     return return_val;
   }
 
-  // The new packet is to be inserted to the left of |it|. If it has the same
-  // timestamp as |it|, which has a lower priority, replace |it| with the new
+  // The new packet is to be inserted to the left of `it`. If it has the same
+  // timestamp as `it`, which has a lower priority, replace `it` with the new
   // packet.
   PacketList::iterator it = rit.base();
   if (it != buffer_.end() && packet.timestamp == it->timestamp) {
diff --git a/modules/audio_coding/neteq/packet_buffer.h b/modules/audio_coding/neteq/packet_buffer.h
index cd2adf7..20a0533 100644
--- a/modules/audio_coding/neteq/packet_buffer.h
+++ b/modules/audio_coding/neteq/packet_buffer.h
@@ -45,7 +45,7 @@
   };
 
   // Constructor creates a buffer which can hold a maximum of
-  // |max_number_of_packets| packets.
+  // `max_number_of_packets` packets.
   PacketBuffer(size_t max_number_of_packets, const TickTimer* tick_timer);
 
   // Deletes all packets in the buffer before destroying the buffer.
@@ -63,7 +63,7 @@
   // Returns true for an empty buffer.
   virtual bool Empty() const;
 
-  // Inserts |packet| into the buffer. The buffer will take over ownership of
+  // Inserts `packet` into the buffer. The buffer will take over ownership of
   // the packet object.
   // Returns PacketBuffer::kOK on success, PacketBuffer::kFlushed if the buffer
   // was flushed due to overfilling.
@@ -93,14 +93,14 @@
       int target_level_ms);
 
   // Gets the timestamp for the first packet in the buffer and writes it to the
-  // output variable |next_timestamp|.
+  // output variable `next_timestamp`.
   // Returns PacketBuffer::kBufferEmpty if the buffer is empty,
   // PacketBuffer::kOK otherwise.
   virtual int NextTimestamp(uint32_t* next_timestamp) const;
 
   // Gets the timestamp for the first packet in the buffer with a timestamp no
-  // lower than the input limit |timestamp|. The result is written to the output
-  // variable |next_timestamp|.
+  // lower than the input limit `timestamp`. The result is written to the output
+  // variable `next_timestamp`.
   // Returns PacketBuffer::kBufferEmpty if the buffer is empty,
   // PacketBuffer::kOK otherwise.
   virtual int NextHigherTimestamp(uint32_t timestamp,
@@ -154,11 +154,11 @@
   virtual bool ContainsDtxOrCngPacket(
       const DecoderDatabase* decoder_database) const;
 
-  // Static method returning true if |timestamp| is older than |timestamp_limit|
-  // but less than |horizon_samples| behind |timestamp_limit|. For instance,
+  // Static method returning true if `timestamp` is older than `timestamp_limit`
+  // but less than `horizon_samples` behind `timestamp_limit`. For instance,
   // with timestamp_limit = 100 and horizon_samples = 10, a timestamp in the
   // range (90, 100) is considered obsolete, and will yield true.
-  // Setting |horizon_samples| to 0 is the same as setting it to 2^31, i.e.,
+  // Setting `horizon_samples` to 0 is the same as setting it to 2^31, i.e.,
   // half the 32-bit timestamp range.
   static bool IsObsoleteTimestamp(uint32_t timestamp,
                                   uint32_t timestamp_limit,
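
A self-contained sketch of the semantics described for IsObsoleteTimestamp above (an assumption reconstructed from the comment, not a copy of the implementation), using wrap-around-safe comparisons:

#include <cstdint>
#include <iostream>

bool IsNewerTimestamp(uint32_t timestamp, uint32_t prev_timestamp) {
  return timestamp != prev_timestamp &&
         static_cast<uint32_t>(timestamp - prev_timestamp) < 0x80000000u;
}

bool IsObsoleteTimestamp(uint32_t timestamp,
                         uint32_t timestamp_limit,
                         uint32_t horizon_samples) {
  // Obsolete = strictly older than `timestamp_limit`, but by less than
  // `horizon_samples`. A horizon of 0 means half the 32-bit timestamp range.
  return IsNewerTimestamp(timestamp_limit, timestamp) &&
         (horizon_samples == 0 ||
          IsNewerTimestamp(timestamp, timestamp_limit - horizon_samples));
}

int main() {
  std::cout << IsObsoleteTimestamp(95, 100, 10) << "\n";   // 1: in (90, 100)
  std::cout << IsObsoleteTimestamp(90, 100, 10) << "\n";   // 0: at the horizon
  std::cout << IsObsoleteTimestamp(105, 100, 10) << "\n";  // 0: newer than limit
}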
diff --git a/modules/audio_coding/neteq/post_decode_vad.h b/modules/audio_coding/neteq/post_decode_vad.h
index ca7cabf..3134d5f 100644
--- a/modules/audio_coding/neteq/post_decode_vad.h
+++ b/modules/audio_coding/neteq/post_decode_vad.h
@@ -40,8 +40,8 @@
   // Initializes post-decode VAD.
   void Init();
 
-  // Updates post-decode VAD with the audio data in |signal| having |length|
-  // samples. The data is of type |speech_type|, at the sample rate |fs_hz|.
+  // Updates post-decode VAD with the audio data in `signal` having `length`
+  // samples. The data is of type `speech_type`, at the sample rate `fs_hz`.
   void Update(int16_t* signal,
               size_t length,
               AudioDecoder::SpeechType speech_type,
diff --git a/modules/audio_coding/neteq/preemptive_expand.cc b/modules/audio_coding/neteq/preemptive_expand.cc
index cad8d6a..232170b 100644
--- a/modules/audio_coding/neteq/preemptive_expand.cc
+++ b/modules/audio_coding/neteq/preemptive_expand.cc
@@ -26,7 +26,7 @@
     size_t* length_change_samples) {
   old_data_length_per_channel_ = old_data_length;
   // Input length must be (almost) 30 ms.
-  // Also, the new part must be at least |overlap_samples_| elements.
+  // Also, the new part must be at least `overlap_samples_` elements.
   static const size_t k15ms = 120;  // 15 ms = 120 samples at 8 kHz sample rate.
   if (num_channels_ == 0 ||
       input_length / num_channels_ < (2 * k15ms - 1) * fs_mult_ ||
@@ -64,7 +64,7 @@
     bool active_speech,
     bool /*fast_mode*/,
     AudioMultiVector* output) const {
-  // Pre-calculate common multiplication with |fs_mult_|.
+  // Pre-calculate common multiplication with `fs_mult_`.
   // 120 corresponds to 15 ms.
   size_t fs_mult_120 = static_cast<size_t>(fs_mult_ * 120);
   // Check for strong correlation (>0.9 in Q14) and at least 15 ms new data,
@@ -80,12 +80,12 @@
     // Copy first part, including cross-fade region.
     output->PushBackInterleaved(rtc::ArrayView<const int16_t>(
         input, (unmodified_length + peak_index) * num_channels_));
-    // Copy the last |peak_index| samples up to 15 ms to |temp_vector|.
+    // Copy the last `peak_index` samples up to 15 ms to `temp_vector`.
     AudioMultiVector temp_vector(num_channels_);
     temp_vector.PushBackInterleaved(rtc::ArrayView<const int16_t>(
         &input[(unmodified_length - peak_index) * num_channels_],
         peak_index * num_channels_));
-    // Cross-fade |temp_vector| onto the end of |output|.
+    // Cross-fade `temp_vector` onto the end of `output`.
     output->CrossFade(temp_vector, peak_index);
     // Copy the last unmodified part, 15 ms + pitch period until the end.
     output->PushBackInterleaved(rtc::ArrayView<const int16_t>(
diff --git a/modules/audio_coding/neteq/preemptive_expand.h b/modules/audio_coding/neteq/preemptive_expand.h
index e7d2bad..708ebfd 100644
--- a/modules/audio_coding/neteq/preemptive_expand.h
+++ b/modules/audio_coding/neteq/preemptive_expand.h
@@ -37,9 +37,9 @@
         overlap_samples_(overlap_samples) {}
 
   // This method performs the actual PreemptiveExpand operation. The samples are
-  // read from |input|, of length |input_length| elements, and are written to
-  // |output|. The number of samples added through time-stretching is
-  // is provided in the output |length_change_samples|. The method returns
+  // read from `input`, of length `input_length` elements, and are written to
+  // `output`. The number of samples added through time-stretching is
+  // provided in the output `length_change_samples`. The method returns
   // the outcome of the operation as an enumerator value.
   ReturnCodes Process(const int16_t* pw16_decoded,
                       size_t len,
@@ -48,7 +48,7 @@
                       size_t* length_change_samples);
 
  protected:
-  // Sets the parameters |best_correlation| and |peak_index| to suitable
+  // Sets the parameters `best_correlation` and `peak_index` to suitable
   // values when the signal contains no active speech.
   void SetParametersForPassiveSpeech(size_t input_length,
                                      int16_t* best_correlation,
diff --git a/modules/audio_coding/neteq/red_payload_splitter.cc b/modules/audio_coding/neteq/red_payload_splitter.cc
index b517e38..b7b4520 100644
--- a/modules/audio_coding/neteq/red_payload_splitter.cc
+++ b/modules/audio_coding/neteq/red_payload_splitter.cc
@@ -27,9 +27,9 @@
 
 // The method loops through a list of packets {A, B, C, ...}. Each packet is
 // split into its corresponding RED payloads, {A1, A2, ...}, which is
-// temporarily held in the list |new_packets|.
-// When the first packet in |packet_list| has been processed, the original
-// packet is replaced by the new ones in |new_packets|, so that |packet_list|
+// temporarily held in the list `new_packets`.
+// When the first packet in `packet_list` has been processed, the original
+// packet is replaced by the new ones in `new_packets`, so that `packet_list`
 // becomes: {A1, A2, ..., B, C, ...}. The method then continues with B, and C,
 // until all the original packets have been replaced by their split payloads.
 bool RedPayloadSplitter::SplitRed(PacketList* packet_list) {
@@ -110,7 +110,7 @@
 
     if (new_headers.size() <= kMaxRedBlocks) {
       // Populate the new packets with payload data.
-      // |payload_ptr| now points at the first payload byte.
+      // `payload_ptr` now points at the first payload byte.
       PacketList new_packets;  // An empty list to store the split packets in.
       for (size_t i = 0; i != new_headers.size(); ++i) {
         const auto& new_header = new_headers[i];
@@ -143,14 +143,14 @@
         payload_ptr += payload_length;
       }
       // Insert new packets into original list, before the element pointed to by
-      // iterator |it|.
+      // iterator `it`.
       packet_list->splice(it, std::move(new_packets));
     } else {
       RTC_LOG(LS_WARNING) << "SplitRed too many blocks: " << new_headers.size();
       ret = false;
     }
-    // Remove |it| from the packet list. This operation effectively moves the
-    // iterator |it| to the next packet in the list. Thus, we do not have to
+    // Remove `it` from the packet list. This operation effectively moves the
+    // iterator `it` to the next packet in the list. Thus, we do not have to
     // increment it manually.
     it = packet_list->erase(it);
   }
@@ -175,8 +175,8 @@
       } else {
         if (this_payload_type != main_payload_type) {
           // We do not allow redundant payloads of a different type.
-          // Remove |it| from the packet list. This operation effectively
-          // moves the iterator |it| to the next packet in the list. Thus, we
+          // Remove `it` from the packet list. This operation effectively
+          // moves the iterator `it` to the next packet in the list. Thus, we
           // do not have to increment it manually.
           it = packet_list->erase(it);
           continue;
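
The list surgery described for SplitRed above (replace each packet with its split payloads, in order, without manual iterator increments) follows the standard std::list splice/erase pattern; a toy model with strings standing in for packets, purely for illustration:

#include <iostream>
#include <list>
#include <string>

int main() {
  std::list<std::string> packet_list = {"A", "B", "C"};
  for (auto it = packet_list.begin(); it != packet_list.end();) {
    // Pretend each packet splits into two redundant payloads.
    std::list<std::string> new_packets = {*it + "1", *it + "2"};
    // Insert the split payloads before `it`...
    packet_list.splice(it, std::move(new_packets));
    // ...then erase the original packet; erase() returns the next element, so
    // no manual increment is needed.
    it = packet_list.erase(it);
  }
  for (const auto& p : packet_list) std::cout << p << " ";  // A1 A2 B1 B2 C1 C2
  std::cout << "\n";
}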
diff --git a/modules/audio_coding/neteq/red_payload_splitter.h b/modules/audio_coding/neteq/red_payload_splitter.h
index c54ffc0..5566091 100644
--- a/modules/audio_coding/neteq/red_payload_splitter.h
+++ b/modules/audio_coding/neteq/red_payload_splitter.h
@@ -30,15 +30,15 @@
 
   virtual ~RedPayloadSplitter() {}
 
-  // Splits each packet in |packet_list| into its separate RED payloads. Each
+  // Splits each packet in `packet_list` into its separate RED payloads. Each
   // RED payload is packetized into a Packet. The original elements in
-  // |packet_list| are properly deleted, and replaced by the new packets.
-  // Note that all packets in |packet_list| must be RED payloads, i.e., have
+  // `packet_list` are properly deleted, and replaced by the new packets.
+  // Note that all packets in `packet_list` must be RED payloads, i.e., have
   // RED headers according to RFC 2198 at the very beginning of the payload.
   // Returns kOK or an error.
   virtual bool SplitRed(PacketList* packet_list);
 
-  // Checks all packets in |packet_list|. Packets that are DTMF events or
+  // Checks all packets in `packet_list`. Packets that are DTMF events or
   // comfort noise payloads are kept. Except that, only one single payload type
   // is accepted. Any packet with another payload type is discarded.
   virtual void CheckRedPayloads(PacketList* packet_list,
diff --git a/modules/audio_coding/neteq/red_payload_splitter_unittest.cc b/modules/audio_coding/neteq/red_payload_splitter_unittest.cc
index 1f16945..a0ba541 100644
--- a/modules/audio_coding/neteq/red_payload_splitter_unittest.cc
+++ b/modules/audio_coding/neteq/red_payload_splitter_unittest.cc
@@ -70,9 +70,9 @@
 //   |0|   Block PT  |
 //   +-+-+-+-+-+-+-+-+
 
-// Creates a RED packet, with |num_payloads| payloads, with payload types given
-// by the values in array |payload_types| (which must be of length
-// |num_payloads|). Each redundant payload is |timestamp_offset| samples
+// Creates a RED packet, with `num_payloads` payloads, with payload types given
+// by the values in array `payload_types` (which must be of length
+// `num_payloads`). Each redundant payload is `timestamp_offset` samples
 // "behind" the the previous payload.
 Packet CreateRedPayload(size_t num_payloads,
                         uint8_t* payload_types,
@@ -109,7 +109,7 @@
     ++payload_ptr;
   }
   for (size_t i = 0; i < num_payloads; ++i) {
-    // Write |i| to all bytes in each payload.
+    // Write `i` to all bytes in each payload.
     if (embed_opus_fec) {
       CreateOpusFecPayload(payload_ptr, kPayloadLength,
                            static_cast<uint8_t>(i));
@@ -121,7 +121,7 @@
   return packet;
 }
 
-// Create a packet with all payload bytes set to |payload_value|.
+// Create a packet with all payload bytes set to `payload_value`.
 Packet CreatePacket(uint8_t payload_type,
                     size_t payload_length,
                     uint8_t payload_value,
@@ -140,7 +140,7 @@
   return packet;
 }
 
-// Checks that |packet| has the attributes given in the remaining parameters.
+// Checks that `packet` has the attributes given in the remaining parameters.
 void VerifyPacket(const Packet& packet,
                   size_t payload_length,
                   uint8_t payload_type,
@@ -289,7 +289,7 @@
 TEST(RedPayloadSplitter, CheckRedPayloads) {
   PacketList packet_list;
   for (uint8_t i = 0; i <= 3; ++i) {
-    // Create packet with payload type |i|, payload length 10 bytes, all 0.
+    // Create packet with payload type `i`, payload length 10 bytes, all 0.
     packet_list.push_back(CreatePacket(i, 10, 0));
   }
 
diff --git a/modules/audio_coding/neteq/statistics_calculator.cc b/modules/audio_coding/neteq/statistics_calculator.cc
index 741cdbd..8e28130 100644
--- a/modules/audio_coding/neteq/statistics_calculator.cc
+++ b/modules/audio_coding/neteq/statistics_calculator.cc
@@ -339,7 +339,7 @@
   } else {
     std::sort(waiting_times_.begin(), waiting_times_.end());
     // Find mid-point elements. If the size is odd, the two values
-    // |middle_left| and |middle_right| will both be the one middle element; if
+    // `middle_left` and `middle_right` will both be the one middle element; if
     // the size is even, they will be the two neighboring elements at the
     // middle of the list.
     const int middle_left = waiting_times_[(waiting_times_.size() - 1) / 2];
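
The mid-point indexing described above can be illustrated with a small standalone median helper (an assumed reconstruction; only `middle_left` is visible in this hunk):

#include <algorithm>
#include <iostream>
#include <vector>

// For odd sizes both indices pick the single middle element; for even sizes
// they pick the two neighbors around the middle, and their mean is returned.
int MedianWaitingTimeMs(std::vector<int> waiting_times) {
  if (waiting_times.empty()) return 0;
  std::sort(waiting_times.begin(), waiting_times.end());
  const int middle_left = waiting_times[(waiting_times.size() - 1) / 2];
  const int middle_right = waiting_times[waiting_times.size() / 2];
  return (middle_left + middle_right) / 2;
}

int main() {
  std::cout << MedianWaitingTimeMs({30, 10, 20}) << "\n";      // 20
  std::cout << MedianWaitingTimeMs({40, 10, 20, 30}) << "\n";  // 25
}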
diff --git a/modules/audio_coding/neteq/statistics_calculator.h b/modules/audio_coding/neteq/statistics_calculator.h
index f0c2734..5c3fb75 100644
--- a/modules/audio_coding/neteq/statistics_calculator.h
+++ b/modules/audio_coding/neteq/statistics_calculator.h
@@ -34,16 +34,16 @@
   // Resets the counters that are not handled by Reset().
   void ResetMcu();
 
-  // Reports that |num_samples| samples were produced through expansion, and
+  // Reports that `num_samples` samples were produced through expansion, and
   // that the expansion produced other than just noise samples.
   void ExpandedVoiceSamples(size_t num_samples, bool is_new_concealment_event);
 
-  // Reports that |num_samples| samples were produced through expansion, and
+  // Reports that `num_samples` samples were produced through expansion, and
   // that the expansion produced only noise samples.
   void ExpandedNoiseSamples(size_t num_samples, bool is_new_concealment_event);
 
   // Corrects the statistics for number of samples produced through non-noise
-  // expansion by adding |num_samples| (negative or positive) to the current
+  // expansion by adding `num_samples` (negative or positive) to the current
   // value. The result is capped to zero to avoid negative values.
   void ExpandedVoiceSamplesCorrection(int num_samples);
 
@@ -55,24 +55,24 @@
   // Mark end of expand event; triggers some stats to be reported.
   void EndExpandEvent(int fs_hz);
 
-  // Reports that |num_samples| samples were produced through preemptive
+  // Reports that `num_samples` samples were produced through preemptive
   // expansion.
   void PreemptiveExpandedSamples(size_t num_samples);
 
-  // Reports that |num_samples| samples were removed through accelerate.
+  // Reports that `num_samples` samples were removed through accelerate.
   void AcceleratedSamples(size_t num_samples);
 
-  // Reports that |num_packets| packets were discarded.
+  // Reports that `num_packets` packets were discarded.
   virtual void PacketsDiscarded(size_t num_packets);
 
-  // Reports that |num_packets| secondary (FEC) packets were discarded.
+  // Reports that `num_packets` secondary (FEC) packets were discarded.
   virtual void SecondaryPacketsDiscarded(size_t num_packets);
 
-  // Reports that |num_packets| secondary (FEC) packets were received.
+  // Reports that `num_packets` secondary (FEC) packets were received.
   virtual void SecondaryPacketsReceived(size_t num_packets);
 
-  // Increases the report interval counter with |num_samples| at a sample rate
-  // of |fs_hz|. This is how the StatisticsCalculator gets notified that current
+  // Increases the report interval counter by `num_samples` at a sample rate
+  // of `fs_hz`. This is how the StatisticsCalculator gets notified that current
   // time is increasing.
   void IncreaseCounter(size_t num_samples, int fs_hz);
 
@@ -84,7 +84,7 @@
   // Stores new packet waiting time in waiting time statistics.
   void StoreWaitingTime(int waiting_time_ms);
 
-  // Reports that |num_samples| samples were decoded from secondary packets.
+  // Reports that `num_samples` samples were decoded from secondary packets.
   void SecondaryDecodedSamples(int num_samples);
 
   // Reports that the packet buffer was flushed.
@@ -93,17 +93,17 @@
   // Reports that the jitter buffer received a packet.
   void ReceivedPacket();
 
-  // Reports that a received packet was delayed by |delay_ms| milliseconds.
+  // Reports that a received packet was delayed by `delay_ms` milliseconds.
   virtual void RelativePacketArrivalDelay(size_t delay_ms);
 
-  // Logs a delayed packet outage event of |num_samples| expanded at a sample
-  // rate of |fs_hz|. A delayed packet outage event is defined as an expand
+  // Logs a delayed packet outage event of `num_samples` expanded at a sample
+  // rate of `fs_hz`. A delayed packet outage event is defined as an expand
   // period caused not by an actual packet loss, but by a delayed packet.
   virtual void LogDelayedPacketOutageEvent(int num_samples, int fs_hz);
 
-  // Returns the current network statistics in |stats|. The number of samples
-  // per packet is |samples_per_packet|. The method does not populate
-  // |preferred_buffer_size_ms|, |jitter_peaks_found| or |clockdrift_ppm|; use
+  // Returns the current network statistics in `stats`. The number of samples
+  // per packet is `samples_per_packet`. The method does not populate
+  // `preferred_buffer_size_ms`, `jitter_peaks_found` or `clockdrift_ppm`; use
   // the PopulateDelayManagerStats method for those.
   void GetNetworkStatistics(size_t samples_per_packet,
                             NetEqNetworkStatistics* stats);
diff --git a/modules/audio_coding/neteq/sync_buffer.cc b/modules/audio_coding/neteq/sync_buffer.cc
index 73e0628..80e1691 100644
--- a/modules/audio_coding/neteq/sync_buffer.cc
+++ b/modules/audio_coding/neteq/sync_buffer.cc
@@ -59,11 +59,11 @@
     channels_[channel]->InsertZerosAt(length, position);
   }
   if (next_index_ >= position) {
-    // We are moving the |next_index_| sample.
+    // We are moving the `next_index_` sample.
     set_next_index(next_index_ + length);  // Overflow handled by subfunction.
   }
   if (dtmf_index_ > 0 && dtmf_index_ >= position) {
-    // We are moving the |dtmf_index_| sample.
+    // We are moving the `dtmf_index_` sample.
     set_dtmf_index(dtmf_index_ + length);  // Overflow handled by subfunction.
   }
 }
@@ -71,7 +71,7 @@
 void SyncBuffer::ReplaceAtIndex(const AudioMultiVector& insert_this,
                                 size_t length,
                                 size_t position) {
-  position = std::min(position, Size());  // Cap |position| in the valid range.
+  position = std::min(position, Size());  // Cap `position` in the valid range.
   length = std::min(length, Size() - position);
   AudioMultiVector::OverwriteAt(insert_this, length, position);
 }
@@ -106,12 +106,12 @@
 }
 
 void SyncBuffer::set_next_index(size_t value) {
-  // Cannot set |next_index_| larger than the size of the buffer.
+  // Cannot set `next_index_` larger than the size of the buffer.
   next_index_ = std::min(value, Size());
 }
 
 void SyncBuffer::set_dtmf_index(size_t value) {
-  // Cannot set |dtmf_index_| larger than the size of the buffer.
+  // Cannot set `dtmf_index_` larger than the size of the buffer.
   dtmf_index_ = std::min(value, Size());
 }
 
diff --git a/modules/audio_coding/neteq/sync_buffer.h b/modules/audio_coding/neteq/sync_buffer.h
index 754716b..7d24730 100644
--- a/modules/audio_coding/neteq/sync_buffer.h
+++ b/modules/audio_coding/neteq/sync_buffer.h
@@ -35,55 +35,55 @@
   // Returns the number of samples yet to play out from the buffer.
   size_t FutureLength() const;
 
-  // Adds the contents of |append_this| to the back of the SyncBuffer. Removes
+  // Adds the contents of `append_this` to the back of the SyncBuffer. Removes
   // the same number of samples from the beginning of the SyncBuffer, to
-  // maintain a constant buffer size. The |next_index_| is updated to reflect
+  // maintain a constant buffer size. The `next_index_` is updated to reflect
   // the move of the beginning of "future" data.
   void PushBack(const AudioMultiVector& append_this) override;
 
   // Like PushBack, but reads the samples channel-interleaved from the input.
   void PushBackInterleaved(const rtc::BufferT<int16_t>& append_this);
 
-  // Adds |length| zeros to the beginning of each channel. Removes
+  // Adds `length` zeros to the beginning of each channel. Removes
   // the same number of samples from the end of the SyncBuffer, to
-  // maintain a constant buffer size. The |next_index_| is updated to reflect
+  // maintain a constant buffer size. The `next_index_` is updated to reflect
   // the move of the beginning of "future" data.
   // Note that this operation may delete future samples that are waiting to
   // be played.
   void PushFrontZeros(size_t length);
 
-  // Inserts |length| zeros into each channel at index |position|. The size of
-  // the SyncBuffer is kept constant, which means that the last |length|
+  // Inserts `length` zeros into each channel at index `position`. The size of
+  // the SyncBuffer is kept constant, which means that the last `length`
   // elements in each channel will be purged.
   virtual void InsertZerosAtIndex(size_t length, size_t position);
 
   // Overwrites each channel in this SyncBuffer with values taken from
-  // |insert_this|. The values are taken from the beginning of |insert_this| and
-  // are inserted starting at |position|. |length| values are written into each
-  // channel. The size of the SyncBuffer is kept constant. That is, if |length|
-  // and |position| are selected such that the new data would extend beyond the
+  // `insert_this`. The values are taken from the beginning of `insert_this` and
+  // are inserted starting at `position`. `length` values are written into each
+  // channel. The size of the SyncBuffer is kept constant. That is, if `length`
+  // and `position` are selected such that the new data would extend beyond the
   // end of the current SyncBuffer, the buffer is not extended.
-  // The |next_index_| is not updated.
+  // The `next_index_` is not updated.
   virtual void ReplaceAtIndex(const AudioMultiVector& insert_this,
                               size_t length,
                               size_t position);
 
-  // Same as the above method, but where all of |insert_this| is written (with
+  // Same as the above method, but where all of `insert_this` is written (with
   // the same constraints as above, that the SyncBuffer is not extended).
   virtual void ReplaceAtIndex(const AudioMultiVector& insert_this,
                               size_t position);
 
-  // Reads |requested_len| samples from each channel and writes them interleaved
-  // into |output|. The |next_index_| is updated to point to the sample to read
-  // next time. The AudioFrame |output| is first reset, and the |data_|,
-  // |num_channels_|, and |samples_per_channel_| fields are updated.
+  // Reads `requested_len` samples from each channel and writes them interleaved
+  // into `output`. The `next_index_` is updated to point to the sample to read
+  // next time. The AudioFrame `output` is first reset, and the `data_`,
+  // `num_channels_`, and `samples_per_channel_` fields are updated.
   void GetNextAudioInterleaved(size_t requested_len, AudioFrame* output);
 
-  // Adds |increment| to |end_timestamp_|.
+  // Adds `increment` to `end_timestamp_`.
   void IncreaseEndTimestamp(uint32_t increment);
 
   // Flushes the buffer. The buffer will contain only zeros after the flush, and
-  // |next_index_| will point to the end, like when the buffer was first
+  // `next_index_` will point to the end, like when the buffer was first
   // created.
   void Flush();
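
The comments above describe the SyncBuffer invariant: pushing samples in at the back pops the same number out at the front, so the total size never changes, and `next_index_` (the first not-yet-played sample) moves with the data. A minimal single-channel sketch of that invariant, using a plain std::deque rather than the real AudioMultiVector-backed class (toy code, not the WebRTC implementation), could look like this:

#include <cstddef>
#include <cstdint>
#include <deque>
#include <vector>

// Toy single-channel stand-in for the SyncBuffer invariant described above:
// a fixed-size buffer where pushing new samples at the back pops the same
// number from the front and moves `next_index_` (the first "future" sample).
class ToySyncBuffer {
 public:
  explicit ToySyncBuffer(size_t size) : buffer_(size, 0), next_index_(size) {}

  void PushBack(const std::vector<int16_t>& append_this) {
    for (int16_t sample : append_this) {
      buffer_.pop_front();
      buffer_.push_back(sample);
    }
    // The whole content shifted `append_this.size()` samples toward the
    // front, so `next_index_` moves down by the same amount (floored at 0).
    next_index_ = next_index_ >= append_this.size()
                      ? next_index_ - append_this.size()
                      : 0;
  }

  size_t next_index() const { return next_index_; }
  size_t Size() const { return buffer_.size(); }

 private:
  std::deque<int16_t> buffer_;
  size_t next_index_;
};

The unit test further down in this change checks exactly this relation: after PushBack of kNewLen samples into a buffer of size kLen, next_index() equals kLen - kNewLen.
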
 
diff --git a/modules/audio_coding/neteq/sync_buffer_unittest.cc b/modules/audio_coding/neteq/sync_buffer_unittest.cc
index 860dbae..bdcd924 100644
--- a/modules/audio_coding/neteq/sync_buffer_unittest.cc
+++ b/modules/audio_coding/neteq/sync_buffer_unittest.cc
@@ -55,18 +55,18 @@
   SyncBuffer sync_buffer(kChannels, kLen);
   static const size_t kNewLen = 10;
   AudioMultiVector new_data(kChannels, kNewLen);
-  // Populate |new_data|.
+  // Populate `new_data`.
   for (size_t channel = 0; channel < kChannels; ++channel) {
     for (size_t i = 0; i < kNewLen; ++i) {
       new_data[channel][i] = rtc::checked_cast<int16_t>(i);
     }
   }
-  // Push back |new_data| into |sync_buffer|. This operation should pop out
-  // data from the front of |sync_buffer|, so that the size of the buffer
-  // remains the same. The |next_index_| should also move with the same length.
+  // Push back `new_data` into `sync_buffer`. This operation should pop out
+  // data from the front of `sync_buffer`, so that the size of the buffer
+  // remains the same. The `next_index_` should also move with the same length.
   sync_buffer.PushBack(new_data);
   ASSERT_EQ(kLen, sync_buffer.Size());
-  // Verify that |next_index_| moved accordingly.
+  // Verify that `next_index_` moved accordingly.
   EXPECT_EQ(kLen - kNewLen, sync_buffer.next_index());
   // Verify the new contents.
   for (size_t channel = 0; channel < kChannels; ++channel) {
@@ -95,7 +95,7 @@
   SyncBuffer sync_buffer(kChannels, kLen);
   static const size_t kNewLen = 10;
   AudioMultiVector new_data(kChannels, kNewLen);
-  // Populate |new_data|.
+  // Populate `new_data`.
   for (size_t channel = 0; channel < kChannels; ++channel) {
     for (size_t i = 0; i < kNewLen; ++i) {
       new_data[channel][i] = rtc::checked_cast<int16_t>(1000 + i);
@@ -104,10 +104,10 @@
   sync_buffer.PushBack(new_data);
   EXPECT_EQ(kLen, sync_buffer.Size());
 
-  // Push |kNewLen| - 1 zeros into each channel in the front of the SyncBuffer.
+  // Push `kNewLen` - 1 zeros into each channel in the front of the SyncBuffer.
   sync_buffer.PushFrontZeros(kNewLen - 1);
   EXPECT_EQ(kLen, sync_buffer.Size());  // Size should remain the same.
-  // Verify that |next_index_| moved accordingly. Should be at the end - 1.
+  // Verify that `next_index_` moved accordingly. Should be at the end - 1.
   EXPECT_EQ(kLen - 1, sync_buffer.next_index());
   // Verify the zeros.
   for (size_t channel = 0; channel < kChannels; ++channel) {
@@ -128,22 +128,22 @@
   SyncBuffer sync_buffer(kChannels, kLen);
   static const size_t kNewLen = 10;
   AudioMultiVector new_data(kChannels, kNewLen);
-  // Populate |new_data|.
+  // Populate `new_data`.
   for (size_t channel = 0; channel < kChannels; ++channel) {
     for (size_t i = 0; i < kNewLen; ++i) {
       new_data[channel][i] = rtc::checked_cast<int16_t>(i);
     }
   }
-  // Push back |new_data| into |sync_buffer|. This operation should pop out
-  // data from the front of |sync_buffer|, so that the size of the buffer
-  // remains the same. The |next_index_| should also move with the same length.
+  // Push back `new_data` into `sync_buffer`. This operation should pop out
+  // data from the front of `sync_buffer`, so that the size of the buffer
+  // remains the same. The `next_index_` should also move with the same length.
   sync_buffer.PushBack(new_data);
 
   // Read to interleaved output. Read in two batches, where each read operation
-  // should automatically update the |net_index_| in the SyncBuffer.
-  // Note that |samples_read| is the number of samples read from each channel.
-  // That is, the number of samples written to |output| is
-  // |samples_read| * |kChannels|.
+  // should automatically update the `next_index_` in the SyncBuffer.
+  // Note that `samples_read` is the number of samples read from each channel.
+  // That is, the number of samples written to `output` is
+  // `samples_read` * `kChannels`.
   AudioFrame output1;
   sync_buffer.GetNextAudioInterleaved(kNewLen / 2, &output1);
   EXPECT_EQ(kChannels, output1.num_channels_);
diff --git a/modules/audio_coding/neteq/test/neteq_decoding_test.cc b/modules/audio_coding/neteq/test/neteq_decoding_test.cc
index d7f414a..1c70f14 100644
--- a/modules/audio_coding/neteq/test/neteq_decoding_test.cc
+++ b/modules/audio_coding/neteq/test/neteq_decoding_test.cc
@@ -346,8 +346,8 @@
   EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
 
   if (network_freeze_ms > 0) {
-    // First keep pulling audio for |network_freeze_ms| without inserting
-    // any data, then insert CNG data corresponding to |network_freeze_ms|
+    // First keep pulling audio for `network_freeze_ms` without inserting
+    // any data, then insert CNG data corresponding to `network_freeze_ms`
     // without pulling any output audio.
     const double loop_end_time = t_ms + network_freeze_ms;
     for (; t_ms < loop_end_time; t_ms += 10) {
@@ -357,7 +357,7 @@
       EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
     }
     bool pull_once = pull_audio_during_freeze;
-    // If |pull_once| is true, GetAudio will be called once half-way through
+    // If `pull_once` is true, GetAudio will be called once half-way through
     // the network recovery period.
     double pull_time_ms = (t_ms + next_input_time_ms) / 2;
     while (next_input_time_ms <= t_ms) {
diff --git a/modules/audio_coding/neteq/time_stretch.cc b/modules/audio_coding/neteq/time_stretch.cc
index b768029..b89be06 100644
--- a/modules/audio_coding/neteq/time_stretch.cc
+++ b/modules/audio_coding/neteq/time_stretch.cc
@@ -26,7 +26,7 @@
                                               bool fast_mode,
                                               AudioMultiVector* output,
                                               size_t* length_change_samples) {
-  // Pre-calculate common multiplication with |fs_mult_|.
+  // Pre-calculate common multiplication with `fs_mult_`.
   size_t fs_mult_120 =
       static_cast<size_t>(fs_mult_ * 120);  // Corresponds to 15 ms.
 
@@ -37,8 +37,8 @@
     signal = input;
     signal_len = input_len;
   } else {
-    // We want |signal| to be only the first channel of |input|, which is
-    // interleaved. Thus, we take the first sample, skip forward |num_channels|
+    // We want `signal` to be only the first channel of `input`, which is
+    // interleaved. Thus, we take the first sample, skip forward `num_channels`
     // samples, and continue like that.
     signal_len = input_len / num_channels_;
     signal_array.reset(new int16_t[signal_len]);
@@ -65,37 +65,37 @@
   int16_t peak_value;
   DspHelper::PeakDetection(auto_correlation_, kCorrelationLen, kNumPeaks,
                            fs_mult_, &peak_index, &peak_value);
-  // Assert that |peak_index| stays within boundaries.
+  // Assert that `peak_index` stays within boundaries.
   RTC_DCHECK_LE(peak_index, (2 * kCorrelationLen - 1) * fs_mult_);
 
   // Compensate peak_index for displaced starting position. The displacement
-  // happens in AutoCorrelation(). Here, |kMinLag| is in the down-sampled 4 kHz
-  // domain, while the |peak_index| is in the original sample rate; hence, the
+  // happens in AutoCorrelation(). Here, `kMinLag` is in the down-sampled 4 kHz
+  // domain, while the `peak_index` is in the original sample rate; hence, the
   // multiplication by fs_mult_ * 2.
   peak_index += kMinLag * fs_mult_ * 2;
-  // Assert that |peak_index| stays within boundaries.
+  // Assert that `peak_index` stays within boundaries.
   RTC_DCHECK_GE(peak_index, static_cast<size_t>(20 * fs_mult_));
   RTC_DCHECK_LE(peak_index,
                 20 * fs_mult_ + (2 * kCorrelationLen - 1) * fs_mult_);
 
-  // Calculate scaling to ensure that |peak_index| samples can be square-summed
+  // Calculate scaling to ensure that `peak_index` samples can be square-summed
   // without overflowing.
   int scaling = 31 - WebRtcSpl_NormW32(max_input_value_ * max_input_value_) -
                 WebRtcSpl_NormW32(static_cast<int32_t>(peak_index));
   scaling = std::max(0, scaling);
 
-  // |vec1| starts at 15 ms minus one pitch period.
+  // `vec1` starts at 15 ms minus one pitch period.
   const int16_t* vec1 = &signal[fs_mult_120 - peak_index];
-  // |vec2| start at 15 ms.
+  // `vec2` starts at 15 ms.
   const int16_t* vec2 = &signal[fs_mult_120];
-  // Calculate energies for |vec1| and |vec2|, assuming they both contain
-  // |peak_index| samples.
+  // Calculate energies for `vec1` and `vec2`, assuming they both contain
+  // `peak_index` samples.
   int32_t vec1_energy =
       WebRtcSpl_DotProductWithScale(vec1, vec1, peak_index, scaling);
   int32_t vec2_energy =
       WebRtcSpl_DotProductWithScale(vec2, vec2, peak_index, scaling);
 
-  // Calculate cross-correlation between |vec1| and |vec2|.
+  // Calculate cross-correlation between `vec1` and `vec2`.
   int32_t cross_corr =
       WebRtcSpl_DotProductWithScale(vec1, vec2, peak_index, scaling);
 
@@ -135,7 +135,7 @@
     cross_corr = WEBRTC_SPL_SHIFT_W32(cross_corr, temp_scale);
     cross_corr = std::max(0, cross_corr);  // Don't use if negative.
     best_correlation = WebRtcSpl_DivW32W16(cross_corr, sqrt_energy_prod);
-    // Make sure |best_correlation| is no larger than 1 in Q14.
+    // Make sure `best_correlation` is no larger than 1 in Q14.
     best_correlation = std::min(static_cast<int16_t>(16384), best_correlation);
   }
 
@@ -165,7 +165,7 @@
       &downsampled_input_[kMaxLag], &downsampled_input_[kMaxLag - kMinLag],
       kCorrelationLen, kMaxLag - kMinLag, -1, auto_corr);
 
-  // Normalize correlation to 14 bits and write to |auto_correlation_|.
+  // Normalize correlation to 14 bits and write to `auto_correlation_`.
   int32_t max_corr = WebRtcSpl_MaxAbsValueW32(auto_corr, kCorrelationLen);
   int scaling = std::max(0, 17 - WebRtcSpl_NormW32(max_corr));
   WebRtcSpl_VectorBitShiftW32ToW16(auto_correlation_, kCorrelationLen,
@@ -182,8 +182,8 @@
   // active speech.
   // Rewrite the inequality as:
   // (vec1_energy + vec2_energy) / 16 <= peak_index * background_noise_energy.
-  // The two sides of the inequality will be denoted |left_side| and
-  // |right_side|.
+  // The two sides of the inequality will be denoted `left_side` and
+  // `right_side`.
   int32_t left_side = rtc::saturated_cast<int32_t>(
       (static_cast<int64_t>(vec1_energy) + vec2_energy) / 16);
   int32_t right_side;
@@ -199,11 +199,11 @@
   right_side =
       rtc::dchecked_cast<int32_t>(peak_index) * (right_side >> right_scale);
 
-  // Scale |left_side| properly before comparing with |right_side|.
-  // (|scaling| is the scale factor before energy calculation, thus the scale
+  // Scale `left_side` properly before comparing with `right_side`.
+  // (`scaling` is the scale factor before energy calculation, thus the scale
   // factor for the energy is 2 * scaling.)
   if (WebRtcSpl_NormW32(left_side) < 2 * scaling) {
-    // Cannot scale only |left_side|, must scale |right_side| too.
+    // Cannot scale only `left_side`, must scale `right_side` too.
     int temp_scale = WebRtcSpl_NormW32(left_side);
     left_side = left_side << temp_scale;
     right_side = right_side >> (2 * scaling - temp_scale);
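
The comment above describes a rescale-before-compare step: `left_side` carries an extra scale factor of 2 * `scaling` right-shifts from the energy calculation, and if there is not enough headroom to shift it all the way back up, the remaining shift is applied downward to `right_side` instead. A standalone sketch of that pattern follows; the NormW32 helper below only approximates WebRtcSpl_NormW32 and is an assumption for the sketch, not a copy of the SPL routine.

#include <cstdint>

// Rough stand-in for WebRtcSpl_NormW32: the number of extra left-shifts that
// keep a non-zero 32-bit value in range (0 is returned for 0, as in the SPL
// routine). Assumption for this sketch only.
int NormW32(int32_t value) {
  if (value == 0) return 0;
  uint32_t magnitude = value < 0 ? ~static_cast<uint32_t>(value)
                                 : static_cast<uint32_t>(value);
  if (magnitude == 0) return 31;  // value was -1.
  int shifts = 0;
  while ((magnitude & 0x40000000u) == 0) {
    magnitude <<= 1;
    ++shifts;
  }
  return shifts;
}

// Compares `left_side` (a non-negative energy sum that is 2 * `scaling`
// right-shifts smaller than its true value) against `right_side` without
// overflowing: shift `left_side` up as far as headroom allows and push the
// remainder onto `right_side` as a down-shift.
bool LeftSideIsSmaller(int32_t left_side, int32_t right_side, int scaling) {
  const int headroom = NormW32(left_side);
  if (headroom < 2 * scaling) {
    left_side <<= headroom;
    right_side >>= (2 * scaling - headroom);
  } else {
    left_side <<= 2 * scaling;
  }
  return left_side <= right_side;
}
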
diff --git a/modules/audio_coding/neteq/time_stretch.h b/modules/audio_coding/neteq/time_stretch.h
index 17ea4ec..998d080 100644
--- a/modules/audio_coding/neteq/time_stretch.h
+++ b/modules/audio_coding/neteq/time_stretch.h
@@ -58,7 +58,7 @@
                       size_t* length_change_samples);
 
  protected:
-  // Sets the parameters |best_correlation| and |peak_index| to suitable
+  // Sets the parameters `best_correlation` and `peak_index` to suitable
   // values when the signal contains no active speech. This method must be
   // implemented by the sub-classes.
   virtual void SetParametersForPassiveSpeech(size_t input_length,
@@ -91,13 +91,13 @@
   const BackgroundNoise& background_noise_;
   int16_t max_input_value_;
   int16_t downsampled_input_[kDownsampledLen];
-  // Adding 1 to the size of |auto_correlation_| because of how it is used
+  // Adding 1 to the size of `auto_correlation_` because of how it is used
   // by the peak-detection algorithm.
   int16_t auto_correlation_[kCorrelationLen + 1];
 
  private:
-  // Calculates the auto-correlation of |downsampled_input_| and writes the
-  // result to |auto_correlation_|.
+  // Calculates the auto-correlation of `downsampled_input_` and writes the
+  // result to `auto_correlation_`.
   void AutoCorrelation();
 
   // Performs a simple voice-activity detection based on the input parameters.
diff --git a/modules/audio_coding/neteq/timestamp_scaler.cc b/modules/audio_coding/neteq/timestamp_scaler.cc
index b0461bb..59177d0 100644
--- a/modules/audio_coding/neteq/timestamp_scaler.cc
+++ b/modules/audio_coding/neteq/timestamp_scaler.cc
@@ -79,7 +79,7 @@
     const int64_t internal_diff = int64_t{internal_timestamp} - internal_ref_;
     RTC_DCHECK_GT(numerator_, 0);
     // Do not update references in this method.
-    // Switch |denominator_| and |numerator_| to convert the other way.
+    // Switch `denominator_` and `numerator_` to convert the other way.
     return external_ref_ + (internal_diff * denominator_) / numerator_;
   }
 }
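
The conversion above is the inverse of ToInternal(): the same pair of reference timestamps is kept, and the numerator/denominator ratio is applied the other way around. A minimal sketch of the two directions, with hypothetical field names and a G.722-style 2:1 ratio as the assumed example:

#include <cstdint>

// Toy version of the two-way scaling: external -> internal scales the delta
// from the references by numerator / denominator; internal -> external uses
// the inverse ratio and never updates the references.
struct ToyScaler {
  uint32_t external_ref = 0;
  uint32_t internal_ref = 0;
  int64_t numerator = 2;    // e.g. an RTP clock advertised at half the rate
  int64_t denominator = 1;  // the codec actually runs at

  uint32_t ToInternal(uint32_t external_timestamp) const {
    const int64_t diff = int64_t{external_timestamp} - external_ref;
    return internal_ref +
           static_cast<uint32_t>((diff * numerator) / denominator);
  }

  uint32_t ToExternal(uint32_t internal_timestamp) const {
    const int64_t diff = int64_t{internal_timestamp} - internal_ref;
    // Same formula with `numerator` and `denominator` switched.
    return external_ref +
           static_cast<uint32_t>((diff * denominator) / numerator);
  }
};

With the 2:1 ratio, ToInternal(external_ref + 160) lands 320 ticks past internal_ref, and ToExternal maps that value back to external_ref + 160.
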
diff --git a/modules/audio_coding/neteq/timestamp_scaler.h b/modules/audio_coding/neteq/timestamp_scaler.h
index 93cb953..4d578fc 100644
--- a/modules/audio_coding/neteq/timestamp_scaler.h
+++ b/modules/audio_coding/neteq/timestamp_scaler.h
@@ -37,15 +37,15 @@
   // Start over.
   virtual void Reset();
 
-  // Scale the timestamp in |packet| from external to internal.
+  // Scale the timestamp in `packet` from external to internal.
   virtual void ToInternal(Packet* packet);
 
-  // Scale the timestamp for all packets in |packet_list| from external to
+  // Scale the timestamp for all packets in `packet_list` from external to
   // internal.
   virtual void ToInternal(PacketList* packet_list);
 
-  // Returns the internal equivalent of |external_timestamp|, given the
-  // RTP payload type |rtp_payload_type|.
+  // Returns the internal equivalent of `external_timestamp`, given the
+  // RTP payload type `rtp_payload_type`.
   virtual uint32_t ToInternal(uint32_t external_timestamp,
                               uint8_t rtp_payload_type);
 
diff --git a/modules/audio_coding/neteq/timestamp_scaler_unittest.cc b/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
index 9ba63e3..26dc06d 100644
--- a/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
+++ b/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
@@ -58,7 +58,7 @@
   // Test both sides of the timestamp wrap-around.
   static const uint32_t kStep = 160;
   uint32_t start_timestamp = 0;
-  // |external_timestamp| will be a large positive value.
+  // `external_timestamp` will be a large positive value.
   start_timestamp = start_timestamp - 5 * kStep;
   for (uint32_t timestamp = start_timestamp; timestamp != 5 * kStep;
        timestamp += kStep) {
@@ -111,7 +111,7 @@
   // Test both sides of the timestamp wrap-around.
   static const uint32_t kStep = 320;
   uint32_t external_timestamp = 0;
-  // |external_timestamp| will be a large positive value.
+  // `external_timestamp` will be a large positive value.
   external_timestamp = external_timestamp - 5 * kStep;
   uint32_t internal_timestamp = external_timestamp;
   for (; external_timestamp != 5 * kStep; external_timestamp += kStep) {
@@ -290,7 +290,7 @@
   // Test both sides of the timestamp wrap-around.
   static const uint32_t kStep = 960;
   uint32_t external_timestamp = 0;
-  // |external_timestamp| will be a large positive value.
+  // `external_timestamp` will be a large positive value.
   external_timestamp = external_timestamp - 5 * kStep;
   uint32_t internal_timestamp = external_timestamp;
   for (; external_timestamp != 5 * kStep; external_timestamp += kStep) {
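
The "`external_timestamp` will be a large positive value" comments in these tests rely on unsigned modular arithmetic: subtracting 5 * kStep from 0 wraps around to just below 2^32, so the loops start a few packets before the 32-bit wrap. A tiny self-contained illustration:

#include <cstdint>
#include <iostream>

// Unsigned arithmetic is modular, so subtracting from 0 wraps to a value
// just below 2^32, and adding steps walks back across the wrap-around.
int main() {
  const uint32_t kStep = 960;
  uint32_t timestamp = 0;
  timestamp -= 5 * kStep;           // wraps to 2^32 - 4800
  std::cout << timestamp << "\n";   // prints 4294962496
  for (int i = 0; i < 10; ++i) timestamp += kStep;
  std::cout << timestamp << "\n";   // past the wrap again: 4800
}
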
diff --git a/modules/audio_coding/neteq/tools/audio_loop.h b/modules/audio_coding/neteq/tools/audio_loop.h
index cd764cc..25da463 100644
--- a/modules/audio_coding/neteq/tools/audio_loop.h
+++ b/modules/audio_coding/neteq/tools/audio_loop.h
@@ -29,17 +29,17 @@
 
   virtual ~AudioLoop() {}
 
-  // Initializes the AudioLoop by reading from |file_name|. The loop will be no
-  // longer than |max_loop_length_samples|, if the length of the file is
+  // Initializes the AudioLoop by reading from `file_name`. The loop will be no
+  // longer than `max_loop_length_samples`, if the length of the file is
   // greater. Otherwise, the loop length is the same as the file length.
-  // The audio will be delivered in blocks of |block_length_samples|.
+  // The audio will be delivered in blocks of `block_length_samples`.
   // Returns false if the initialization failed, otherwise true.
   bool Init(const std::string file_name,
             size_t max_loop_length_samples,
             size_t block_length_samples);
 
   // Returns a (pointer,size) pair for the next block of audio. The size is
-  // equal to the |block_length_samples| Init() argument.
+  // equal to the `block_length_samples` Init() argument.
   rtc::ArrayView<const int16_t> GetNextBlock();
 
  private:
diff --git a/modules/audio_coding/neteq/tools/audio_sink.h b/modules/audio_coding/neteq/tools/audio_sink.h
index 68825eb..cd6733b 100644
--- a/modules/audio_coding/neteq/tools/audio_sink.h
+++ b/modules/audio_coding/neteq/tools/audio_sink.h
@@ -24,11 +24,11 @@
   AudioSink() {}
   virtual ~AudioSink() {}
 
-  // Writes |num_samples| from |audio| to the AudioSink. Returns true if
+  // Writes `num_samples` from `audio` to the AudioSink. Returns true if
   // successful, otherwise false.
   virtual bool WriteArray(const int16_t* audio, size_t num_samples) = 0;
 
-  // Writes |audio_frame| to the AudioSink. Returns true if successful,
+  // Writes `audio_frame` to the AudioSink. Returns true if successful,
   // otherwise false.
   bool WriteAudioFrame(const AudioFrame& audio_frame) {
     return WriteArray(audio_frame.data(), audio_frame.samples_per_channel_ *
diff --git a/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc b/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
index 6cbba20..18a9103 100644
--- a/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
+++ b/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
@@ -43,7 +43,7 @@
   for (unsigned i = 0; i < 2 * payload_len_samples_; ++i)
     packet_memory[kHeaderLenBytes + i] = encoded_sample_[i % 2];
   WriteHeader(packet_memory);
-  // |packet| assumes ownership of |packet_memory|.
+  // `packet` assumes ownership of `packet_memory`.
   auto packet =
       std::make_unique<Packet>(std::move(packet_buffer), next_arrival_time_ms_);
   next_arrival_time_ms_ += payload_len_samples_ / samples_per_ms_;
diff --git a/modules/audio_coding/neteq/tools/fake_decode_from_file.h b/modules/audio_coding/neteq/tools/fake_decode_from_file.h
index 0260981..7b53653 100644
--- a/modules/audio_coding/neteq/tools/fake_decode_from_file.h
+++ b/modules/audio_coding/neteq/tools/fake_decode_from_file.h
@@ -54,9 +54,9 @@
 
   int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
 
-  // Helper method. Writes |timestamp|, |samples| and
-  // |original_payload_size_bytes| to |encoded| in a format that the
-  // FakeDecodeFromFile decoder will understand. |encoded| must be at least 12
+  // Helper method. Writes `timestamp`, `samples` and
+  // `original_payload_size_bytes` to `encoded` in a format that the
+  // FakeDecodeFromFile decoder will understand. `encoded` must be at least 12
   // bytes long.
   static void PrepareEncoded(uint32_t timestamp,
                              size_t samples,
diff --git a/modules/audio_coding/neteq/tools/input_audio_file.cc b/modules/audio_coding/neteq/tools/input_audio_file.cc
index d5e2862..0d9f0ed 100644
--- a/modules/audio_coding/neteq/tools/input_audio_file.cc
+++ b/modules/audio_coding/neteq/tools/input_audio_file.cc
@@ -81,9 +81,9 @@
                                           size_t samples,
                                           size_t channels,
                                           int16_t* destination) {
-  // Start from the end of |source| and |destination|, and work towards the
+  // Start from the end of `source` and `destination`, and work towards the
   // beginning. This is to allow in-place interleaving of the same array (i.e.,
-  // |source| and |destination| are the same array).
+  // `source` and `destination` are the same array).
   for (int i = static_cast<int>(samples - 1); i >= 0; --i) {
     for (int j = static_cast<int>(channels - 1); j >= 0; --j) {
       destination[i * channels + j] = source[i];
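
Iterating from the end, as above, is what makes the in-place case safe: every write lands at an index greater than or equal to the mono sample still to be read. A small usage sketch of the same pattern on a single buffer (toy code, not the DuplicateInterleaved helper itself):

#include <cstddef>
#include <cstdint>
#include <vector>

int main() {
  const size_t kSamples = 4;
  const size_t kChannels = 2;
  std::vector<int16_t> buffer = {1, 2, 3, 4};  // mono input at the front
  buffer.resize(kSamples * kChannels);         // room for the stereo output
  // Work backwards so the mono samples are read before they are overwritten.
  for (int i = static_cast<int>(kSamples) - 1; i >= 0; --i) {
    for (int j = static_cast<int>(kChannels) - 1; j >= 0; --j) {
      buffer[i * kChannels + j] = buffer[i];
    }
  }
  // buffer is now {1, 1, 2, 2, 3, 3, 4, 4}.
}
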
diff --git a/modules/audio_coding/neteq/tools/input_audio_file.h b/modules/audio_coding/neteq/tools/input_audio_file.h
index 4335a99..010d8cc 100644
--- a/modules/audio_coding/neteq/tools/input_audio_file.h
+++ b/modules/audio_coding/neteq/tools/input_audio_file.h
@@ -27,22 +27,22 @@
 
   virtual ~InputAudioFile();
 
-  // Reads |samples| elements from source file to |destination|. Returns true
+  // Reads `samples` elements from source file to `destination`. Returns true
   // if the read was successful, otherwise false. If the file end is reached,
   // the file is rewound and reading continues from the beginning.
-  // The output |destination| must have the capacity to hold |samples| elements.
+  // The output `destination` must have the capacity to hold `samples` elements.
   virtual bool Read(size_t samples, int16_t* destination);
 
-  // Fast-forwards (|samples| > 0) or -backwards (|samples| < 0) the file by the
+  // Fast-forwards (`samples` > 0) or -backwards (`samples` < 0) the file by the
   // indicated number of samples. Just like Read(), Seek() starts over at the
   // beginning of the file if the end is reached. However, seeking backwards
   // past the beginning of the file is not possible.
   virtual bool Seek(int samples);
 
   // Creates a multi-channel signal from a mono signal. Each sample is repeated
-  // |channels| times to create an interleaved multi-channel signal where all
-  // channels are identical. The output |destination| must have the capacity to
-  // hold samples * channels elements. Note that |source| and |destination| can
+  // `channels` times to create an interleaved multi-channel signal where all
+  // channels are identical. The output `destination` must have the capacity to
+  // hold samples * channels elements. Note that `source` and `destination` can
   // be the same array (i.e., point to the same address).
   static void DuplicateInterleaved(const int16_t* source,
                                    size_t samples,
diff --git a/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h b/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h
index f6b895a..f56ddb7 100644
--- a/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h
+++ b/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h
@@ -47,7 +47,7 @@
   // as provided by CreateGraphs.
   void CreateMatlabScript(const std::string& script_name) const;
 
-  // Creates a python script with file name |script_name|. When executed in
+  // Creates a python script with file name `script_name`. When executed in
   // Python, the script will generate graphs with the same timing information
   // as provided by CreateGraphs.
   void CreatePythonScript(const std::string& script_name) const;
diff --git a/modules/audio_coding/neteq/tools/neteq_input.h b/modules/audio_coding/neteq/tools/neteq_input.h
index 732b807..3a66264 100644
--- a/modules/audio_coding/neteq/tools/neteq_input.h
+++ b/modules/audio_coding/neteq/tools/neteq_input.h
@@ -51,7 +51,7 @@
   absl::optional<int64_t> NextEventTime() const {
     const auto a = NextPacketTime();
     const auto b = NextOutputEventTime();
-    // Return the minimum of non-empty |a| and |b|, or empty if both are empty.
+    // Return the minimum of non-empty `a` and `b`, or empty if both are empty.
     if (a) {
       return b ? std::min(*a, *b) : a;
     }
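
The same "minimum of two optionals, empty only if both are empty" rule can be written as a free function. The sketch below uses std::optional instead of absl::optional and a hypothetical name:

#include <algorithm>
#include <cstdint>
#include <optional>

// Returns the smaller of `a` and `b` when both are set, whichever one is set
// when only one is, and an empty optional when both are empty.
std::optional<int64_t> MinEventTime(std::optional<int64_t> a,
                                    std::optional<int64_t> b) {
  if (a) {
    return b ? std::min(*a, *b) : a;
  }
  return b;
}
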
diff --git a/modules/audio_coding/neteq/tools/neteq_performance_test.cc b/modules/audio_coding/neteq/tools/neteq_performance_test.cc
index 1fb853c..ccaa87b 100644
--- a/modules/audio_coding/neteq/tools/neteq_performance_test.cc
+++ b/modules/audio_coding/neteq/tools/neteq_performance_test.cc
@@ -44,7 +44,7 @@
   auto audio_decoder_factory = CreateBuiltinAudioDecoderFactory();
   auto neteq =
       DefaultNetEqFactory().CreateNetEq(config, audio_decoder_factory, clock);
-  // Register decoder in |neteq|.
+  // Register decoder in `neteq`.
   if (!neteq->RegisterPayloadType(kPayloadType,
                                   SdpAudioFormat("l16", kSampRateHz, 1)))
     return -1;
diff --git a/modules/audio_coding/neteq/tools/neteq_performance_test.h b/modules/audio_coding/neteq/tools/neteq_performance_test.h
index d2212f0..b5b4d91 100644
--- a/modules/audio_coding/neteq/tools/neteq_performance_test.h
+++ b/modules/audio_coding/neteq/tools/neteq_performance_test.h
@@ -19,9 +19,9 @@
 class NetEqPerformanceTest {
  public:
   // Runs a performance test with parameters as follows:
-  //   |runtime_ms|: the simulation time, i.e., the duration of the audio data.
-  //   |lossrate|: drop one out of |lossrate| packets, e.g., one out of 10.
-  //   |drift_factor|: clock drift in [0, 1].
+  //   `runtime_ms`: the simulation time, i.e., the duration of the audio data.
+  //   `lossrate`: drop one out of `lossrate` packets, e.g., one out of 10.
+  //   `drift_factor`: clock drift in [0, 1].
   // Returns the runtime in ms.
   static int64_t Run(int runtime_ms, int lossrate, double drift_factor);
 };
diff --git a/modules/audio_coding/neteq/tools/neteq_quality_test.cc b/modules/audio_coding/neteq/tools/neteq_quality_test.cc
index 3f3077f..8322ac2 100644
--- a/modules/audio_coding/neteq/tools/neteq_quality_test.cc
+++ b/modules/audio_coding/neteq/tools/neteq_quality_test.cc
@@ -120,8 +120,8 @@
 
 // ProbTrans00Solver() is to calculate the transition probability from no-loss
 // state to itself in a modified Gilbert Elliot packet loss model. The result is
-// to achieve the target packet loss rate |loss_rate|, when a packet is not
-// lost only if all |units| drawings within the duration of the packet result in
+// to achieve the target packet loss rate `loss_rate`, given that a packet is not
+// lost only if all `units` drawings within the duration of the packet result in
 // no-loss.
 static double ProbTrans00Solver(int units,
                                 double loss_rate,
@@ -310,10 +310,10 @@
   int units = block_duration_ms_ / kPacketLossTimeUnitMs;
   switch (absl::GetFlag(FLAGS_random_loss_mode)) {
     case kUniformLoss: {
-      // |unit_loss_rate| is the packet loss rate for each unit time interval
+      // `unit_loss_rate` is the packet loss rate for each unit time interval
       // (kPacketLossTimeUnitMs). Since a packet loss event is generated if any
       // of |block_duration_ms_ / kPacketLossTimeUnitMs| unit time intervals of
-      // a full packet duration is drawn with a loss, |unit_loss_rate| fulfills
+      // a full packet duration is drawn with a loss, `unit_loss_rate` fulfills
       // (1 - unit_loss_rate) ^ (block_duration_ms_ / kPacketLossTimeUnitMs) ==
       // 1 - packet_loss_rate.
       double unit_loss_rate =
@@ -322,7 +322,7 @@
       break;
     }
     case kGilbertElliotLoss: {
-      // |FLAGS_burst_length| should be integer times of kPacketLossTimeUnitMs.
+      // `FLAGS_burst_length` should be an integer multiple of kPacketLossTimeUnitMs.
       ASSERT_EQ(0, absl::GetFlag(FLAGS_burst_length) % kPacketLossTimeUnitMs);
 
       // We do not allow 100 percent packet loss in Gilbert Elliot model, which
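
For the uniform-loss case above, the stated relation (1 - unit_loss_rate) ^ (block_duration_ms_ / kPacketLossTimeUnitMs) == 1 - packet_loss_rate can be solved directly for the per-unit rate. A worked example under assumed numbers (30 ms packets, 10 ms units, 10% target packet loss); the names here are illustrative, not the test's members:

#include <cmath>
#include <iostream>

// A packet survives only if every one of `units` independent per-unit draws
// survives, so the per-unit loss rate follows from the per-packet target as
//   unit_loss_rate = 1 - (1 - packet_loss_rate)^(1 / units).
int main() {
  const int units = 3;                   // 30 ms packet / 10 ms unit
  const double packet_loss_rate = 0.10;  // target: 10% of packets lost
  const double unit_loss_rate =
      1.0 - std::pow(1.0 - packet_loss_rate, 1.0 / units);
  // Check: the derived per-unit rate reproduces the per-packet target.
  std::cout << unit_loss_rate << " "
            << 1.0 - std::pow(1.0 - unit_loss_rate, units) << "\n";
}
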
diff --git a/modules/audio_coding/neteq/tools/neteq_quality_test.h b/modules/audio_coding/neteq/tools/neteq_quality_test.h
index 11d347a..edcb117 100644
--- a/modules/audio_coding/neteq/tools/neteq_quality_test.h
+++ b/modules/audio_coding/neteq/tools/neteq_quality_test.h
@@ -108,9 +108,9 @@
   void SetUp() override;
 
   // EncodeBlock(...) does the following:
-  // 1. encodes a block of audio, saved in |in_data| and has a length of
-  // |block_size_samples| (samples per channel),
-  // 2. save the bit stream to |payload| of |max_bytes| bytes in size,
+  // 1. encodes a block of audio, stored in `in_data`, with a length of
+  // `block_size_samples` (samples per channel),
+  // 2. saves the bit stream to `payload`, which is `max_bytes` bytes in size,
   // 3. returns the length of the payload (in bytes),
   virtual int EncodeBlock(int16_t* in_data,
                           size_t block_size_samples,
@@ -122,12 +122,12 @@
   bool PacketLost();
 
   // DecodeBlock() decodes a block of audio using the payload stored in
-  // |payload_| with the length of |payload_size_bytes_| (bytes). The decoded
-  // audio is to be stored in |out_data_|.
+  // `payload_` with the length of `payload_size_bytes_` (bytes). The decoded
+  // audio is to be stored in `out_data_`.
   int DecodeBlock();
 
-  // Transmit() uses |rtp_generator_| to generate a packet and passes it to
-  // |neteq_|.
+  // Transmit() uses `rtp_generator_` to generate a packet and passes it to
+  // `neteq_`.
   int Transmit();
 
   // Runs encoding / transmitting / decoding.
diff --git a/modules/audio_coding/neteq/tools/neteq_rtpplay.cc b/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
index 16a789f..39f05e5 100644
--- a/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
+++ b/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
@@ -157,7 +157,7 @@
 namespace {
 
 // Parses the input string for a valid SSRC (at the start of the string). If a
-// valid SSRC is found, it is written to the output variable |ssrc|, and true is
+// valid SSRC is found, it is written to the output variable `ssrc`, and true is
 // returned. Otherwise, false is returned.
 bool ParseSsrc(const std::string& str, uint32_t* ssrc) {
   if (str.empty())
@@ -247,7 +247,7 @@
               << std::endl;
     return false;
   }
-  // Without |output_audio_filename|, |output_files_base_name| is required when
+  // Without `output_audio_filename`, `output_files_base_name` is required when
   // plotting output files must be generated (in order to form a valid output
   // file name).
   if (output_audio_filename.empty() && plotting &&
diff --git a/modules/audio_coding/neteq/tools/neteq_test_factory.h b/modules/audio_coding/neteq/tools/neteq_test_factory.h
index fdfe650..cb9bb1c 100644
--- a/modules/audio_coding/neteq/tools/neteq_test_factory.h
+++ b/modules/audio_coding/neteq/tools/neteq_test_factory.h
@@ -134,7 +134,7 @@
     bool enable_fast_accelerate = false;
     // Dumps events that describes the simulation on a step-by-step basis.
     bool textlog = false;
-    // If specified and |textlog| is true, the output of |textlog| is written to
+    // If specified and `textlog` is true, the output of `textlog` is written to
     // the specified file name.
     absl::optional<std::string> textlog_filename;
     // Base name for the output script files for plotting the delay profile.
diff --git a/modules/audio_coding/neteq/tools/output_audio_file.h b/modules/audio_coding/neteq/tools/output_audio_file.h
index f5b0988..ad97722 100644
--- a/modules/audio_coding/neteq/tools/output_audio_file.h
+++ b/modules/audio_coding/neteq/tools/output_audio_file.h
@@ -23,7 +23,7 @@
 
 class OutputAudioFile : public AudioSink {
  public:
-  // Creates an OutputAudioFile, opening a file named |file_name| for writing.
+  // Creates an OutputAudioFile, opening a file named `file_name` for writing.
   // The file format is 16-bit signed host-endian PCM.
   explicit OutputAudioFile(const std::string& file_name) {
     out_file_ = fopen(file_name.c_str(), "wb");
diff --git a/modules/audio_coding/neteq/tools/output_wav_file.h b/modules/audio_coding/neteq/tools/output_wav_file.h
index 6982a76..ae2e970 100644
--- a/modules/audio_coding/neteq/tools/output_wav_file.h
+++ b/modules/audio_coding/neteq/tools/output_wav_file.h
@@ -22,7 +22,7 @@
 
 class OutputWavFile : public AudioSink {
  public:
-  // Creates an OutputWavFile, opening a file named |file_name| for writing.
+  // Creates an OutputWavFile, opening a file named `file_name` for writing.
   // The output file is a PCM encoded wav file.
   OutputWavFile(const std::string& file_name,
                 int sample_rate_hz,
diff --git a/modules/audio_coding/neteq/tools/packet.h b/modules/audio_coding/neteq/tools/packet.h
index ef118d9..92e5ee9 100644
--- a/modules/audio_coding/neteq/tools/packet.h
+++ b/modules/audio_coding/neteq/tools/packet.h
@@ -55,12 +55,12 @@
   virtual ~Packet();
 
   // Parses the first bytes of the RTP payload, interpreting them as RED headers
-  // according to RFC 2198. The headers will be inserted into |headers|. The
+  // according to RFC 2198. The headers will be inserted into `headers`. The
   // caller of the method assumes ownership of the objects in the list, and
   // must delete them properly.
   bool ExtractRedHeaders(std::list<RTPHeader*>* headers) const;
 
-  // Deletes all RTPHeader objects in |headers|, but does not delete |headers|
+  // Deletes all RTPHeader objects in `headers`, but does not delete `headers`
   // itself.
   static void DeleteRedHeaders(std::list<RTPHeader*>* headers);
 
diff --git a/modules/audio_coding/neteq/tools/packet_unittest.cc b/modules/audio_coding/neteq/tools/packet_unittest.cc
index 7cc9a48..69cf56b 100644
--- a/modules/audio_coding/neteq/tools/packet_unittest.cc
+++ b/modules/audio_coding/neteq/tools/packet_unittest.cc
@@ -124,17 +124,17 @@
 }
 
 namespace {
-// Writes one RED block header starting at |rtp_data|, according to RFC 2198.
+// Writes one RED block header starting at `rtp_data`, according to RFC 2198.
 // returns the number of bytes written (1 or 4).
 //
-// Format if |last_payoad| is false:
+// Format if `last_payload` is false:
 // 0                   1                    2                   3
 // 0 1 2 3 4 5 6 7 8 9 0 1 2 3  4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 // |1|   block PT  |  timestamp offset         |   block length    |
 // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 //
-// Format if |last_payoad| is true:
+// Format if `last_payload` is true:
 // 0 1 2 3 4 5 6 7
 // +-+-+-+-+-+-+-+-+
 // |0|   Block PT  |
@@ -183,7 +183,7 @@
                                  last_block, payload_ptr);
   }
   const double kPacketTime = 1.0;
-  // Hand over ownership of |packet_memory| to |packet|.
+  // Hand over ownership of `packet_memory` to `packet`.
   Packet packet(packet_memory, kPacketLengthBytes, kPacketTime);
   ASSERT_TRUE(packet.valid_header());
   EXPECT_EQ(kRedPayloadType, packet.header().payloadType);
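
The two layouts drawn in the comment earlier in this file (a 4-byte header with the F bit set for every block but the last, and a single byte with the F bit cleared for the last block) can be packed as shown below. This is an illustrative RFC 2198 writer, not the test's MakeRedHeader helper:

#include <cstddef>
#include <cstdint>

// Writes one RED block header at `rtp_data` and returns the number of bytes
// written (1 for the last block, 4 otherwise). Field widths per RFC 2198:
// F (1 bit) | block PT (7) | timestamp offset (14) | block length (10).
size_t WriteRedBlockHeader(uint8_t payload_type,
                           uint16_t timestamp_offset,  // 14 bits used
                           uint16_t block_length,      // 10 bits used
                           bool last_block,
                           uint8_t* rtp_data) {
  if (last_block) {
    rtp_data[0] = payload_type & 0x7F;  // F bit = 0.
    return 1;
  }
  rtp_data[0] = 0x80 | (payload_type & 0x7F);  // F bit = 1.
  rtp_data[1] = (timestamp_offset >> 6) & 0xFF;
  rtp_data[2] =
      ((timestamp_offset & 0x3F) << 2) | ((block_length >> 8) & 0x03);
  rtp_data[3] = block_length & 0xFF;
  return 4;
}
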
diff --git a/modules/audio_coding/neteq/tools/rtc_event_log_source.h b/modules/audio_coding/neteq/tools/rtc_event_log_source.h
index 3c91f73..d4be2a7 100644
--- a/modules/audio_coding/neteq/tools/rtc_event_log_source.h
+++ b/modules/audio_coding/neteq/tools/rtc_event_log_source.h
@@ -31,7 +31,7 @@
 
 class RtcEventLogSource : public PacketSource {
  public:
-  // Creates an RtcEventLogSource reading from |file_name|. If the file cannot
+  // Creates an RtcEventLogSource reading from `file_name`. If the file cannot
   // be opened, or has the wrong format, NULL will be returned.
   static std::unique_ptr<RtcEventLogSource> CreateFromFile(
       const std::string& file_name,
diff --git a/modules/audio_coding/neteq/tools/rtp_file_source.h b/modules/audio_coding/neteq/tools/rtp_file_source.h
index 953e2fa..3777e52 100644
--- a/modules/audio_coding/neteq/tools/rtp_file_source.h
+++ b/modules/audio_coding/neteq/tools/rtp_file_source.h
@@ -30,7 +30,7 @@
 
 class RtpFileSource : public PacketSource {
  public:
-  // Creates an RtpFileSource reading from |file_name|. If the file cannot be
+  // Creates an RtpFileSource reading from `file_name`. If the file cannot be
   // opened, or has the wrong format, NULL will be returned.
   static RtpFileSource* Create(
       const std::string& file_name,
@@ -42,7 +42,7 @@
 
   ~RtpFileSource() override;
 
-  // Registers an RTP header extension and binds it to |id|.
+  // Registers an RTP header extension and binds it to `id`.
   virtual bool RegisterRtpHeaderExtension(RTPExtensionType type, uint8_t id);
 
   std::unique_ptr<Packet> NextPacket() override;
diff --git a/modules/audio_coding/neteq/tools/rtp_generator.cc b/modules/audio_coding/neteq/tools/rtp_generator.cc
index 38c30c4f..e883fc1 100644
--- a/modules/audio_coding/neteq/tools/rtp_generator.cc
+++ b/modules/audio_coding/neteq/tools/rtp_generator.cc
@@ -50,7 +50,7 @@
   if (timestamp_ - static_cast<uint32_t>(payload_length_samples) <=
           jump_from_timestamp_ &&
       timestamp_ > jump_from_timestamp_) {
-    // We just moved across the |jump_from_timestamp_| timestamp. Do the jump.
+    // We just moved across the `jump_from_timestamp_` timestamp. Do the jump.
     timestamp_ = jump_to_timestamp_;
   }
   return ret;
diff --git a/modules/audio_coding/neteq/tools/rtp_generator.h b/modules/audio_coding/neteq/tools/rtp_generator.h
index 1454c57..6ca6e1b 100644
--- a/modules/audio_coding/neteq/tools/rtp_generator.h
+++ b/modules/audio_coding/neteq/tools/rtp_generator.h
@@ -34,9 +34,9 @@
 
   virtual ~RtpGenerator() {}
 
-  // Writes the next RTP header to |rtp_header|, which will be of type
-  // |payload_type|. Returns the send time for this packet (in ms). The value of
-  // |payload_length_samples| determines the send time for the next packet.
+  // Writes the next RTP header to `rtp_header`, which will be of type
+  // `payload_type`. Returns the send time for this packet (in ms). The value of
+  // `payload_length_samples` determines the send time for the next packet.
   virtual uint32_t GetRtpHeader(uint8_t payload_type,
                                 size_t payload_length_samples,
                                 RTPHeader* rtp_header);
diff --git a/modules/audio_coding/test/TestAllCodecs.cc b/modules/audio_coding/test/TestAllCodecs.cc
index 9cb3752..e93df34 100644
--- a/modules/audio_coding/test/TestAllCodecs.cc
+++ b/modules/audio_coding/test/TestAllCodecs.cc
@@ -320,7 +320,7 @@
   // If G.722, store half the size to compensate for the timestamp bug in the
   // RFC for G.722.
   // If iSAC runs in adaptive mode, packet size in samples can change on the
-  // fly, so we exclude this test by setting |packet_size_samples_| to -1.
+  // fly, so we exclude this test by setting `packet_size_samples_` to -1.
   int clockrate_hz = sampling_freq_hz;
   size_t num_channels = 1;
   if (absl::EqualsIgnoreCase(codec_name, "G722")) {
diff --git a/modules/audio_coding/test/TestStereo.cc b/modules/audio_coding/test/TestStereo.cc
index 61d27aa..1b1222c 100644
--- a/modules/audio_coding/test/TestStereo.cc
+++ b/modules/audio_coding/test/TestStereo.cc
@@ -509,8 +509,8 @@
   in_file_mono_->FastForward(100);
 
   while (1) {
-    // Simulate packet loss by setting |packet_loss_| to "true" in
-    // |percent_loss| percent of the loops.
+    // Simulate packet loss by setting `packet_loss_` to "true" in
+    // `percent_loss` percent of the loops.
     if (percent_loss > 0) {
       if (counter_ == floor((100 / percent_loss) + 0.5)) {
         counter_ = 0;
diff --git a/modules/audio_coding/test/TestVADDTX.h b/modules/audio_coding/test/TestVADDTX.h
index cce802d..9c6791a 100644
--- a/modules/audio_coding/test/TestVADDTX.h
+++ b/modules/audio_coding/test/TestVADDTX.h
@@ -23,7 +23,7 @@
 namespace webrtc {
 
 // This class records the frame type, and delegates actual sending to the
-// |next_| AudioPacketizationCallback.
+// `next_` AudioPacketizationCallback.
 class MonitoringAudioPacketizationCallback : public AudioPacketizationCallback {
  public:
   explicit MonitoringAudioPacketizationCallback(
@@ -67,9 +67,9 @@
   // the expectation. Saves result to a file.
   // expects[x] means
   // -1 : do not care,
-  // 0  : there have been no packets of type |x|,
-  // 1  : there have been packets of type |x|,
-  // with |x| indicates the following packet types
+  // 0  : there have been no packets of type `x`,
+  // 1  : there have been packets of type `x`,
+  // where `x` indicates one of the following packet types:
   // 0 - kEmptyFrame
   // 1 - kAudioFrameSpeech
   // 2 - kAudioFrameCN
diff --git a/modules/audio_coding/test/opus_test.cc b/modules/audio_coding/test/opus_test.cc
index 5f70c03..6822bc3 100644
--- a/modules/audio_coding/test/opus_test.cc
+++ b/modules/audio_coding/test/opus_test.cc
@@ -277,8 +277,8 @@
         ASSERT_GE(bitstream_len_byte_int, 0);
         bitstream_len_byte = static_cast<size_t>(bitstream_len_byte_int);
 
-        // Simulate packet loss by setting |packet_loss_| to "true" in
-        // |percent_loss| percent of the loops.
+        // Simulate packet loss by setting `packet_loss_` to "true" in
+        // `percent_loss` percent of the loops.
         // TODO(tlegrand): Move handling of loss simulation to TestPackStereo.
         if (percent_loss > 0) {
           if (counter_ == floor((100 / percent_loss) + 0.5)) {