Use backticks, not vertical bars, to denote variables in comments for /sdk

Bug: webrtc:12338
Change-Id: Ifaad29ccb63b0f2f3aeefb77dae061ebc7f87e6c
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227024
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34561}
diff --git a/sdk/android/api/org/webrtc/Camera1Enumerator.java b/sdk/android/api/org/webrtc/Camera1Enumerator.java
index 7f6435e..28b1046 100644
--- a/sdk/android/api/org/webrtc/Camera1Enumerator.java
+++ b/sdk/android/api/org/webrtc/Camera1Enumerator.java
@@ -158,7 +158,7 @@
     return ranges;
   }
 
-  // Returns the camera index for camera with name |deviceName|, or throws IllegalArgumentException
+  // Returns the camera index for camera with name `deviceName`, or throws IllegalArgumentException
   // if no such camera can be found.
   static int getCameraIndex(String deviceName) {
     Logging.d(TAG, "getCameraIndex: " + deviceName);
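
For readers skimming the hunk above, a minimal sketch of the contract this comment documents — getCameraIndex is package-private, so the caller below is assumed to live in org.webrtc, and the sentinel fallback is illustrative:

    package org.webrtc;

    class CameraIndexExample {
      // Resolves a camera name to its index, mapping the documented
      // IllegalArgumentException to a sentinel instead of propagating it.
      static int indexOrMinusOne(String deviceName) {
        try {
          return Camera1Enumerator.getCameraIndex(deviceName);
        } catch (IllegalArgumentException e) {
          return -1; // No camera with this name.
        }
      }
    }
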
diff --git a/sdk/android/api/org/webrtc/CameraEnumerationAndroid.java b/sdk/android/api/org/webrtc/CameraEnumerationAndroid.java
index de2b919..0c3188f 100644
--- a/sdk/android/api/org/webrtc/CameraEnumerationAndroid.java
+++ b/sdk/android/api/org/webrtc/CameraEnumerationAndroid.java
@@ -152,24 +152,24 @@
     }
   }
 
-  // Prefer a fps range with an upper bound close to |framerate|. Also prefer a fps range with a low
-  // lower bound, to allow the framerate to fluctuate based on lightning conditions.
+  // Prefer an fps range with an upper bound close to `framerate`. Also prefer an fps range with a low
+  // lower bound, to allow the framerate to fluctuate based on lighting conditions.
   public static CaptureFormat.FramerateRange getClosestSupportedFramerateRange(
       List<CaptureFormat.FramerateRange> supportedFramerates, final int requestedFps) {
     return Collections.min(
         supportedFramerates, new ClosestComparator<CaptureFormat.FramerateRange>() {
-          // Progressive penalty if the upper bound is further away than |MAX_FPS_DIFF_THRESHOLD|
+          // Progressive penalty if the upper bound is further away than `MAX_FPS_DIFF_THRESHOLD`
           // from requested.
           private static final int MAX_FPS_DIFF_THRESHOLD = 5000;
           private static final int MAX_FPS_LOW_DIFF_WEIGHT = 1;
           private static final int MAX_FPS_HIGH_DIFF_WEIGHT = 3;
 
-          // Progressive penalty if the lower bound is bigger than |MIN_FPS_THRESHOLD|.
+          // Progressive penalty if the lower bound is bigger than `MIN_FPS_THRESHOLD`.
           private static final int MIN_FPS_THRESHOLD = 8000;
           private static final int MIN_FPS_LOW_VALUE_WEIGHT = 1;
           private static final int MIN_FPS_HIGH_VALUE_WEIGHT = 4;
 
-          // Use one weight for small |value| less than |threshold|, and another weight above.
+          // Use one weight for small `value` less than `threshold`, and another weight above.
           private int progressivePenalty(int value, int threshold, int lowWeight, int highWeight) {
             return (value < threshold) ? value * lowWeight
                                        : threshold * lowWeight + (value - threshold) * highWeight;
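
The penalty scheme above is easiest to see with numbers. A standalone sketch of the same formula (copied from the hunk; the sample values assume Android's fps-times-1000 units):

    class ProgressivePenaltyExample {
      static int progressivePenalty(int value, int threshold, int lowWeight, int highWeight) {
        return (value < threshold) ? value * lowWeight
                                   : threshold * lowWeight + (value - threshold) * highWeight;
      }

      public static void main(String[] args) {
        // Below the threshold only lowWeight applies: 3000 * 1 = 3000.
        System.out.println(progressivePenalty(3000, 5000, 1, 3));
        // Above it, the excess is penalized harder: 5000 * 1 + 2000 * 3 = 11000.
        System.out.println(progressivePenalty(7000, 5000, 1, 3));
      }
    }
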
diff --git a/sdk/android/api/org/webrtc/CameraVideoCapturer.java b/sdk/android/api/org/webrtc/CameraVideoCapturer.java
index 88228ab..ec26868 100644
--- a/sdk/android/api/org/webrtc/CameraVideoCapturer.java
+++ b/sdk/android/api/org/webrtc/CameraVideoCapturer.java
@@ -48,7 +48,7 @@
    * The callback may be called on an arbitrary thread.
    */
   public interface CameraSwitchHandler {
-    // Invoked on success. |isFrontCamera| is true if the new camera is front facing.
+    // Invoked on success. `isFrontCamera` is true if the new camera is front facing.
     void onCameraSwitchDone(boolean isFrontCamera);
 
     // Invoked on failure, e.g. camera is stopped or only one camera available.
diff --git a/sdk/android/api/org/webrtc/DataChannel.java b/sdk/android/api/org/webrtc/DataChannel.java
index bcbf6f0..804915d 100644
--- a/sdk/android/api/org/webrtc/DataChannel.java
+++ b/sdk/android/api/org/webrtc/DataChannel.java
@@ -63,7 +63,7 @@
     public final ByteBuffer data;
 
     /**
-     * Indicates whether |data| contains UTF-8 text or "binary data"
+     * Indicates whether `data` contains UTF-8 text or "binary data"
      * (i.e. anything else).
      */
     public final boolean binary;
@@ -110,7 +110,7 @@
     this.nativeDataChannel = nativeDataChannel;
   }
 
-  /** Register |observer|, replacing any previously-registered observer. */
+  /** Register `observer`, replacing any previously-registered observer. */
   public void registerObserver(Observer observer) {
     checkDataChannelExists();
     if (nativeObserver != 0) {
@@ -157,7 +157,7 @@
     nativeClose();
   }
 
-  /** Send |data| to the remote peer; return success. */
+  /** Send `data` to the remote peer; return success. */
   public boolean send(Buffer buffer) {
     checkDataChannelExists();
     // TODO(fischman): this could be cleverer about avoiding copies if the
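
A hedged usage sketch of the two calls touched above: `dc` is assumed to come from PeerConnection.createDataChannel(), and `binary == false` marks the payload as UTF-8 text per the Buffer comment:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import org.webrtc.DataChannel;

    class DataChannelSendExample {
      // Sends `text` as a UTF-8 (non-binary) message; returns success.
      static boolean sendText(DataChannel dc, String text) {
        ByteBuffer bytes = ByteBuffer.wrap(text.getBytes(StandardCharsets.UTF_8));
        return dc.send(new DataChannel.Buffer(bytes, /* binary= */ false));
      }
    }
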
diff --git a/sdk/android/api/org/webrtc/EglBase.java b/sdk/android/api/org/webrtc/EglBase.java
index c1cb906..38871b3 100644
--- a/sdk/android/api/org/webrtc/EglBase.java
+++ b/sdk/android/api/org/webrtc/EglBase.java
@@ -146,8 +146,8 @@
   }
 
   /**
-   * Create a new context with the specified config attributes, sharing data with |sharedContext|.
-   * If |sharedContext| is null, a root context is created. This function will try to create an EGL
+   * Create a new context with the specified config attributes, sharing data with `sharedContext`.
+   * If `sharedContext` is null, a root context is created. This function will try to create an EGL
    * 1.4 context if possible, and an EGL 1.0 context otherwise.
    */
   public static EglBase create(@Nullable Context sharedContext, int[] configAttributes) {
@@ -171,7 +171,7 @@
   }
 
   /**
-   * Helper function for creating a plain context, sharing data with |sharedContext|. This function
+   * Helper function for creating a plain context, sharing data with `sharedContext`. This function
    * will try to create an EGL 1.4 context if possible, and an EGL 1.0 context otherwise.
    */
   public static EglBase create(Context sharedContext) {
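
A sketch of the sharing behavior described above, assuming typical usage: passing null yields a root context, while passing an existing context shares its resources:

    import org.webrtc.EglBase;

    class EglContextSharingExample {
      static void demo() {
        // Null shared context: a root context is created.
        EglBase root = EglBase.create(/* sharedContext= */ null, EglBase.CONFIG_PLAIN);
        // Shares textures and buffers with `root`.
        EglBase shared = EglBase.create(root.getEglBaseContext(), EglBase.CONFIG_PLAIN);
        shared.release();
        root.release();
      }
    }
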
diff --git a/sdk/android/api/org/webrtc/EglRenderer.java b/sdk/android/api/org/webrtc/EglRenderer.java
index 47bd0cf..106c9ad 100644
--- a/sdk/android/api/org/webrtc/EglRenderer.java
+++ b/sdk/android/api/org/webrtc/EglRenderer.java
@@ -111,8 +111,8 @@
 
   protected final String name;
 
-  // |renderThreadHandler| is a handler for communicating with |renderThread|, and is synchronized
-  // on |handlerLock|.
+  // `renderThreadHandler` is a handler for communicating with `renderThread`, and is synchronized
+  // on `handlerLock`.
   private final Object handlerLock = new Object();
   @Nullable private Handler renderThreadHandler;
 
@@ -136,11 +136,11 @@
   private boolean usePresentationTimeStamp;
   private final Matrix drawMatrix = new Matrix();
 
-  // Pending frame to render. Serves as a queue with size 1. Synchronized on |frameLock|.
+  // Pending frame to render. Serves as a queue with size 1. Synchronized on `frameLock`.
   private final Object frameLock = new Object();
   @Nullable private VideoFrame pendingFrame;
 
-  // These variables are synchronized on |layoutLock|.
+  // These variables are synchronized on `layoutLock`.
   private final Object layoutLock = new Object();
   private float layoutAspectRatio;
   // If true, mirrors the video stream horizontally.
@@ -148,7 +148,7 @@
   // If true, mirrors the video stream vertically.
   private boolean mirrorVertically;
 
-  // These variables are synchronized on |statisticsLock|.
+  // These variables are synchronized on `statisticsLock`.
   private final Object statisticsLock = new Object();
   // Total number of video frames received in renderFrame() call.
   private int framesReceived;
@@ -198,9 +198,9 @@
   }
 
   /**
-   * Initialize this class, sharing resources with |sharedContext|. The custom |drawer| will be used
+   * Initialize this class, sharing resources with `sharedContext`. The custom `drawer` will be used
    * for drawing frames on the EGLSurface. This class is responsible for calling release() on
-   * |drawer|. It is allowed to call init() to reinitialize the renderer after a previous
+   * `drawer`. It is allowed to call init() to reinitialize the renderer after a previous
    * init()/release() cycle. If usePresentationTimeStamp is true, eglPresentationTimeANDROID will be
    * set with the frame timestamps, which specifies desired presentation time and might be useful
    * for e.g. syncing audio and video.
@@ -592,10 +592,10 @@
   }
 
   /**
-   * Renders and releases |pendingFrame|.
+   * Renders and releases `pendingFrame`.
    */
   private void renderFrameOnRenderThread() {
-    // Fetch and render |pendingFrame|.
+    // Fetch and render `pendingFrame`.
     final VideoFrame frame;
     synchronized (frameLock) {
       if (pendingFrame == null) {
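
A sketch of the init()/release() cycle the class comment above permits (GlRectDrawer is the stock drawer; per the comment, the renderer is responsible for releasing it):

    import org.webrtc.EglBase;
    import org.webrtc.EglRenderer;
    import org.webrtc.GlRectDrawer;

    class EglRendererCycleExample {
      static void reinitialize(EglRenderer renderer, EglBase.Context sharedContext) {
        renderer.init(sharedContext, EglBase.CONFIG_PLAIN, new GlRectDrawer());
        renderer.release();
        // Allowed: init() again after a previous init()/release() cycle.
        renderer.init(sharedContext, EglBase.CONFIG_PLAIN, new GlRectDrawer());
      }
    }
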
diff --git a/sdk/android/api/org/webrtc/GlShader.java b/sdk/android/api/org/webrtc/GlShader.java
index 8f4cda3..7efd8d3 100644
--- a/sdk/android/api/org/webrtc/GlShader.java
+++ b/sdk/android/api/org/webrtc/GlShader.java
@@ -78,16 +78,16 @@
   }
 
   /**
-   * Enable and upload a vertex array for attribute |label|. The vertex data is specified in
-   * |buffer| with |dimension| number of components per vertex.
+   * Enable and upload a vertex array for attribute `label`. The vertex data is specified in
+   * `buffer` with `dimension` number of components per vertex.
    */
   public void setVertexAttribArray(String label, int dimension, FloatBuffer buffer) {
     setVertexAttribArray(label, dimension, 0 /* stride */, buffer);
   }
 
   /**
-   * Enable and upload a vertex array for attribute |label|. The vertex data is specified in
-   * |buffer| with |dimension| number of components per vertex and specified |stride|.
+   * Enable and upload a vertex array for attribute `label`. The vertex data is specified in
+   * `buffer` with `dimension` number of components per vertex and specified `stride`.
    */
   public void setVertexAttribArray(String label, int dimension, int stride, FloatBuffer buffer) {
     if (program == -1) {
diff --git a/sdk/android/api/org/webrtc/Metrics.java b/sdk/android/api/org/webrtc/Metrics.java
index 3098f72..2533768 100644
--- a/sdk/android/api/org/webrtc/Metrics.java
+++ b/sdk/android/api/org/webrtc/Metrics.java
@@ -18,12 +18,12 @@
 // Rtc histograms can be queried through the API, getAndReset().
 // The returned map holds the name of a histogram and its samples.
 //
-// Example of |map| with one histogram:
-// |name|: "WebRTC.Video.InputFramesPerSecond"
-//     |min|: 1
-//     |max|: 100
-//     |bucketCount|: 50
-//     |samples|: [30]:1
+// Example of `map` with one histogram:
+// `name`: "WebRTC.Video.InputFramesPerSecond"
+//     `min`: 1
+//     `max`: 100
+//     `bucketCount`: 50
+//     `samples`: [30]:1
 //
 // Most histograms are not updated frequently (e.g. most video metrics are an
 // average over the call and recorded when a stream is removed).
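
A sketch of walking the map illustrated above (the field names `map`, `min`, `max`, `bucketCount`, and `samples` are assumed from the org.webrtc.Metrics API):

    import java.util.Map;
    import org.webrtc.Metrics;

    class MetricsDumpExample {
      static void dump() {
        // getAndReset() snapshots all histograms and clears them.
        Metrics metrics = Metrics.getAndReset();
        for (Map.Entry<String, Metrics.HistogramInfo> entry : metrics.map.entrySet()) {
          Metrics.HistogramInfo info = entry.getValue();
          System.out.println(entry.getKey() + ": min=" + info.min + " max=" + info.max
              + " bucketCount=" + info.bucketCount + " samples=" + info.samples);
        }
      }
    }
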
diff --git a/sdk/android/api/org/webrtc/NetworkChangeDetector.java b/sdk/android/api/org/webrtc/NetworkChangeDetector.java
index a845c78..65a78c4 100644
--- a/sdk/android/api/org/webrtc/NetworkChangeDetector.java
+++ b/sdk/android/api/org/webrtc/NetworkChangeDetector.java
@@ -98,9 +98,9 @@
 
     /**
-     * Called when network preference change for a (list of) connection type(s). (e.g WIFI) is
-     * |NOT_PREFERRED| or |NEUTRAL|.
+     * Called when the network preference changes for a (list of) connection type(s), e.g. when
+     * WIFI becomes `NOT_PREFERRED` or `NEUTRAL`.
      *
-     * <p>note: |types| is a list of ConnectionTypes, so that all cellular types can be modified in
+     * <p>note: `types` is a list of ConnectionTypes, so that all cellular types can be modified in
      * one call.
      */
     public void onNetworkPreference(List<ConnectionType> types, @NetworkPreference int preference);
diff --git a/sdk/android/api/org/webrtc/NetworkMonitorAutoDetect.java b/sdk/android/api/org/webrtc/NetworkMonitorAutoDetect.java
index 3d233b3..6b7a02f 100644
--- a/sdk/android/api/org/webrtc/NetworkMonitorAutoDetect.java
+++ b/sdk/android/api/org/webrtc/NetworkMonitorAutoDetect.java
@@ -172,7 +172,7 @@
     }
 
     /**
-     * Returns connection type and status information about |network|.
+     * Returns connection type and status information about `network`.
      * Only callable on Lollipop and newer releases.
      */
     @SuppressLint("NewApi")
@@ -186,9 +186,9 @@
         return new NetworkState(false, -1, -1, -1, -1);
       }
       // The general logic of handling a VPN in this method is as follows. getNetworkInfo will
-      // return the info of the network with the same id as in |network| when it is registered via
-      // ConnectivityManager.registerNetworkAgent in Android. |networkInfo| may or may not indicate
-      // the type TYPE_VPN if |network| is a VPN. To reliably detect the VPN interface, we need to
+      // return the info of the network with the same id as in `network` when it is registered via
+      // ConnectivityManager.registerNetworkAgent in Android. `networkInfo` may or may not indicate
+      // the type TYPE_VPN if `network` is a VPN. To reliably detect the VPN interface, we need to
       // query the network capability as below in the case when networkInfo.getType() is not
       // TYPE_VPN. On the other hand when networkInfo.getType() is TYPE_VPN, the only solution so
       // far to obtain the underlying network information is to query the active network interface.
@@ -198,7 +198,7 @@
       // getActiveNetworkInfo may thus give the wrong interface information, and one should note
       // that getActiveNetworkInfo would return the default network interface if the VPN does not
-      // specify its underlying networks in the implementation. Therefore, we need further compare
-      // |network| to the active network. If they are not the same network, we will have to fall
+      // specify its underlying networks in the implementation. Therefore, we need to further compare
+      // `network` to the active network. If they are not the same network, we will have to fall
       // back to report an unknown network.
 
       if (networkInfo.getType() != ConnectivityManager.TYPE_VPN) {
@@ -209,15 +209,15 @@
             || !networkCapabilities.hasTransport(NetworkCapabilities.TRANSPORT_VPN)) {
           return getNetworkState(networkInfo);
         }
-        // When |network| is in fact a VPN after querying its capability but |networkInfo| is not of
-        // type TYPE_VPN, |networkInfo| contains the info for the underlying network, and we return
+        // When `network` is in fact a VPN after querying its capability but `networkInfo` is not of
+        // type TYPE_VPN, `networkInfo` contains the info for the underlying network, and we return
         // a NetworkState constructed from it.
         return new NetworkState(networkInfo.isConnected(), ConnectivityManager.TYPE_VPN, -1,
             networkInfo.getType(), networkInfo.getSubtype());
       }
 
-      // When |networkInfo| is of type TYPE_VPN, which implies |network| is a VPN, we return the
-      // NetworkState of the active network via getActiveNetworkInfo(), if |network| is the active
+      // When `networkInfo` is of type TYPE_VPN, which implies `network` is a VPN, we return the
+      // NetworkState of the active network via getActiveNetworkInfo(), if `network` is the active
       // network that supports the VPN. Otherwise, NetworkState of an unknown network with type -1
       // will be returned.
       //
diff --git a/sdk/android/api/org/webrtc/PeerConnection.java b/sdk/android/api/org/webrtc/PeerConnection.java
index 67705ba..ac259ae 100644
--- a/sdk/android/api/org/webrtc/PeerConnection.java
+++ b/sdk/android/api/org/webrtc/PeerConnection.java
@@ -169,9 +169,9 @@
     public final String password;
     public final TlsCertPolicy tlsCertPolicy;
 
-    // If the URIs in |urls| only contain IP addresses, this field can be used
+    // If the URIs in `urls` only contain IP addresses, this field can be used
     // to indicate the hostname, which may be necessary for TLS (using the SNI
-    // extension). If |urls| itself contains the hostname, this isn't
+    // extension). If `urls` itself contains the hostname, this isn't
     // necessary.
     public final String hostname;
 
@@ -1106,7 +1106,7 @@
    * transceiver will cause future calls to CreateOffer to add a media description
    * for the corresponding transceiver.
    *
-   * <p>The initial value of |mid| in the returned transceiver is null. Setting a
+   * <p>The initial value of `mid` in the returned transceiver is null. Setting a
    * new session description may change it to a non-null value.
    *
    * <p>https://w3c.github.io/webrtc-pc/#dom-rtcpeerconnection-addtransceiver
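
A sketch of the `mid` lifecycle described above; `pc` is assumed to be an existing PeerConnection:

    import org.webrtc.MediaStreamTrack;
    import org.webrtc.PeerConnection;
    import org.webrtc.RtpTransceiver;

    class AddTransceiverExample {
      static void demo(PeerConnection pc) {
        RtpTransceiver transceiver =
            pc.addTransceiver(MediaStreamTrack.MediaType.MEDIA_TYPE_AUDIO);
        // Null until a session description assigns a non-null value.
        System.out.println("mid=" + transceiver.getMid());
      }
    }
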
diff --git a/sdk/android/api/org/webrtc/PeerConnectionFactory.java b/sdk/android/api/org/webrtc/PeerConnectionFactory.java
index c87e639..1777ade 100644
--- a/sdk/android/api/org/webrtc/PeerConnectionFactory.java
+++ b/sdk/android/api/org/webrtc/PeerConnectionFactory.java
@@ -133,7 +133,7 @@
   public static class Options {
     // Keep in sync with webrtc/rtc_base/network.h!
     //
-    // These bit fields are defined for |networkIgnoreMask| below.
+    // These bit fields are defined for `networkIgnoreMask` below.
     static final int ADAPTER_TYPE_UNKNOWN = 0;
     static final int ADAPTER_TYPE_ETHERNET = 1 << 0;
     static final int ADAPTER_TYPE_WIFI = 1 << 1;
diff --git a/sdk/android/api/org/webrtc/RTCStats.java b/sdk/android/api/org/webrtc/RTCStats.java
index 573d953..eaa28de 100644
--- a/sdk/android/api/org/webrtc/RTCStats.java
+++ b/sdk/android/api/org/webrtc/RTCStats.java
@@ -14,8 +14,8 @@
 
 /**
  * Java version of webrtc::RTCStats. Represents an RTCStats object, as
- * described in https://w3c.github.io/webrtc-stats/. The |id|, |timestampUs|
- * and |type| accessors have the same meaning for this class as for the
+ * described in https://w3c.github.io/webrtc-stats/. The `id`, `timestampUs`
+ * and `type` accessors have the same meaning for this class as for the
  * RTCStats dictionary. Each RTCStatsReport produced by getStats contains
  * multiple RTCStats objects; one for each underlying object (codec, stream,
  * transport, etc.) that was inspected to produce the stats.
diff --git a/sdk/android/api/org/webrtc/RendererCommon.java b/sdk/android/api/org/webrtc/RendererCommon.java
index 5865b07..b97901c 100644
--- a/sdk/android/api/org/webrtc/RendererCommon.java
+++ b/sdk/android/api/org/webrtc/RendererCommon.java
@@ -123,9 +123,9 @@
   //    clipped.
   // SCALE_ASPECT_BALANCED - Compromise between FIT and FILL. Video frame will fill as much as
   // possible of the view while maintaining aspect ratio, under the constraint that at least
-  // |BALANCED_VISIBLE_FRACTION| of the frame content will be shown.
+  // `BALANCED_VISIBLE_FRACTION` of the frame content will be shown.
   public static enum ScalingType { SCALE_ASPECT_FIT, SCALE_ASPECT_FILL, SCALE_ASPECT_BALANCED }
-  // The minimum fraction of the frame content that will be shown for |SCALE_ASPECT_BALANCED|.
+  // The minimum fraction of the frame content that will be shown for `SCALE_ASPECT_BALANCED`.
   // This limits excessive cropping when adjusting display size.
   private static float BALANCED_VISIBLE_FRACTION = 0.5625f;
 
@@ -209,7 +209,7 @@
   }
 
   /**
-   * Move |matrix| transformation origin to (0.5, 0.5). This is the origin for texture coordinates
+   * Move `matrix` transformation origin to (0.5, 0.5). This is the origin for texture coordinates
    * that are in the range 0 to 1.
    */
   private static void adjustOrigin(float[] matrix) {
diff --git a/sdk/android/api/org/webrtc/RtpSender.java b/sdk/android/api/org/webrtc/RtpSender.java
index bc894e5..7018c57 100644
--- a/sdk/android/api/org/webrtc/RtpSender.java
+++ b/sdk/android/api/org/webrtc/RtpSender.java
@@ -39,7 +39,7 @@
    *
    * @param takeOwnership If true, the RtpSender takes ownership of the track
    *                      from the caller, and will auto-dispose of it when no
-   *                      longer needed. |takeOwnership| should only be used if
+   *                      longer needed. `takeOwnership` should only be used if
    *                      the caller owns the track; it is not appropriate when
    *                      the track is owned by, for example, another RtpSender
    *                      or a MediaStream.
diff --git a/sdk/android/api/org/webrtc/SurfaceEglRenderer.java b/sdk/android/api/org/webrtc/SurfaceEglRenderer.java
index 7a6db15..6cba3f4 100644
--- a/sdk/android/api/org/webrtc/SurfaceEglRenderer.java
+++ b/sdk/android/api/org/webrtc/SurfaceEglRenderer.java
@@ -42,9 +42,9 @@
   }
 
   /**
-   * Initialize this class, sharing resources with |sharedContext|. The custom |drawer| will be used
+   * Initialize this class, sharing resources with `sharedContext`. The custom `drawer` will be used
    * for drawing frames on the EGLSurface. This class is responsible for calling release() on
-   * |drawer|. It is allowed to call init() to reinitialize the renderer after a previous
+   * `drawer`. It is allowed to call init() to reinitialize the renderer after a previous
    * init()/release() cycle.
    */
   public void init(final EglBase.Context sharedContext,
@@ -125,7 +125,7 @@
     logD("surfaceChanged: format: " + format + " size: " + width + "x" + height);
   }
 
-  // Update frame dimensions and report any changes to |rendererEvents|.
+  // Update frame dimensions and report any changes to `rendererEvents`.
   private void updateFrameDimensionsAndReportEvents(VideoFrame frame) {
     synchronized (layoutLock) {
       if (isRenderingPaused) {
diff --git a/sdk/android/api/org/webrtc/SurfaceTextureHelper.java b/sdk/android/api/org/webrtc/SurfaceTextureHelper.java
index 0dd45cf..085576b 100644
--- a/sdk/android/api/org/webrtc/SurfaceTextureHelper.java
+++ b/sdk/android/api/org/webrtc/SurfaceTextureHelper.java
@@ -48,7 +48,7 @@
 
   private static final String TAG = "SurfaceTextureHelper";
   /**
-   * Construct a new SurfaceTextureHelper sharing OpenGL resources with |sharedContext|. A dedicated
+   * Construct a new SurfaceTextureHelper sharing OpenGL resources with `sharedContext`. A dedicated
    * thread and handler is created for handling the SurfaceTexture. May return null if EGL fails to
    * initialize a pixel buffer surface and make it current. If alignTimestamps is true, the frame
    * timestamps will be aligned to rtc::TimeNanos(). If frame timestamps are aligned to
@@ -66,7 +66,7 @@
     // The onFrameAvailable() callback will be executed on the SurfaceTexture ctor thread. See:
     // http://grepcode.com/file/repository.grepcode.com/java/ext/com.google.android/android/5.1.1_r1/android/graphics/SurfaceTexture.java#195.
     // Therefore, in order to control the callback thread on API lvl < 21, the SurfaceTextureHelper
-    // is constructed on the |handler| thread.
+    // is constructed on the `handler` thread.
     return ThreadUtils.invokeAtFrontUninterruptibly(handler, new Callable<SurfaceTextureHelper>() {
       @Nullable
       @Override
@@ -147,7 +147,7 @@
   @Nullable private final TimestampAligner timestampAligner;
   private final FrameRefMonitor frameRefMonitor;
 
-  // These variables are only accessed from the |handler| thread.
+  // These variables are only accessed from the `handler` thread.
   @Nullable private VideoSink listener;
   // The possible states of this class.
   private boolean hasPendingTexture;
@@ -156,7 +156,7 @@
   private int frameRotation;
   private int textureWidth;
   private int textureHeight;
-  // |pendingListener| is set in setListener() and the runnable is posted to the handler thread.
+  // `pendingListener` is set in setListener() and the runnable is posted to the handler thread.
   // setListener() is not allowed to be called again before stopListening(), so this is thread safe.
   @Nullable private VideoSink pendingListener;
   final Runnable setListenerRunnable = new Runnable() {
@@ -223,7 +223,7 @@
   }
 
   /**
-   * Start to stream textures to the given |listener|. If you need to change listener, you need to
+   * Start to stream textures to the given `listener`. If you need to change listener, you need to
    * call stopListening() first.
    */
   public void startListening(final VideoSink listener) {
@@ -331,7 +331,7 @@
   }
 
   /**
-   * Posts to the correct thread to convert |textureBuffer| to I420.
+   * Posts to the correct thread to convert `textureBuffer` to I420.
    *
    * @deprecated Use toI420() instead.
    */
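
A sketch of the listener rules in the comments above: stopListening() must separate two startListening() calls (the thread name is illustrative):

    import org.webrtc.EglBase;
    import org.webrtc.SurfaceTextureHelper;
    import org.webrtc.VideoSink;

    class TextureListenerExample {
      static void swapListeners(EglBase.Context sharedContext, VideoSink first, VideoSink second) {
        SurfaceTextureHelper helper = SurfaceTextureHelper.create("CaptureThread", sharedContext);
        helper.startListening(first);
        // Required before startListening() may be called again.
        helper.stopListening();
        helper.startListening(second);
        helper.dispose();
      }
    }
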
diff --git a/sdk/android/api/org/webrtc/SurfaceViewRenderer.java b/sdk/android/api/org/webrtc/SurfaceViewRenderer.java
index f62d274..6c9140a 100644
--- a/sdk/android/api/org/webrtc/SurfaceViewRenderer.java
+++ b/sdk/android/api/org/webrtc/SurfaceViewRenderer.java
@@ -64,7 +64,7 @@
   }
 
   /**
-   * Initialize this class, sharing resources with |sharedContext|. It is allowed to call init() to
+   * Initialize this class, sharing resources with `sharedContext`. It is allowed to call init() to
    * reinitialize the renderer after a previous init()/release() cycle.
    */
   public void init(EglBase.Context sharedContext, RendererCommon.RendererEvents rendererEvents) {
@@ -72,9 +72,9 @@
   }
 
   /**
-   * Initialize this class, sharing resources with |sharedContext|. The custom |drawer| will be used
+   * Initialize this class, sharing resources with `sharedContext`. The custom `drawer` will be used
    * for drawing frames on the EGLSurface. This class is responsible for calling release() on
-   * |drawer|. It is allowed to call init() to reinitialize the renderer after a previous
+   * `drawer`. It is allowed to call init() to reinitialize the renderer after a previous
    * init()/release() cycle.
    */
   public void init(final EglBase.Context sharedContext,
diff --git a/sdk/android/api/org/webrtc/TimestampAligner.java b/sdk/android/api/org/webrtc/TimestampAligner.java
index 7c4bed4..d96c939 100644
--- a/sdk/android/api/org/webrtc/TimestampAligner.java
+++ b/sdk/android/api/org/webrtc/TimestampAligner.java
@@ -31,7 +31,7 @@
 
   /**
    * Translates camera timestamps to the same timescale as is used by rtc::TimeNanos().
-   * |cameraTimeNs| is assumed to be accurate, but with an unknown epoch and clock drift. Returns
+   * `cameraTimeNs` is assumed to be accurate, but with an unknown epoch and clock drift. Returns
    * the translated timestamp.
    */
   public long translateTimestamp(long cameraTimeNs) {
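
A sketch of the translation described above, with a hypothetical camera callback feeding it:

    import org.webrtc.TimestampAligner;

    class CameraClockExample {
      private final TimestampAligner aligner = new TimestampAligner();

      // Maps an accurate-but-drifting camera clock onto rtc::TimeNanos().
      long onCameraFrame(long cameraTimeNs) {
        return aligner.translateTimestamp(cameraTimeNs);
      }

      void close() {
        aligner.dispose();
      }
    }
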
diff --git a/sdk/android/api/org/webrtc/VideoEncoder.java b/sdk/android/api/org/webrtc/VideoEncoder.java
index 4604281..352c702 100644
--- a/sdk/android/api/org/webrtc/VideoEncoder.java
+++ b/sdk/android/api/org/webrtc/VideoEncoder.java
@@ -238,7 +238,7 @@
 
   public interface Callback {
     /**
-     * Old encoders assume that the byte buffer held by |frame| is not accessed after the call to
+     * Old encoders assume that the byte buffer held by `frame` is not accessed after the call to
      * this method returns. If the pipeline downstream needs to hold on to the buffer, it then has
      * to make its own copy. We want to move to a model where no copying is needed, and instead use
      * retain()/release() to signal to the encoder when it is safe to reuse the buffer.
diff --git a/sdk/android/api/org/webrtc/VideoFrame.java b/sdk/android/api/org/webrtc/VideoFrame.java
index bb30069..0066354 100644
--- a/sdk/android/api/org/webrtc/VideoFrame.java
+++ b/sdk/android/api/org/webrtc/VideoFrame.java
@@ -60,8 +60,8 @@
     @Override @CalledByNative("Buffer") void release();
 
     /**
-     * Crops a region defined by |cropx|, |cropY|, |cropWidth| and |cropHeight|. Scales it to size
-     * |scaleWidth| x |scaleHeight|.
+     * Crops a region defined by `cropX`, `cropY`, `cropWidth` and `cropHeight`. Scales it to size
+     * `scaleWidth` x `scaleHeight`.
      */
     @CalledByNative("Buffer")
     Buffer cropAndScale(
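
A sketch of the crop-then-scale operation documented above: a centered square crop scaled to a fixed size (the 320x320 target is illustrative):

    import org.webrtc.VideoFrame;

    class CenterCropExample {
      static VideoFrame.Buffer centerSquare(VideoFrame.Buffer buffer) {
        int side = Math.min(buffer.getWidth(), buffer.getHeight());
        int cropX = (buffer.getWidth() - side) / 2;
        int cropY = (buffer.getHeight() - side) / 2;
        // Returns a new buffer; release it independently of `buffer`.
        return buffer.cropAndScale(cropX, cropY, side, side, 320, 320);
      }
    }
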
diff --git a/sdk/android/api/org/webrtc/VideoFrameDrawer.java b/sdk/android/api/org/webrtc/VideoFrameDrawer.java
index 66c1c4f..cb4bb01 100644
--- a/sdk/android/api/org/webrtc/VideoFrameDrawer.java
+++ b/sdk/android/api/org/webrtc/VideoFrameDrawer.java
@@ -61,7 +61,7 @@
     @Nullable private int[] yuvTextures;
 
     /**
-     * Upload |planes| into OpenGL textures, taking stride into consideration.
+     * Upload `planes` into OpenGL textures, taking stride into consideration.
      *
      * @return Array of three texture indices corresponding to Y-, U-, and V-plane respectively.
      */
@@ -145,8 +145,8 @@
   private int renderWidth;
   private int renderHeight;
 
-  // Calculate the frame size after |renderMatrix| is applied. Stores the output in member variables
-  // |renderWidth| and |renderHeight| to avoid allocations since this function is called for every
+  // Calculate the frame size after `renderMatrix` is applied. Stores the output in member variables
+  // `renderWidth` and `renderHeight` to avoid allocations since this function is called for every
   // frame.
   private void calculateTransformedRenderSize(
       int frameWidth, int frameHeight, @Nullable Matrix renderMatrix) {
@@ -155,7 +155,7 @@
       renderHeight = frameHeight;
       return;
     }
-    // Transform the texture coordinates (in the range [0, 1]) according to |renderMatrix|.
+    // Transform the texture coordinates (in the range [0, 1]) according to `renderMatrix`.
     renderMatrix.mapPoints(dstPoints, srcPoints);
 
     // Multiply with the width and height to get the positions in terms of pixels.
diff --git a/sdk/android/api/org/webrtc/YuvConverter.java b/sdk/android/api/org/webrtc/YuvConverter.java
index 9c00678..10b9cc1 100644
--- a/sdk/android/api/org/webrtc/YuvConverter.java
+++ b/sdk/android/api/org/webrtc/YuvConverter.java
@@ -153,7 +153,7 @@
     //    +----+----+
     //
     // In memory, we use the same stride for all of Y, U and V. The
-    // U data starts at offset |height| * |stride| from the Y data,
-    // and the V data starts at at offset |stride/2| from the U
+    // U data starts at offset `height` * `stride` from the Y data,
+    // and the V data starts at offset `stride/2` from the U
     // data, with rows of U and V data alternating.
     //
@@ -161,12 +161,12 @@
     // a single byte per pixel (EGL10.EGL_COLOR_BUFFER_TYPE,
     // EGL10.EGL_LUMINANCE_BUFFER,), but that seems to be
     // unsupported by devices. So do the following hack: Allocate an
-    // RGBA buffer, of width |stride|/4. To render each of these
+    // RGBA buffer, of width `stride`/4. To render each of these
     // large pixels, sample the texture at 4 different x coordinates
     // and store the results in the four components.
     //
     // Since the V data needs to start on a boundary of such a
-    // larger pixel, it is not sufficient that |stride| is even, it
+    // larger pixel, it is not sufficient that `stride` is even, it
     // has to be a multiple of 8 pixels.
     final int frameWidth = preparedBuffer.getWidth();
     final int frameHeight = preparedBuffer.getHeight();
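
The layout described in this comment block, written out as arithmetic (a sketch; per the comment, all three planes share one stride and `stride` must be a multiple of 8):

    class YuvLayoutExample {
      public static void main(String[] args) {
        int height = 480;
        int stride = 640; // Multiple of 8, as required above.
        int uOffset = height * stride;        // U starts right after the Y plane: 307200.
        int vOffset = uOffset + stride / 2;   // V starts stride/2 into the U/V rows: 307520.
        int totalBytes = stride * height * 3 / 2; // Y plus alternating U/V rows: 460800.
        System.out.println(uOffset + " " + vOffset + " " + totalBytes);
      }
    }
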
diff --git a/sdk/android/instrumentationtests/src/org/webrtc/CameraVideoCapturerTestFixtures.java b/sdk/android/instrumentationtests/src/org/webrtc/CameraVideoCapturerTestFixtures.java
index 56b7440..e32e67b 100644
--- a/sdk/android/instrumentationtests/src/org/webrtc/CameraVideoCapturerTestFixtures.java
+++ b/sdk/android/instrumentationtests/src/org/webrtc/CameraVideoCapturerTestFixtures.java
@@ -541,7 +541,7 @@
     capturerInstance.capturer.stopCapture();
     capturerInstance.observer.releaseFrame();
 
-    // We can't change |capturer| at this point, but we should not crash.
+    // We can't change `capturer` at this point, but we should not crash.
     capturerInstance.capturer.switchCamera(null /* switchEventsHandler */);
     capturerInstance.capturer.changeCaptureFormat(DEFAULT_WIDTH, DEFAULT_HEIGHT, DEFAULT_FPS);
 
diff --git a/sdk/android/instrumentationtests/src/org/webrtc/PeerConnectionTest.java b/sdk/android/instrumentationtests/src/org/webrtc/PeerConnectionTest.java
index f1141e1..bde4960 100644
--- a/sdk/android/instrumentationtests/src/org/webrtc/PeerConnectionTest.java
+++ b/sdk/android/instrumentationtests/src/org/webrtc/PeerConnectionTest.java
@@ -145,7 +145,7 @@
   // TODO(fischman) MOAR test ideas:
   // - Test that PC.removeStream() works; requires a second
   //   createOffer/createAnswer dance.
-  // - audit each place that uses |constraints| for specifying non-trivial
+  // - audit each place that uses `constraints` for specifying non-trivial
   //   constraints (and ensure they're honored).
   // - test error cases
   // - ensure reasonable coverage of jni code is achieved.  Coverage is
diff --git a/sdk/android/instrumentationtests/src/org/webrtc/SurfaceTextureHelperTest.java b/sdk/android/instrumentationtests/src/org/webrtc/SurfaceTextureHelperTest.java
index 40048e1..1d5ae93 100644
--- a/sdk/android/instrumentationtests/src/org/webrtc/SurfaceTextureHelperTest.java
+++ b/sdk/android/instrumentationtests/src/org/webrtc/SurfaceTextureHelperTest.java
@@ -123,8 +123,8 @@
     surfaceTextureHelper.startListening(listener);
     surfaceTextureHelper.setTextureSize(width, height);
 
-    // Create resources for stubbing an OES texture producer. |eglOesBase| has the SurfaceTexture in
-    // |surfaceTextureHelper| as the target EGLSurface.
+    // Create resources for stubbing an OES texture producer. `eglOesBase` has the SurfaceTexture in
+    // `surfaceTextureHelper` as the target EGLSurface.
     final EglBase eglOesBase = EglBase.create(eglBase.getEglBaseContext(), EglBase.CONFIG_PLAIN);
     eglOesBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
     assertEquals(eglOesBase.surfaceWidth(), width);
@@ -191,8 +191,8 @@
     surfaceTextureHelper.startListening(listener);
     surfaceTextureHelper.setTextureSize(width, height);
 
-    // Create resources for stubbing an OES texture producer. |eglOesBase| has the SurfaceTexture in
-    // |surfaceTextureHelper| as the target EGLSurface.
+    // Create resources for stubbing an OES texture producer. `eglOesBase` has the SurfaceTexture in
+    // `surfaceTextureHelper` as the target EGLSurface.
     final EglBase eglOesBase = EglBase.create(eglBase.getEglBaseContext(), EglBase.CONFIG_PLAIN);
     eglOesBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
     assertEquals(eglOesBase.surfaceWidth(), width);
@@ -410,7 +410,7 @@
     eglBase.swapBuffers();
     listener1.waitForTextureBuffer().release();
 
-    // Stop listening - |listener1| should not receive any textures after this.
+    // Stop listening - `listener1` should not receive any textures after this.
     surfaceTextureHelper.stopListening();
 
     // Connect different listener.
@@ -423,7 +423,7 @@
     GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
     eglBase.swapBuffers();
 
-    // Check that |listener2| received the frame, and not |listener1|.
+    // Check that `listener2` received the frame, and not `listener1`.
     listener2.waitForTextureBuffer().release();
     listener1.assertNoFrameIsDelivered(/* waitPeriodMs= */ 1);
 
@@ -446,8 +446,8 @@
     surfaceTextureHelper.startListening(listener);
     surfaceTextureHelper.setTextureSize(width, height);
 
-    // Create resources for stubbing an OES texture producer. |eglBase| has the SurfaceTexture in
-    // |surfaceTextureHelper| as the target EGLSurface.
+    // Create resources for stubbing an OES texture producer. `eglBase` has the SurfaceTexture in
+    // `surfaceTextureHelper` as the target EGLSurface.
 
     eglBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
     assertEquals(eglBase.surfaceWidth(), width);
diff --git a/sdk/android/instrumentationtests/src/org/webrtc/VideoFrameBufferTest.java b/sdk/android/instrumentationtests/src/org/webrtc/VideoFrameBufferTest.java
index 4388e4a..096edc9 100644
--- a/sdk/android/instrumentationtests/src/org/webrtc/VideoFrameBufferTest.java
+++ b/sdk/android/instrumentationtests/src/org/webrtc/VideoFrameBufferTest.java
@@ -153,7 +153,7 @@
   }
 
   /**
-   * Create an RGB texture buffer available in |eglContext| with the same pixel content as the given
+   * Create an RGB texture buffer available in `eglContext` with the same pixel content as the given
    * I420 buffer.
    */
   public static VideoFrame.TextureBuffer createRgbTextureBuffer(
@@ -191,7 +191,7 @@
   }
 
   /**
-   * Create an OES texture buffer available in |eglContext| with the same pixel content as the given
+   * Create an OES texture buffer available in `eglContext` with the same pixel content as the given
    * I420 buffer.
    */
   public static VideoFrame.TextureBuffer createOesTextureBuffer(
diff --git a/sdk/android/native_api/jni/class_loader.cc b/sdk/android/native_api/jni/class_loader.cc
index d0aa395..1789d78 100644
--- a/sdk/android/native_api/jni/class_loader.cc
+++ b/sdk/android/native_api/jni/class_loader.cc
@@ -18,7 +18,7 @@
 #include "sdk/android/native_api/jni/java_types.h"
 #include "sdk/android/native_api/jni/scoped_java_ref.h"
 
-// Abort the process if |jni| has a Java exception pending. This macros uses the
+// Abort the process if `jni` has a Java exception pending. This macro uses the
 // comma operator to execute ExceptionDescribe and ExceptionClear ignoring their
 // return values and sending "" to the error stream.
 #define CHECK_EXCEPTION(jni)        \
diff --git a/sdk/android/native_api/jni/java_types.h b/sdk/android/native_api/jni/java_types.h
index a1639d6..8a13e3e 100644
--- a/sdk/android/native_api/jni/java_types.h
+++ b/sdk/android/native_api/jni/java_types.h
@@ -30,7 +30,7 @@
 #include "rtc_base/checks.h"
 #include "sdk/android/native_api/jni/scoped_java_ref.h"
 
-// Abort the process if |jni| has a Java exception pending.
+// Abort the process if `jni` has a Java exception pending.
-// This macros uses the comma operator to execute ExceptionDescribe
+// This macro uses the comma operator to execute ExceptionDescribe
 // and ExceptionClear ignoring their return values and sending ""
 // to the error stream.
@@ -110,7 +110,7 @@
   RTC_DISALLOW_COPY_AND_ASSIGN(Iterable);
 };
 
-// Returns true if |obj| == null in Java.
+// Returns true if `obj` == null in Java.
 bool IsNull(JNIEnv* jni, const JavaRef<jobject>& obj);
 
 // Returns the name of a Java enum.
@@ -319,7 +319,7 @@
   return builder.GetJavaMap();
 }
 
-// Return a |jlong| that will correctly convert back to |ptr|.  This is needed
+// Return a `jlong` that will correctly convert back to `ptr`.  This is needed
 // because the alternative (of silently passing a 32-bit pointer to a vararg
 // function expecting a 64-bit param) picks up garbage in the high 32 bits.
 jlong NativeToJavaPointer(void* ptr);
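
The Java-side half of this round trip, sketched with hypothetical native methods (the same pattern as the `nativeDataChannel` handle in DataChannel.java earlier in this diff):

    class NativeHandleExample {
      // The full 64 bits produced by NativeToJavaPointer() live in a long,
      // so the C++ side can cast the jlong straight back to the pointer.
      private long nativeHandle;

      NativeHandleExample(long nativeHandle) {
        this.nativeHandle = nativeHandle;
      }

      void close() {
        nativeDestroy(nativeHandle); // Hypothetical native method.
        nativeHandle = 0;
      }

      private static native void nativeDestroy(long nativeHandle);
    }
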
diff --git a/sdk/android/native_api/jni/scoped_java_ref.h b/sdk/android/native_api/jni/scoped_java_ref.h
index ac2c4f4..634d35a 100644
--- a/sdk/android/native_api/jni/scoped_java_ref.h
+++ b/sdk/android/native_api/jni/scoped_java_ref.h
@@ -74,7 +74,7 @@
 template <typename T>
 class JavaParamRef : public JavaRef<T> {
  public:
-  // Assumes that |obj| is a parameter passed to a JNI method from Java.
+  // Assumes that `obj` is a parameter passed to a JNI method from Java.
   // Does not assume ownership as parameters should not be deleted.
   explicit JavaParamRef(T obj) : JavaRef<T>(obj) {}
   JavaParamRef(JNIEnv*, T obj) : JavaRef<T>(obj) {}
@@ -112,7 +112,7 @@
     Reset(other.obj(), OwnershipPolicy::RETAIN);
   }
 
-  // Assumes that |obj| is a reference to a Java object and takes
+  // Assumes that `obj` is a reference to a Java object and takes
-  // ownership  of this  reference. This should preferably not be used
+  // ownership of this reference. This should preferably not be used
   // outside of JNI helper functions.
   ScopedJavaLocalRef(JNIEnv* env, T obj) : JavaRef<T>(obj), env_(env) {}
diff --git a/sdk/android/native_api/peerconnection/peer_connection_factory.h b/sdk/android/native_api/peerconnection/peer_connection_factory.h
index 00550a9..6f046c5 100644
--- a/sdk/android/native_api/peerconnection/peer_connection_factory.h
+++ b/sdk/android/native_api/peerconnection/peer_connection_factory.h
@@ -20,7 +20,7 @@
 
 namespace webrtc {
 
-// Creates java PeerConnectionFactory with specified |pcf|.
+// Creates a Java PeerConnectionFactory with the specified `pcf`.
 jobject NativeToJavaPeerConnectionFactory(
     JNIEnv* jni,
     rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> pcf,
diff --git a/sdk/android/native_api/stacktrace/stacktrace.cc b/sdk/android/native_api/stacktrace/stacktrace.cc
index cea3490..4889437 100644
--- a/sdk/android/native_api/stacktrace/stacktrace.cc
+++ b/sdk/android/native_api/stacktrace/stacktrace.cc
@@ -99,7 +99,7 @@
 SignalHandlerOutputState* volatile g_signal_handler_output_state;
 
 // This function is called iteratively for each stack trace element and stores
-// the element in the array from |unwind_output_state|.
+// the element in the array from `unwind_output_state`.
 _Unwind_Reason_Code UnwindBacktrace(struct _Unwind_Context* unwind_context,
                                     void* unwind_output_state) {
   SignalHandlerOutputState* const output_state =
@@ -136,7 +136,7 @@
 
 // Temporarily change the signal handler to a function that records a raw stack
 // trace and interrupt the given tid. This function will block until the output
-// thread stack trace has been stored in |params|. The return value is an error
+// thread stack trace has been stored in `params`. The return value is an error
 // string on failure and null on success.
 const char* CaptureRawStacktrace(int pid,
                                  int tid,
@@ -206,8 +206,8 @@
 std::vector<StackTraceElement> GetStackTrace(int tid) {
   // Only a thread itself can unwind its stack, so we will interrupt the given
   // tid with a custom signal handler in order to unwind its stack. The stack
-  // will be recorded to |params| through the use of the global pointer
-  // |g_signal_handler_param|.
+  // will be recorded to `params` through the use of the global pointer
+  // `g_signal_handler_param`.
   SignalHandlerOutputState params;
 
   const char* error_string = CaptureRawStacktrace(getpid(), tid, &params);
diff --git a/sdk/android/native_unittests/audio_device/audio_device_unittest.cc b/sdk/android/native_unittests/audio_device/audio_device_unittest.cc
index 31da60c..54a01ad 100644
--- a/sdk/android/native_unittests/audio_device/audio_device_unittest.cc
+++ b/sdk/android/native_unittests/audio_device/audio_device_unittest.cc
@@ -65,7 +65,7 @@
 static const size_t kBitsPerSample = 16;
 static const size_t kBytesPerSample = kBitsPerSample / 8;
 // Run the full-duplex test during this time (unit is in seconds).
-// Note that first |kNumIgnoreFirstCallbacks| are ignored.
+// Note that the first `kNumIgnoreFirstCallbacks` callbacks are ignored.
 static const int kFullDuplexTimeInSec = 5;
 // Wait for the callback sequence to stabilize by ignoring this amount of the
 // initial callbacks (avoids initial FIFO access).
@@ -124,7 +124,7 @@
   void Write(const void* source, size_t num_frames) override {}
 
   // Read samples from file stored in memory (at construction) and copy
-  // |num_frames| (<=> 10ms) to the |destination| byte buffer.
+  // `num_frames` (<=> 10ms) to the `destination` byte buffer.
   void Read(void* destination, size_t num_frames) override {
     memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
            num_frames * sizeof(int16_t));
@@ -168,7 +168,7 @@
 
   ~FifoAudioStream() { Flush(); }
 
-  // Allocate new memory, copy |num_frames| samples from |source| into memory
+  // Allocate new memory, copy `num_frames` samples from `source` into memory
   // and add pointer to the memory location to end of the list.
   // Increases the size of the FIFO by one element.
   void Write(const void* source, size_t num_frames) override {
@@ -189,8 +189,8 @@
     total_written_elements_ += size;
   }
 
-  // Read pointer to data buffer from front of list, copy |num_frames| of stored
-  // data into |destination| and delete the utilized memory allocation.
+  // Read pointer to data buffer from front of list, copy `num_frames` of stored
+  // data into `destination` and delete the utilized memory allocation.
   // Decreases the size of the FIFO by one element.
   void Read(void* destination, size_t num_frames) override {
     ASSERT_EQ(num_frames, frames_per_buffer_);
@@ -248,7 +248,7 @@
         rec_count_(0),
         pulse_time_(0) {}
 
-  // Insert periodic impulses in first two samples of |destination|.
+  // Insert periodic impulses in first two samples of `destination`.
   void Read(void* destination, size_t num_frames) override {
     ASSERT_EQ(num_frames, frames_per_buffer_);
     if (play_count_ == 0) {
@@ -269,14 +269,14 @@
     }
   }
 
-  // Detect received impulses in |source|, derive time between transmission and
+  // Detect received impulses in `source`, derive time between transmission and
   // detection and add the calculated delay to list of latencies.
   void Write(const void* source, size_t num_frames) override {
     ASSERT_EQ(num_frames, frames_per_buffer_);
     rec_count_++;
     if (pulse_time_ == 0) {
       // Avoid detection of new impulse response until a new impulse has
-      // been transmitted (sets |pulse_time_| to value larger than zero).
+      // been transmitted (sets `pulse_time_` to a value larger than zero).
       return;
     }
     const int16_t* ptr16 = static_cast<const int16_t*>(source);
@@ -295,7 +295,7 @@
       // Total latency is the difference between transmit time and detection
-      // tome plus the extra delay within the buffer in which we detected the
+      // time plus the extra delay within the buffer in which we detected the
       // received impulse. It is transmitted at sample 0 but can be received
-      // at sample N where N > 0. The term |extra_delay| accounts for N and it
+      // at sample N where N > 0. The term `extra_delay` accounts for N and it
       // is a value between 0 and 10ms.
       latencies_.push_back(now_time - pulse_time_ + extra_delay);
       pulse_time_ = 0;
diff --git a/sdk/android/src/java/org/webrtc/AndroidVideoDecoder.java b/sdk/android/src/java/org/webrtc/AndroidVideoDecoder.java
index aa68e9d..f392f8c 100644
--- a/sdk/android/src/java/org/webrtc/AndroidVideoDecoder.java
+++ b/sdk/android/src/java/org/webrtc/AndroidVideoDecoder.java
@@ -514,7 +514,7 @@
       throw new AssertionError("Stride is not divisible by two: " + stride);
     }
 
-    // Note that the case with odd |sliceHeight| is handled in a special way.
+    // Note that the case with odd `sliceHeight` is handled in a special way.
     // The chroma height contained in the payload is rounded down instead of
     // up, making it one row less than what we expect in WebRTC. Therefore, we
     // have to duplicate the last chroma rows for this case. Also, the offset
diff --git a/sdk/android/src/java/org/webrtc/Camera1Session.java b/sdk/android/src/java/org/webrtc/Camera1Session.java
index 2d821c2..a439315 100644
--- a/sdk/android/src/java/org/webrtc/Camera1Session.java
+++ b/sdk/android/src/java/org/webrtc/Camera1Session.java
@@ -133,7 +133,7 @@
 
   private static CaptureFormat findClosestCaptureFormat(
       android.hardware.Camera.Parameters parameters, int width, int height, int framerate) {
-    // Find closest supported format for |width| x |height| @ |framerate|.
+    // Find closest supported format for `width` x `height` @ `framerate`.
     final List<CaptureFormat.FramerateRange> supportedFramerates =
         Camera1Enumerator.convertFramerates(parameters.getSupportedPreviewFpsRange());
     Logging.d(TAG, "Available fps ranges: " + supportedFramerates);
diff --git a/sdk/android/src/java/org/webrtc/EglBase14Impl.java b/sdk/android/src/java/org/webrtc/EglBase14Impl.java
index 202b0da..a3b8a78 100644
--- a/sdk/android/src/java/org/webrtc/EglBase14Impl.java
+++ b/sdk/android/src/java/org/webrtc/EglBase14Impl.java
@@ -69,7 +69,7 @@
   }
 
   // Create a new context with the specified config type, sharing data with sharedContext.
-  // |sharedContext| may be null.
+  // `sharedContext` may be null.
   public EglBase14Impl(EGLContext sharedContext, int[] configAttributes) {
     eglDisplay = getEglDisplay();
     eglConfig = getEglConfig(eglDisplay, configAttributes);
diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioEffects.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioEffects.java
index 6b69b26..7444df3 100644
--- a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioEffects.java
+++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioEffects.java
@@ -22,7 +22,7 @@
-// This class wraps control of three different platform effects. Supported
+// This class wraps control of two different platform effects. Supported
 // effects are: AcousticEchoCanceler (AEC) and NoiseSuppressor (NS).
-// Calling enable() will active all effects that are
+// Calling enable() will activate all effects that are
-// supported by the device if the corresponding |shouldEnableXXX| member is set.
+// supported by the device if the corresponding `shouldEnableXXX` member is set.
 class WebRtcAudioEffects {
   private static final boolean DEBUG = false;
 
@@ -71,7 +71,7 @@
   }
 
   // Call this method to enable or disable the platform AEC. It modifies
-  // |shouldEnableAec| which is used in enable() where the actual state
+  // `shouldEnableAec` which is used in enable() where the actual state
   // of the AEC effect is modified. Returns true if HW AEC is supported and
   // false otherwise.
   public boolean setAEC(boolean enable) {
@@ -90,7 +90,7 @@
   }
 
   // Call this method to enable or disable the platform NS. It modifies
-  // |shouldEnableNs| which is used in enable() where the actual state
+  // `shouldEnableNs` which is used in enable() where the actual state
   // of the NS effect is modified. Returns true if HW NS is supported and
   // false otherwise.
   public boolean setNS(boolean enable) {
@@ -180,7 +180,7 @@
     }
   }
 
-  // Returns true for effect types in |type| that are of "VoIP" types:
+  // Returns true for effect types in `type` that are of "VoIP" types:
   // Acoustic Echo Canceler (AEC) or Automatic Gain Control (AGC) or
-  // Noise Suppressor (NS). Note that, an extra check for support is needed
-  // in each comparison since some devices includes effects in the
+  // Noise Suppressor (NS). Note that an extra check for support is needed
+  // in each comparison since some devices include effects in the
@@ -217,7 +217,7 @@
   }
 
   // Returns true if an effect of the specified type is available. Functionally
-  // equivalent to (NoiseSuppressor|AutomaticGainControl|...).isAvailable(), but
+  // equivalent to (`NoiseSuppressor`|`AutomaticGainControl`|...).isAvailable(), but
   // faster as it avoids the expensive OS call to enumerate effects.
   private static boolean isEffectTypeAvailable(UUID effectType, UUID blockListedUuid) {
     Descriptor[] effects = getAvailableEffects();
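
A sketch of the set-then-enable order these comments describe (the class is package-private, so the caller below is assumed to live in org.webrtc.audio, and `audioSession` would come from the owning AudioRecord):

    package org.webrtc.audio;

    class EffectsConfigExample {
      static void configure(WebRtcAudioEffects effects, int audioSession) {
        // setAEC()/setNS() only record the desired state...
        effects.setAEC(true);
        effects.setNS(true);
        // ...enable() applies it to whatever the device actually supports.
        effects.enable(audioSession);
      }
    }
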
diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java
index 7346959..dbbcdef 100644
--- a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java
+++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java
@@ -237,7 +237,7 @@
 
   // Returns true if verifyAudioConfig() succeeds. This value is set after a specific delay when
-  // startRecording() has been called. Hence, should preferably be called in combination with
+  // startRecording() has been called. Hence, it should preferably be called in combination with
-  // stopRecording() to ensure that it has been set properly. |isAudioConfigVerified| is
+  // stopRecording() to ensure that it has been set properly. `isAudioConfigVerified` is
   // enabled in WebRtcAudioRecord to ensure that the returned value is valid.
   @CalledByNative
   boolean isAudioSourceMatchingRecordingSession() {
@@ -491,7 +491,7 @@
       long nativeAudioRecordJni, ByteBuffer byteBuffer);
   private native void nativeDataIsRecorded(long nativeAudioRecordJni, int bytes);
 
-  // Sets all recorded samples to zero if |mute| is true, i.e., ensures that
+  // Sets all recorded samples to zero if `mute` is true, i.e., ensures that
   // the microphone is muted.
   public void setMicrophoneMute(boolean mute) {
     Logging.w(TAG, "setMicrophoneMute(" + mute + ")");
diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioTrack.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioTrack.java
index 5e1201d..2447fbce 100644
--- a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioTrack.java
+++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioTrack.java
@@ -76,7 +76,7 @@
   private @Nullable AudioTrackThread audioThread;
   private final VolumeLogger volumeLogger;
 
-  // Samples to be played are replaced by zeros if |speakerMute| is set to true.
+  // Samples to be played are replaced by zeros if `speakerMute` is set to true.
   // Can be used to ensure that the speaker is fully muted.
   private volatile boolean speakerMute;
   private byte[] emptyBytes;
@@ -218,9 +218,9 @@
     Logging.d(TAG, "minBufferSizeInBytes: " + minBufferSizeInBytes);
     // For the streaming mode, data must be written to the audio sink in
     // chunks of size (given by byteBuffer.capacity()) less than or equal
-    // to the total buffer size |minBufferSizeInBytes|. But, we have seen
+    // to the total buffer size `minBufferSizeInBytes`. But, we have seen
     // reports of "getMinBufferSize(): error querying hardware". Hence, it
-    // can happen that |minBufferSizeInBytes| contains an invalid value.
+    // can happen that `minBufferSizeInBytes` contains an invalid value.
     if (minBufferSizeInBytes < byteBuffer.capacity()) {
       reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value.");
       return -1;
@@ -559,7 +559,7 @@
       long nativeAudioTrackJni, ByteBuffer byteBuffer);
   private static native void nativeGetPlayoutData(long nativeAudioTrackJni, int bytes);
 
-  // Sets all samples to be played out to zero if |mute| is true, i.e.,
+  // Sets all samples to be played out to zero if `mute` is true, i.e.,
   // ensures that the speaker is muted.
   public void setSpeakerMute(boolean mute) {
     Logging.w(TAG, "setSpeakerMute(" + mute + ")");
diff --git a/sdk/android/src/jni/android_metrics.cc b/sdk/android/src/jni/android_metrics.cc
index 2c998d4..e021ef4 100644
--- a/sdk/android/src/jni/android_metrics.cc
+++ b/sdk/android/src/jni/android_metrics.cc
@@ -31,14 +31,14 @@
   std::map<std::string, std::unique_ptr<metrics::SampleInfo>> histograms;
   metrics::GetAndReset(&histograms);
   for (const auto& kv : histograms) {
-    // Create and add samples to |HistogramInfo|.
+    // Create and add samples to `HistogramInfo`.
     ScopedJavaLocalRef<jobject> j_info = Java_HistogramInfo_Constructor(
         jni, kv.second->min, kv.second->max,
         static_cast<int>(kv.second->bucket_count));
     for (const auto& sample : kv.second->samples) {
       Java_HistogramInfo_addSample(jni, j_info, sample.first, sample.second);
     }
-    // Add |HistogramInfo| to |Metrics|.
+    // Add `HistogramInfo` to `Metrics`.
     ScopedJavaLocalRef<jstring> j_name = NativeToJavaString(jni, kv.first);
     Java_Metrics_add(jni, j_metrics, j_name, j_info);
   }
diff --git a/sdk/android/src/jni/android_network_monitor.cc b/sdk/android/src/jni/android_network_monitor.cc
index 686f94e..088ca47 100644
--- a/sdk/android/src/jni/android_network_monitor.cc
+++ b/sdk/android/src/jni/android_network_monitor.cc
@@ -376,7 +376,7 @@
     rv = lollipopSetNetworkForSocket(*network_handle, socket_fd);
   }
 
-  // If |network| has since disconnected, |rv| will be ENONET. Surface this as
+  // If `network` has since disconnected, `rv` will be ENONET. Surface this as
   // ERR_NETWORK_CHANGED, rather than MapSystemError(ENONET) which gives back
   // the less descriptive ERR_FAILED.
   if (rv == 0) {
diff --git a/sdk/android/src/jni/android_network_monitor.h b/sdk/android/src/jni/android_network_monitor.h
index 423ae3a..01e5fb7 100644
--- a/sdk/android/src/jni/android_network_monitor.h
+++ b/sdk/android/src/jni/android_network_monitor.h
@@ -76,7 +76,7 @@
   void Start() override;
   void Stop() override;
 
-  // Does |this| NetworkMonitorInterface implement BindSocketToNetwork?
+  // Does `this` NetworkMonitorInterface implement BindSocketToNetwork?
   // Only Android returns true.
   virtual bool SupportsBindSocketToNetwork() const override { return true; }
 
diff --git a/sdk/android/src/jni/audio_device/aaudio_player.cc b/sdk/android/src/jni/audio_device/aaudio_player.cc
index 4e1c7e33..da68b83 100644
--- a/sdk/android/src/jni/audio_device/aaudio_player.cc
+++ b/sdk/android/src/jni/audio_device/aaudio_player.cc
@@ -200,7 +200,7 @@
   }
 
   // Read audio data from the WebRTC source using the FineAudioBuffer object
-  // and write that data into |audio_data| to be played out by AAudio.
+  // and write that data into `audio_data` to be played out by AAudio.
   // Prime output with zeros during a short initial phase to avoid distortion.
-  // TODO(henrika): do more work to figure out of if the initial forced silence
+  // TODO(henrika): do more work to figure out if the initial forced silence
   // period is really needed.
diff --git a/sdk/android/src/jni/audio_device/aaudio_player.h b/sdk/android/src/jni/audio_device/aaudio_player.h
index 5f9a9ea..9e775ec 100644
--- a/sdk/android/src/jni/audio_device/aaudio_player.h
+++ b/sdk/android/src/jni/audio_device/aaudio_player.h
@@ -81,8 +81,8 @@
  protected:
   // AAudioObserverInterface implementation.
 
-  // For an output stream, this function should render and write |num_frames|
-  // of data in the streams current data format to the |audio_data| buffer.
+  // For an output stream, this function should render and write `num_frames`
+  // of data in the stream's current data format to the `audio_data` buffer.
   // Called on a real-time thread owned by AAudio.
   aaudio_data_callback_result_t OnDataCallback(void* audio_data,
                                                int32_t num_frames) override;
diff --git a/sdk/android/src/jni/audio_device/aaudio_recorder.cc b/sdk/android/src/jni/audio_device/aaudio_recorder.cc
index 34b14f4..8a4c353 100644
--- a/sdk/android/src/jni/audio_device/aaudio_recorder.cc
+++ b/sdk/android/src/jni/audio_device/aaudio_recorder.cc
@@ -157,7 +157,7 @@
   }
 }
 
-// Read and process |num_frames| of data from the |audio_data| buffer.
+// Read and process `num_frames` of data from the `audio_data` buffer.
 // TODO(henrika): possibly add trace here to be included in systrace.
 // See https://developer.android.com/studio/profile/systrace-commandline.html.
 aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
@@ -191,7 +191,7 @@
     RTC_DLOG(INFO) << "input latency: " << latency_millis_
                    << ", num_frames: " << num_frames;
   }
-  // Copy recorded audio in |audio_data| to the WebRTC sink using the
+  // Copy recorded audio in `audio_data` to the WebRTC sink using the
   // FineAudioBuffer object.
   fine_audio_buffer_->DeliverRecordedData(
       rtc::MakeArrayView(static_cast<const int16_t*>(audio_data),
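
For orientation, the callback above amounts to forwarding `num_frames` frames of 16-bit PCM from AAudio's `audio_data` pointer to a sink. A self-contained sketch with a stand-in sink in place of FineAudioBuffer (names here are illustrative only):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Stand-in for the WebRTC sink fed by FineAudioBuffer.
    struct RecordedSinkSketch {
      std::vector<int16_t> samples;
      void Deliver(const int16_t* data, size_t count) {
        samples.insert(samples.end(), data, data + count);
      }
    };

    // Forwards `num_frames` frames of recorded 16-bit PCM to the sink.
    void OnDataCallbackSketch(RecordedSinkSketch& sink,
                              const void* audio_data,
                              int32_t num_frames,
                              int channels) {
      sink.Deliver(static_cast<const int16_t*>(audio_data),
                   static_cast<size_t>(num_frames) * channels);
    }
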
diff --git a/sdk/android/src/jni/audio_device/aaudio_recorder.h b/sdk/android/src/jni/audio_device/aaudio_recorder.h
index 2b6aa03..a911577 100644
--- a/sdk/android/src/jni/audio_device/aaudio_recorder.h
+++ b/sdk/android/src/jni/audio_device/aaudio_recorder.h
@@ -72,8 +72,8 @@
  protected:
   // AAudioObserverInterface implementation.
 
-  // For an input stream, this function should read |num_frames| of recorded
-  // data, in the stream's current data format, from the |audio_data| buffer.
+  // For an input stream, this function should read `num_frames` of recorded
+  // data, in the stream's current data format, from the `audio_data` buffer.
   // Called on a real-time thread owned by AAudio.
   aaudio_data_callback_result_t OnDataCallback(void* audio_data,
                                                int32_t num_frames) override;
diff --git a/sdk/android/src/jni/audio_device/audio_record_jni.cc b/sdk/android/src/jni/audio_device/audio_record_jni.cc
index 15c290c..2739522 100644
--- a/sdk/android/src/jni/audio_device/audio_record_jni.cc
+++ b/sdk/android/src/jni/audio_device/audio_record_jni.cc
@@ -253,8 +253,8 @@
   audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
                                           frames_per_buffer_);
   // We provide one (combined) fixed delay estimate for the APM and use the
-  // |playDelayMs| parameter only. Components like the AEC only sees the sum
-  // of |playDelayMs| and |recDelayMs|, hence the distributions does not matter.
+  // `playDelayMs` parameter only. Components like the AEC only see the sum
+  // of `playDelayMs` and `recDelayMs`, hence the distribution does not matter.
   audio_device_buffer_->SetVQEData(total_delay_ms_, 0);
   if (audio_device_buffer_->DeliverRecordedData() == -1) {
     RTC_LOG(INFO) << "AudioDeviceBuffer::DeliverRecordedData failed";
diff --git a/sdk/android/src/jni/audio_device/audio_record_jni.h b/sdk/android/src/jni/audio_device/audio_record_jni.h
index 800d235..1ff62f8 100644
--- a/sdk/android/src/jni/audio_device/audio_record_jni.h
+++ b/sdk/android/src/jni/audio_device/audio_record_jni.h
@@ -74,8 +74,8 @@
   int32_t EnableBuiltInNS(bool enable) override;
 
   // Called from Java side so we can cache the address of the Java-managed
-  // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
-  // is also stored in |direct_buffer_capacity_in_bytes_|.
+  // `byte_buffer` in `direct_buffer_address_`. The size of the buffer
+  // is also stored in `direct_buffer_capacity_in_bytes_`.
   // This method will be called by the WebRtcAudioRecord constructor, i.e.,
   // on the same thread that this object is created on.
   void CacheDirectBufferAddress(JNIEnv* env,
@@ -83,8 +83,8 @@
                                 const JavaParamRef<jobject>& byte_buffer);
 
   // Called periodically by the Java based WebRtcAudioRecord object when
-  // recording has started. Each call indicates that there are |length| new
-  // bytes recorded in the memory area |direct_buffer_address_| and it is
+  // recording has started. Each call indicates that there are `length` new
+  // bytes recorded in the memory area `direct_buffer_address_` and it is
   // now time to send these to the consumer.
   // This method is called on a high-priority thread from Java. The name of
   // the thread is 'AudioRecordThread'.
@@ -111,10 +111,10 @@
   // possible values. See audio_common.h for details.
   const int total_delay_ms_;
 
-  // Cached copy of address to direct audio buffer owned by |j_audio_record_|.
+  // Cached copy of address to direct audio buffer owned by `j_audio_record_`.
   void* direct_buffer_address_;
 
-  // Number of bytes in the direct audio buffer owned by |j_audio_record_|.
+  // Number of bytes in the direct audio buffer owned by `j_audio_record_`.
   size_t direct_buffer_capacity_in_bytes_;
 
   // Number of audio frames per audio buffer. Each audio frame corresponds to
diff --git a/sdk/android/src/jni/audio_device/audio_track_jni.h b/sdk/android/src/jni/audio_device/audio_track_jni.h
index cc4d8f5..5ca907c 100644
--- a/sdk/android/src/jni/audio_device/audio_track_jni.h
+++ b/sdk/android/src/jni/audio_device/audio_track_jni.h
@@ -71,14 +71,14 @@
   void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
 
   // Called from Java side so we can cache the address of the Java-managed
-  // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
-  // is also stored in |direct_buffer_capacity_in_bytes_|.
+  // `byte_buffer` in `direct_buffer_address_`. The size of the buffer
+  // is also stored in `direct_buffer_capacity_in_bytes_`.
   // Called on the same thread as the creating thread.
   void CacheDirectBufferAddress(JNIEnv* env,
                                 const JavaParamRef<jobject>& byte_buffer);
   // Called periodically by the Java based WebRtcAudioTrack object when
-  // playout has started. Each call indicates that |length| new bytes should
-  // be written to the memory area |direct_buffer_address_| for playout.
+  // playout has started. Each call indicates that `length` new bytes should
+  // be written to the memory area `direct_buffer_address_` for playout.
   // This method is called on a high-priority thread from Java. The name of
   // the thread is 'AudioTrackThread'.
   void GetPlayoutData(JNIEnv* env, size_t length);
@@ -99,10 +99,10 @@
   // AudioManager.
   const AudioParameters audio_parameters_;
 
-  // Cached copy of address to direct audio buffer owned by |j_audio_track_|.
+  // Cached copy of address to direct audio buffer owned by `j_audio_track_`.
   void* direct_buffer_address_;
 
-  // Number of bytes in the direct audio buffer owned by |j_audio_track_|.
+  // Number of bytes in the direct audio buffer owned by `j_audio_track_`.
   size_t direct_buffer_capacity_in_bytes_;
 
   // Number of audio frames per audio buffer. Each audio frame corresponds to
diff --git a/sdk/android/src/jni/audio_device/opensles_player.h b/sdk/android/src/jni/audio_device/opensles_player.h
index 7388a93..8a22432 100644
--- a/sdk/android/src/jni/audio_device/opensles_player.h
+++ b/sdk/android/src/jni/audio_device/opensles_player.h
@@ -95,7 +95,7 @@
   // Reads audio data in PCM format using the AudioDeviceBuffer.
   // Can be called both on the main thread (during Start()) and from the
   // internal audio thread while output streaming is active.
-  // If the |silence| flag is set, the audio is filled with zeros instead of
+  // If the `silence` flag is set, the audio is filled with zeros instead of
   // asking the WebRTC layer for real audio data. This procedure is also known
   // as audio priming.
   void EnqueuePlayoutData(bool silence);
@@ -106,7 +106,7 @@
 
   // Obtains the SL Engine Interface from the existing global Engine object.
   // The interface exposes creation methods of all the OpenSL ES object types.
-  // This method defines the |engine_| member variable.
+  // This method defines the `engine_` member variable.
   bool ObtainEngineInterface();
 
   // Creates/destroys the output mix object.
diff --git a/sdk/android/src/jni/audio_device/opensles_recorder.h b/sdk/android/src/jni/audio_device/opensles_recorder.h
index ff324f3..93c4e4e 100644
--- a/sdk/android/src/jni/audio_device/opensles_recorder.h
+++ b/sdk/android/src/jni/audio_device/opensles_recorder.h
@@ -88,7 +88,7 @@
  private:
   // Obtains the SL Engine Interface from the existing global Engine object.
   // The interface exposes creation methods of all the OpenSL ES object types.
-  // This method defines the |engine_| member variable.
+  // This method defines the `engine_` member variable.
   bool ObtainEngineInterface();
 
   // Creates/destroys the audio recorder and the simple-buffer queue object.
@@ -109,7 +109,7 @@
   // Wraps calls to SLAndroidSimpleBufferQueueState::Enqueue() and it can be
   // called both on the main thread (but before recording has started) and from
   // the internal audio thread while input streaming is active. It uses
-  // |simple_buffer_queue_| but no lock is needed since the initial calls from
+  // `simple_buffer_queue_` but no lock is needed since the initial calls from
   // the main thread and the native callback thread are mutually exclusive.
   bool EnqueueAudioBuffer();
 
diff --git a/sdk/android/src/jni/jni_generator_helper.cc b/sdk/android/src/jni/jni_generator_helper.cc
index d26c992..8ddcdff 100644
--- a/sdk/android/src/jni/jni_generator_helper.cc
+++ b/sdk/android/src/jni/jni_generator_helper.cc
@@ -15,7 +15,7 @@
 
 namespace webrtc {
 
-// If |atomic_class_id| set, it'll return immediately. Otherwise, it will look
+// If `atomic_class_id` is set, it'll return immediately. Otherwise, it will look
 // up the class and store it. If there's a race, we take care to only store one
 // global reference (and the duplicated effort will happen only once).
 jclass LazyGetClass(JNIEnv* env,
@@ -29,18 +29,18 @@
   jclass cas_result = nullptr;
   if (std::atomic_compare_exchange_strong(atomic_class_id, &cas_result,
                                           clazz.obj())) {
-    // We sucessfully stored |clazz| in |atomic_class_id|, so we are
+    // We successfully stored `clazz` in `atomic_class_id`, so we are
     // intentionally leaking the global ref since it's now stored there.
     return clazz.Release();
   } else {
     // Some other thread came before us and stored a global pointer in
-    // |atomic_class_id|. Relase our global ref and return the ref from the
+    // `atomic_class_id`. Release our global ref and return the ref from the
     // other thread.
     return cas_result;
   }
 }
 
-// If |atomic_method_id| set, it'll return immediately. Otherwise, it will look
+// If `atomic_method_id` is set, it'll return immediately. Otherwise, it will look
 // up the method id and store it. If there's a race, it's ok since the values
 // are the same (and the duplicated effort will happen only once).
 template <MethodID::Type type>
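
The race described in the comments above is resolved with a single compare-and-swap: the winning thread's global ref is stored (and intentionally leaked), while a losing thread releases its duplicate and uses the winner's. A sketch of that pattern, using illustrative names rather than the real helpers:

    #include <jni.h>

    #include <atomic>

    jclass LazyGetClassSketch(JNIEnv* env,
                              const char* class_name,
                              std::atomic<jclass>* atomic_class_id) {
      jclass cached = atomic_class_id->load();
      if (cached)
        return cached;  // Fast path: some thread already stored the ref.
      jclass clazz = static_cast<jclass>(
          env->NewGlobalRef(env->FindClass(class_name)));
      jclass expected = nullptr;
      if (atomic_class_id->compare_exchange_strong(expected, clazz))
        return clazz;  // We won; the global ref now lives in the atomic.
      env->DeleteGlobalRef(clazz);  // We lost; drop our duplicate ref.
      return expected;              // Use the ref the other thread stored.
    }
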
diff --git a/sdk/android/src/jni/jni_generator_helper.h b/sdk/android/src/jni/jni_generator_helper.h
index a5497e1..23695ca 100644
--- a/sdk/android/src/jni/jni_generator_helper.h
+++ b/sdk/android/src/jni/jni_generator_helper.h
@@ -44,11 +44,11 @@
 
 namespace webrtc {
 
-// This function will initialize |atomic_class_id| to contain a global ref to
+// This function will initialize `atomic_class_id` to contain a global ref to
 // the given class, and will return that ref on subsequent calls. The caller is
-// responsible to zero-initialize |atomic_class_id|. It's fine to
+// responsible for zero-initializing `atomic_class_id`. It's fine to
 // simultaneously call this on multiple threads referencing the same
-// |atomic_method_id|.
+// `atomic_class_id`.
 jclass LazyGetClass(JNIEnv* env,
                     const char* class_name,
                     std::atomic<jclass>* atomic_class_id);
@@ -61,11 +61,11 @@
     TYPE_INSTANCE,
   };
 
-  // This function will initialize |atomic_method_id| to contain a ref to
+  // This function will initialize `atomic_method_id` to contain a ref to
   // the given method, and will return that ref on subsequent calls. The caller
-  // is responsible to zero-initialize |atomic_method_id|. It's fine to
+  // is responsible for zero-initializing `atomic_method_id`. It's fine to
   // simultaneously call this on multiple threads referencing the same
-  // |atomic_method_id|.
+  // `atomic_method_id`.
   template <Type type>
   static jmethodID LazyGet(JNIEnv* env,
                            jclass clazz,
@@ -151,7 +151,7 @@
             const char* jni_signature,
             std::atomic<jmethodID>* atomic_method_id) {
     base.Init<type>(env, clazz, method_name, jni_signature, atomic_method_id);
-    // Reset |pc| to correct caller.
+    // Reset `pc` to the correct caller.
     base.pc = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
   }
 
diff --git a/sdk/android/src/jni/jvm.cc b/sdk/android/src/jni/jvm.cc
index eaa4f67..4cf1aa5 100644
--- a/sdk/android/src/jni/jvm.cc
+++ b/sdk/android/src/jni/jvm.cc
@@ -27,7 +27,7 @@
 
 static pthread_once_t g_jni_ptr_once = PTHREAD_ONCE_INIT;
 
-// Key for per-thread JNIEnv* data.  Non-NULL in threads attached to |g_jvm| by
+// Key for per-thread JNIEnv* data.  Non-NULL in threads attached to `g_jvm` by
 // AttachCurrentThreadIfNeeded(), NULL in unattached threads and threads that
 // were attached by the JVM because of a Java->native call.
 static pthread_key_t g_jni_ptr;
@@ -48,7 +48,7 @@
 }
 
 static void ThreadDestructor(void* prev_jni_ptr) {
-  // This function only runs on threads where |g_jni_ptr| is non-NULL, meaning
+  // This function only runs on threads where `g_jni_ptr` is non-NULL, meaning
   // we were responsible for originally attaching the thread, so are responsible
   // for detaching it now.  However, because some JVM implementations (notably
   // Oracle's http://goo.gl/eHApYT) also use the pthread_key_create mechanism,
@@ -102,7 +102,7 @@
   return std::string(name);
 }
 
-// Return a |JNIEnv*| usable on this thread.  Attaches to |g_jvm| if necessary.
+// Return a `JNIEnv*` usable on this thread.  Attaches to `g_jvm` if necessary.
 JNIEnv* AttachCurrentThreadIfNeeded() {
   JNIEnv* jni = GetEnv();
   if (jni)
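
Putting the pieces of this file together, the attach logic is: ask the JVM for an existing JNIEnv*, and only attach (recording that fact for the pthread-key destructor) when there is none. A sketch under those assumptions; on Android, AttachCurrentThread takes a JNIEnv**:

    #include <jni.h>
    #include <pthread.h>

    extern JavaVM* g_jvm;            // Process-wide JVM, as in this file.
    extern pthread_key_t g_jni_ptr;  // Per-thread key, as in this file.

    JNIEnv* AttachCurrentThreadIfNeededSketch() {
      JNIEnv* env = nullptr;
      if (g_jvm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) ==
          JNI_OK) {
        return env;  // Already attached, e.g. by a Java->native call.
      }
      JavaVMAttachArgs args = {JNI_VERSION_1_6, nullptr, nullptr};
      g_jvm->AttachCurrentThread(&env, &args);
      // Record that *we* attached this thread, so ThreadDestructor knows it
      // is responsible for detaching it.
      pthread_setspecific(g_jni_ptr, env);
      return env;
    }
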
diff --git a/sdk/android/src/jni/jvm.h b/sdk/android/src/jni/jvm.h
index 069a19a..296a7fe 100644
--- a/sdk/android/src/jni/jvm.h
+++ b/sdk/android/src/jni/jvm.h
@@ -23,7 +23,7 @@
 
 JavaVM* GetJVM();
 
-// Return a |JNIEnv*| usable on this thread.  Attaches to |g_jvm| if necessary.
+// Return a `JNIEnv*` usable on this thread.  Attaches to `g_jvm` if necessary.
 JNIEnv* AttachCurrentThreadIfNeeded();
 
 }  // namespace jni
diff --git a/sdk/android/src/jni/pc/media_stream.cc b/sdk/android/src/jni/pc/media_stream.cc
index 4779793..019f105 100644
--- a/sdk/android/src/jni/pc/media_stream.cc
+++ b/sdk/android/src/jni/pc/media_stream.cc
@@ -49,7 +49,7 @@
   observer_->SignalVideoTrackAdded.connect(
       this, &JavaMediaStream::OnVideoTrackAddedToStream);
 
-  // |j_media_stream| holds one reference. Corresponding Release() is in
+  // `j_media_stream` holds one reference. Corresponding Release() is in
   // MediaStream_free, triggered by MediaStream.dispose().
   media_stream.release();
 }
diff --git a/sdk/android/src/jni/pc/peer_connection.cc b/sdk/android/src/jni/pc/peer_connection.cc
index 09b8f33..0b0d408 100644
--- a/sdk/android/src/jni/pc/peer_connection.cc
+++ b/sdk/android/src/jni/pc/peer_connection.cc
@@ -499,7 +499,7 @@
     const JavaParamRef<jobject>& j_pc) {
   PeerConnectionInterface* pc = ExtractNativePC(jni, j_pc);
   // It's only safe to operate on SessionDescriptionInterface on the
-  // signaling thread, but |jni| may only be used on the current thread, so we
+  // signaling thread, but `jni` may only be used on the current thread, so we
   // must do this odd dance.
   std::string sdp;
   std::string type;
@@ -518,7 +518,7 @@
     const JavaParamRef<jobject>& j_pc) {
   PeerConnectionInterface* pc = ExtractNativePC(jni, j_pc);
   // It's only safe to operate on SessionDescriptionInterface on the
-  // signaling thread, but |jni| may only be used on the current thread, so we
+  // signaling thread, but `jni` may only be used on the current thread, so we
   // must do this odd dance.
   std::string sdp;
   std::string type;
diff --git a/sdk/android/src/jni/pc/peer_connection_factory.cc b/sdk/android/src/jni/pc/peer_connection_factory.cc
index 53e715b..a12d5c1 100644
--- a/sdk/android/src/jni/pc/peer_connection_factory.cc
+++ b/sdk/android/src/jni/pc/peer_connection_factory.cc
@@ -242,9 +242,9 @@
 }
 
 // The following parameters are optional:
-// |audio_device_module|, |jencoder_factory|, |jdecoder_factory|,
-// |audio_processor|, |fec_controller_factory|,
-// |network_state_predictor_factory|, |neteq_factory|.
+// `audio_device_module`, `jencoder_factory`, `jdecoder_factory`,
+// `audio_processor`, `fec_controller_factory`,
+// `network_state_predictor_factory`, `neteq_factory`.
 ScopedJavaLocalRef<jobject> CreatePeerConnectionFactoryForJava(
     JNIEnv* jni,
     const JavaParamRef<jobject>& jcontext,
diff --git a/sdk/android/src/jni/pc/peer_connection_factory.h b/sdk/android/src/jni/pc/peer_connection_factory.h
index 5bfdb7a..33cb978 100644
--- a/sdk/android/src/jni/pc/peer_connection_factory.h
+++ b/sdk/android/src/jni/pc/peer_connection_factory.h
@@ -18,7 +18,7 @@
 namespace webrtc {
 namespace jni {
 
-// Creates java PeerConnectionFactory with specified |pcf|.
+// Creates a Java PeerConnectionFactory with the specified `pcf`.
 jobject NativeToJavaPeerConnectionFactory(
     JNIEnv* jni,
     rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> pcf,
diff --git a/sdk/android/src/jni/pc/rtp_receiver.h b/sdk/android/src/jni/pc/rtp_receiver.h
index dd8ba0e..ccef44b 100644
--- a/sdk/android/src/jni/pc/rtp_receiver.h
+++ b/sdk/android/src/jni/pc/rtp_receiver.h
@@ -23,7 +23,7 @@
     JNIEnv* env,
     rtc::scoped_refptr<RtpReceiverInterface> receiver);
 
-// Takes ownership of the passed |j_receiver| and stores it as a global
+// Takes ownership of the passed `j_receiver` and stores it as a global
 // reference. Will call dispose() in the dtor.
 class JavaRtpReceiverGlobalOwner {
  public:
diff --git a/sdk/android/src/jni/pc/rtp_transceiver.h b/sdk/android/src/jni/pc/rtp_transceiver.h
index e96276c..5b2d012 100644
--- a/sdk/android/src/jni/pc/rtp_transceiver.h
+++ b/sdk/android/src/jni/pc/rtp_transceiver.h
@@ -27,7 +27,7 @@
     JNIEnv* env,
     rtc::scoped_refptr<RtpTransceiverInterface> transceiver);
 
-// This takes ownership of the of the |j_transceiver| and stores it as a global
+// This takes ownership of the `j_transceiver` and stores it as a global
 // reference. This calls the Java Transceiver's dispose() method in the dtor.
 class JavaRtpTransceiverGlobalOwner {
  public:
diff --git a/sdk/android/src/jni/video_encoder_wrapper.cc b/sdk/android/src/jni/video_encoder_wrapper.cc
index d292a85..cb5a7f4 100644
--- a/sdk/android/src/jni/video_encoder_wrapper.cc
+++ b/sdk/android/src/jni/video_encoder_wrapper.cc
@@ -267,7 +267,7 @@
     frame_extra_infos_.pop_front();
   }
 
-  // This is a bit subtle. The |frame| variable from the lambda capture is
+  // This is a bit subtle. The `frame` variable from the lambda capture is
   // const. Which implies that (i) we need to make a copy to be able to
   // write to the metadata, and (ii) we should avoid using the .data()
   // method (including implicit conversion to ArrayView) on the non-const
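
The const-capture subtlety noted in that comment is general C++: a by-value capture is const inside a non-mutable lambda, so writing metadata requires copying first. A minimal illustration with a simplified stand-in type:

    #include <cstdint>

    // Simplified stand-in for the captured frame type.
    struct FrameSketch {
      int64_t capture_time_ms = 0;
    };

    void CaptureExample(const FrameSketch& source) {
      auto on_encoded = [frame = source]() {
        // frame.capture_time_ms = 1;  // Error: `frame` is const here.
        FrameSketch copy = frame;      // Copy to get a mutable object...
        copy.capture_time_ms = 1;      // ...then write the metadata.
      };
      on_encoded();
    }
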
diff --git a/sdk/android/src/jni/video_frame.cc b/sdk/android/src/jni/video_frame.cc
index 35d04d8..dd02731 100644
--- a/sdk/android/src/jni/video_frame.cc
+++ b/sdk/android/src/jni/video_frame.cc
@@ -41,8 +41,8 @@
 
   const ScopedJavaGlobalRef<jobject>& video_frame_buffer() const;
 
-  // Crops a region defined by |crop_x|, |crop_y|, |crop_width| and
-  // |crop_height|. Scales it to size |scale_width| x |scale_height|.
+  // Crops a region defined by `crop_x`, `crop_y`, `crop_width` and
+  // `crop_height`. Scales it to size `scale_width` x `scale_height`.
   rtc::scoped_refptr<VideoFrameBuffer> CropAndScale(int crop_x,
                                                     int crop_y,
                                                     int crop_width,
diff --git a/sdk/media_constraints.cc b/sdk/media_constraints.cc
index 6f4901c..2236f50 100644
--- a/sdk/media_constraints.cc
+++ b/sdk/media_constraints.cc
@@ -17,8 +17,8 @@
 namespace {
 
 // Find the highest-priority instance of the T-valued constraint named by
-// |key| and return its value as |value|. |constraints| can be null.
-// If |mandatory_constraints| is non-null, it is incremented if the key appears
+// `key` and return its value as `value`. `constraints` can be null.
+// If `mandatory_constraints` is non-null, it is incremented if the key appears
 // among the mandatory constraints.
 // Returns true if the key was found and has a valid value for type T.
 // If the key appears multiple times as an optional constraint, appearances
@@ -135,8 +135,8 @@
 
 const char MediaConstraints::kNumSimulcastLayers[] = "googNumSimulcastLayers";
 
-// Set |value| to the value associated with the first appearance of |key|, or
-// return false if |key| is not found.
+// Set `value` to the value associated with the first appearance of `key`, or
+// return false if `key` is not found.
 bool MediaConstraints::Constraints::FindFirst(const std::string& key,
                                               std::string* value) const {
   for (Constraints::const_iterator iter = begin(); iter != end(); ++iter) {
@@ -209,7 +209,7 @@
   ConstraintToOptional<std::string>(
       constraints, MediaConstraints::kAudioNetworkAdaptorConfig,
       &options->audio_network_adaptor_config);
-  // When |kAudioNetworkAdaptorConfig| is defined, it both means that audio
+  // When `kAudioNetworkAdaptorConfig` is defined, it both means that audio
   // network adaptor is desired, and provides the config string.
   if (options->audio_network_adaptor_config) {
     options->audio_network_adaptor = true;
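
The FindFirst contract above is a straightforward linear scan that stops at the first matching key. A sketch using a plain vector of pairs in place of the real Constraints container:

    #include <string>
    #include <utility>
    #include <vector>

    using ConstraintListSketch =
        std::vector<std::pair<std::string, std::string>>;

    // Sets `*value` from the first pair whose key equals `key`; returns
    // false if `key` is not present at all.
    bool FindFirstSketch(const ConstraintListSketch& constraints,
                         const std::string& key,
                         std::string* value) {
      for (const auto& kv : constraints) {
        if (kv.first == key) {
          *value = kv.second;
          return true;  // Later appearances are ignored.
        }
      }
      return false;
    }
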
diff --git a/sdk/objc/api/peerconnection/RTCAudioSource.h b/sdk/objc/api/peerconnection/RTCAudioSource.h
index 9f78dcd..9272fdf 100644
--- a/sdk/objc/api/peerconnection/RTCAudioSource.h
+++ b/sdk/objc/api/peerconnection/RTCAudioSource.h
@@ -20,7 +20,7 @@
 
 - (instancetype)init NS_UNAVAILABLE;
 
-// Sets the volume for the RTCMediaSource. |volume| is a gain value in the range
+// Sets the volume for the RTCMediaSource. `volume` is a gain value in the range
 // [0, 10].
 // Temporary fix to be able to modify volume of remote audio tracks.
 // TODO(kthelgason): Property stays here temporarily until a proper volume-api
diff --git a/sdk/objc/api/peerconnection/RTCConfiguration.h b/sdk/objc/api/peerconnection/RTCConfiguration.h
index 02461b0..4356b8d 100644
--- a/sdk/objc/api/peerconnection/RTCConfiguration.h
+++ b/sdk/objc/api/peerconnection/RTCConfiguration.h
@@ -84,7 +84,7 @@
 @property(nonatomic, nullable) RTC_OBJC_TYPE(RTCCertificate) * certificate;
 
 /** Which candidates the ICE agent is allowed to use. The W3C calls it
- * |iceTransportPolicy|, while in C++ it is called |type|. */
+ * `iceTransportPolicy`, while in C++ it is called `type`. */
 @property(nonatomic, assign) RTCIceTransportPolicy iceTransportPolicy;
 
 /** The media-bundling policy to use when gathering ICE candidates. */
@@ -144,7 +144,7 @@
  */
 @property(nonatomic, assign) BOOL shouldPresumeWritableWhenFullyRelayed;
 
-/* This flag is only effective when |continualGatheringPolicy| is
+/* This flag is only effective when `continualGatheringPolicy` is
  * RTCContinualGatheringPolicyGatherContinually.
  *
  * If YES, after the ICE transport type is changed such that new types of
diff --git a/sdk/objc/api/peerconnection/RTCDataChannel.h b/sdk/objc/api/peerconnection/RTCDataChannel.h
index 2d0661f..6f4ef37 100644
--- a/sdk/objc/api/peerconnection/RTCDataChannel.h
+++ b/sdk/objc/api/peerconnection/RTCDataChannel.h
@@ -21,13 +21,13 @@
 /** NSData representation of the underlying buffer. */
 @property(nonatomic, readonly) NSData *data;
 
-/** Indicates whether |data| contains UTF-8 or binary data. */
+/** Indicates whether `data` contains UTF-8 or binary data. */
 @property(nonatomic, readonly) BOOL isBinary;
 
 - (instancetype)init NS_UNAVAILABLE;
 
 /**
- * Initialize an RTCDataBuffer from NSData. |isBinary| indicates whether |data|
+ * Initialize an RTCDataBuffer from NSData. `isBinary` indicates whether `data`
  * contains UTF-8 or binary data.
  */
 - (instancetype)initWithData:(NSData *)data isBinary:(BOOL)isBinary;
@@ -47,7 +47,7 @@
     didReceiveMessageWithBuffer:(RTC_OBJC_TYPE(RTCDataBuffer) *)buffer;
 
 @optional
-/** The data channel's |bufferedAmount| changed. */
+/** The data channel's `bufferedAmount` changed. */
 - (void)dataChannel:(RTC_OBJC_TYPE(RTCDataChannel) *)dataChannel
     didChangeBufferedAmount:(uint64_t)amount;
 
@@ -124,7 +124,7 @@
 /** Closes the data channel. */
 - (void)close;
 
-/** Attempt to send |data| on this data channel's underlying data transport. */
+/** Attempt to send `data` on this data channel's underlying data transport. */
 - (BOOL)sendData:(RTC_OBJC_TYPE(RTCDataBuffer) *)data;
 
 @end
diff --git a/sdk/objc/api/peerconnection/RTCFileLogger.h b/sdk/objc/api/peerconnection/RTCFileLogger.h
index 853e673..cb397c9 100644
--- a/sdk/objc/api/peerconnection/RTCFileLogger.h
+++ b/sdk/objc/api/peerconnection/RTCFileLogger.h
@@ -43,7 +43,7 @@
 // kRTCFileLoggerTypeCall.
 @property(nonatomic, readonly) RTCFileLoggerRotationType rotationType;
 
-// Disables buffering disk writes. Should be set before |start|. Buffering
+// Disables buffering disk writes. Should be set before `start`. Buffering
 // is enabled by default for performance.
 @property(nonatomic, assign) BOOL shouldDisableBuffering;
 
diff --git a/sdk/objc/api/peerconnection/RTCIceServer.h b/sdk/objc/api/peerconnection/RTCIceServer.h
index dd66c61..7ddcbc1 100644
--- a/sdk/objc/api/peerconnection/RTCIceServer.h
+++ b/sdk/objc/api/peerconnection/RTCIceServer.h
@@ -37,9 +37,9 @@
 @property(nonatomic, readonly) RTCTlsCertPolicy tlsCertPolicy;
 
 /**
-  If the URIs in |urls| only contain IP addresses, this field can be used
+  If the URIs in `urls` only contain IP addresses, this field can be used
   to indicate the hostname, which may be necessary for TLS (using the SNI
-  extension). If |urls| itself contains the hostname, this isn't necessary.
+  extension). If `urls` itself contains the hostname, this isn't necessary.
  */
 @property(nonatomic, readonly, nullable) NSString *hostname;
 
diff --git a/sdk/objc/api/peerconnection/RTCPeerConnection.h b/sdk/objc/api/peerconnection/RTCPeerConnection.h
index 79e0625..98088ec 100644
--- a/sdk/objc/api/peerconnection/RTCPeerConnection.h
+++ b/sdk/objc/api/peerconnection/RTCPeerConnection.h
@@ -174,7 +174,7 @@
  */
 @property(nonatomic, weak, nullable) id<RTC_OBJC_TYPE(RTCPeerConnectionDelegate)> delegate;
 /** This property is not available with RTCSdpSemanticsUnifiedPlan. Please use
- *  |senders| instead.
+ *  `senders` instead.
  */
 @property(nonatomic, readonly) NSArray<RTC_OBJC_TYPE(RTCMediaStream) *> *localStreams;
 @property(nonatomic, readonly, nullable) RTC_OBJC_TYPE(RTCSessionDescription) * localDescription;
@@ -207,7 +207,7 @@
 
 - (instancetype)init NS_UNAVAILABLE;
 
-/** Sets the PeerConnection's global configuration to |configuration|.
+/** Sets the PeerConnection's global configuration to `configuration`.
  *  Any changes to STUN/TURN servers or ICE candidate policy will affect the
  *  next gathering phase, and cause the next call to createOffer to generate
  *  new ICE credentials. Note that the BUNDLE and RTCP-multiplexing policies
@@ -243,7 +243,7 @@
 
 /** Add a new media stream track to be sent on this peer connection, and return
  *  the newly created RTCRtpSender. The RTCRtpSender will be
- * associated with the streams specified in the |streamIds| list.
+ * associated with the streams specified in the `streamIds` list.
  *
  *  Errors: If an error occurs, returns nil. An error can occur if:
  *  - A sender already exists for the track.
@@ -265,7 +265,7 @@
  *  transceivers. Adding a transceiver will cause future calls to CreateOffer
  *  to add a media description for the corresponding transceiver.
  *
- *  The initial value of |mid| in the returned transceiver is nil. Setting a
+ *  The initial value of `mid` in the returned transceiver is nil. Setting a
  *  new session description may change it to a non-nil value.
  *
  *  https://w3c.github.io/webrtc-pc/#dom-rtcpeerconnection-addtransceiver
@@ -325,7 +325,7 @@
 
 /** Limits the bandwidth allocated for all RTP streams sent by this
  *  PeerConnection. Nil parameters will be unchanged. Setting
- * |currentBitrateBps| will force the available bitrate estimate to the given
+ * `currentBitrateBps` will force the available bitrate estimate to the given
  *  value. Returns YES if the parameters were successfully updated.
  */
 - (BOOL)setBweMinBitrateBps:(nullable NSNumber *)minBitrateBps
@@ -365,7 +365,7 @@
 @interface RTC_OBJC_TYPE (RTCPeerConnection)
 (Stats)
 
-    /** Gather stats for the given RTCMediaStreamTrack. If |mediaStreamTrack| is nil
+    /** Gather stats for the given RTCMediaStreamTrack. If `mediaStreamTrack` is nil,
      *  statistics are gathered for all tracks.
      */
     - (void)statsForTrack
diff --git a/sdk/objc/components/audio/RTCAudioSession+Private.h b/sdk/objc/components/audio/RTCAudioSession+Private.h
index 8496ca6..8ee4fdd 100644
--- a/sdk/objc/components/audio/RTCAudioSession+Private.h
+++ b/sdk/objc/components/audio/RTCAudioSession+Private.h
@@ -22,8 +22,8 @@
      */
     @property(nonatomic, readonly) int activationCount;
 
-/** The number of times |beginWebRTCSession| was called without a balanced call
- *  to |endWebRTCSession|.
+/** The number of times `beginWebRTCSession` was called without a balanced call
+ *  to `endWebRTCSession`.
  */
 @property(nonatomic, readonly) int webRTCSessionCount;
 
@@ -57,16 +57,16 @@
 /** Configure the audio session for WebRTC. This call will fail if the session
  *  is already configured. On other failures, we will attempt to restore the
  *  previously used audio session configuration.
- *  |lockForConfiguration| must be called first.
+ *  `lockForConfiguration` must be called first.
  *  Successful calls to configureWebRTCSession must be matched by calls to
- *  |unconfigureWebRTCSession|.
+ *  `unconfigureWebRTCSession`.
  */
 - (BOOL)configureWebRTCSession:(NSError **)outError;
 
 /** Unconfigures the session for WebRTC. This will attempt to restore the
- *  audio session to the settings used before |configureWebRTCSession| was
+ *  audio session to the settings used before `configureWebRTCSession` was
  *  called.
- *  |lockForConfiguration| must be called first.
+ *  `lockForConfiguration` must be called first.
  */
 - (BOOL)unconfigureWebRTCSession:(NSError **)outError;
 
diff --git a/sdk/objc/components/audio/RTCAudioSession.h b/sdk/objc/components/audio/RTCAudioSession.h
index 79658e3..59250fe 100644
--- a/sdk/objc/components/audio/RTCAudioSession.h
+++ b/sdk/objc/components/audio/RTCAudioSession.h
@@ -209,9 +209,9 @@
 /** Relinquishes exclusive access to the audio session. */
 - (void)unlockForConfiguration;
 
-/** If |active|, activates the audio session if it isn't already active.
+/** If `active`, activates the audio session if it isn't already active.
  *  Successful calls must be balanced with a setActive:NO when activation is no
- *  longer required. If not |active|, deactivates the audio session if one is
+ *  longer required. If not `active`, deactivates the audio session if one is
  *  active and this is the last balanced call. When deactivating, the
  *  AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation option is passed to
  *  AVAudioSession.
@@ -219,7 +219,7 @@
 - (BOOL)setActive:(BOOL)active error:(NSError **)outError;
 
 // The following methods are proxies for the associated methods on
-// AVAudioSession. |lockForConfiguration| must be called before using them
+// AVAudioSession. `lockForConfiguration` must be called before using them;
 // otherwise they will fail with kRTCAudioSessionErrorLockRequired.
 
 - (BOOL)setCategory:(NSString *)category
@@ -245,13 +245,13 @@
     /** Applies the configuration to the current session. Attempts to set all
      *  properties even if previous ones fail. Only the last error will be
      *  returned.
-     *  |lockForConfiguration| must be called first.
+     *  `lockForConfiguration` must be called first.
      */
     - (BOOL)setConfiguration : (RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *)configuration error
     : (NSError **)outError;
 
 /** Convenience method that calls both setConfiguration and setActive.
- *  |lockForConfiguration| must be called first.
+ *  `lockForConfiguration` must be called first.
  */
 - (BOOL)setConfiguration:(RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *)configuration
                   active:(BOOL)active
diff --git a/sdk/objc/components/audio/RTCNativeAudioSessionDelegateAdapter.h b/sdk/objc/components/audio/RTCNativeAudioSessionDelegateAdapter.h
index e28f26f..6a75f01 100644
--- a/sdk/objc/components/audio/RTCNativeAudioSessionDelegateAdapter.h
+++ b/sdk/objc/components/audio/RTCNativeAudioSessionDelegateAdapter.h
@@ -23,7 +23,7 @@
 
 - (instancetype)init NS_UNAVAILABLE;
 
-/** |observer| is a raw pointer and should be kept alive
+/** `observer` is a raw pointer and should be kept alive
  *  for this object's lifetime.
  */
 - (instancetype)initWithObserver:(webrtc::AudioSessionObserver *)observer NS_DESIGNATED_INITIALIZER;
diff --git a/sdk/objc/components/capturer/RTCFileVideoCapturer.m b/sdk/objc/components/capturer/RTCFileVideoCapturer.m
index 4c39ccd..bcf1506 100644
--- a/sdk/objc/components/capturer/RTCFileVideoCapturer.m
+++ b/sdk/objc/components/capturer/RTCFileVideoCapturer.m
@@ -165,7 +165,7 @@
   int64_t presentationDifferenceRound = lroundf(presentationDifference * NSEC_PER_SEC);
 
   __block dispatch_source_t timer = [self createStrictTimer];
-  // Strict timer that will fire |presentationDifferenceRound| ns from now and never again.
+  // Strict timer that will fire `presentationDifferenceRound` ns from now and never again.
   dispatch_source_set_timer(timer,
                             dispatch_time(DISPATCH_TIME_NOW, presentationDifferenceRound),
                             DISPATCH_TIME_FOREVER,
diff --git a/sdk/objc/components/network/RTCNetworkMonitor+Private.h b/sdk/objc/components/network/RTCNetworkMonitor+Private.h
index efb37bb..89866ea 100644
--- a/sdk/objc/components/network/RTCNetworkMonitor+Private.h
+++ b/sdk/objc/components/network/RTCNetworkMonitor+Private.h
@@ -14,7 +14,7 @@
 
 @interface RTCNetworkMonitor ()
 
-/** |observer| is a raw pointer and should be kept alive
+/** `observer` is a raw pointer and should be kept alive
  *  for this object's lifetime.
  */
 - (instancetype)initWithObserver:(webrtc::NetworkMonitorObserver *)observer
diff --git a/sdk/objc/components/renderer/opengl/RTCEAGLVideoView.m b/sdk/objc/components/renderer/opengl/RTCEAGLVideoView.m
index a3435a7..89e62d2 100644
--- a/sdk/objc/components/renderer/opengl/RTCEAGLVideoView.m
+++ b/sdk/objc/components/renderer/opengl/RTCEAGLVideoView.m
@@ -31,11 +31,11 @@
 // the method that will trigger the binding of the render
 // buffer. Because the standard behaviour of -[UIView setNeedsDisplay]
 // is disabled for the reasons above, the RTC_OBJC_TYPE(RTCEAGLVideoView) maintains
-// its own |isDirty| flag.
+// its own `isDirty` flag.
 
 @interface RTC_OBJC_TYPE (RTCEAGLVideoView)
 ()<GLKViewDelegate>
-    // |videoFrame| is set when we receive a frame from a worker thread and is read
+    // `videoFrame` is set when we receive a frame from a worker thread and is read
     // from the display link callback so atomicity is required.
     @property(atomic, strong) RTC_OBJC_TYPE(RTCVideoFrame) * videoFrame;
 @property(nonatomic, readonly) GLKView *glkView;
@@ -183,7 +183,7 @@
 // redrawn. This occurs on the main thread.
 - (void)glkView:(GLKView *)view drawInRect:(CGRect)rect {
   // The renderer will draw the frame to the framebuffer corresponding to the
-  // one used by |view|.
+  // one used by `view`.
   RTC_OBJC_TYPE(RTCVideoFrame) *frame = self.videoFrame;
   if (!frame || frame.timeStampNs == _lastDrawnFrameTimeStampNs) {
     return;
diff --git a/sdk/objc/components/renderer/opengl/RTCNSGLVideoView.m b/sdk/objc/components/renderer/opengl/RTCNSGLVideoView.m
index de54e36..168c731 100644
--- a/sdk/objc/components/renderer/opengl/RTCNSGLVideoView.m
+++ b/sdk/objc/components/renderer/opengl/RTCNSGLVideoView.m
@@ -25,7 +25,7 @@
 
 @interface RTC_OBJC_TYPE (RTCNSGLVideoView)
 ()
-    // |videoFrame| is set when we receive a frame from a worker thread and is read
+    // `videoFrame` is set when we receive a frame from a worker thread and is read
     // from the display link callback so atomicity is required.
     @property(atomic, strong) RTC_OBJC_TYPE(RTCVideoFrame) *
     videoFrame;
diff --git a/sdk/objc/components/renderer/opengl/RTCShader.mm b/sdk/objc/components/renderer/opengl/RTCShader.mm
index ea4228e..8eccd7f 100644
--- a/sdk/objc/components/renderer/opengl/RTCShader.mm
+++ b/sdk/objc/components/renderer/opengl/RTCShader.mm
@@ -36,7 +36,7 @@
   "    v_texcoord = texcoord;\n"
   "}\n";
 
-// Compiles a shader of the given |type| with GLSL source |source| and returns
+// Compiles a shader of the given `type` with GLSL source `source` and returns
 // the shader handle or 0 on error.
 GLuint RTCCreateShader(GLenum type, const GLchar *source) {
   GLuint shader = glCreateShader(type);
diff --git a/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm b/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm
index 7c0d029..8794849 100644
--- a/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm
+++ b/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm
@@ -273,7 +273,7 @@
 }
 
 // The function returns the max allowed sample rate (pixels per second) that
-// can be processed by given encoder with |profile_level_id|.
+// can be processed by a given encoder with `profile_level_id`.
 // See https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-H.264-201610-S!!PDF-E&type=items
 // for details.
 NSUInteger GetMaxSampleRate(const webrtc::H264ProfileLevelId &profile_level_id) {
@@ -723,7 +723,7 @@
   if (_compressionSession) {
     SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_AverageBitRate, bitrateBps);
 
-    // With zero |_maxAllowedFrameRate|, we fall back to automatic frame rate detection.
+    // With zero `_maxAllowedFrameRate`, we fall back to automatic frame rate detection.
     if (_maxAllowedFrameRate > 0) {
       SetVTSessionProperty(
           _compressionSession, kVTCompressionPropertyKey_ExpectedFrameRate, frameRate);
diff --git a/sdk/objc/components/video_codec/nalu_rewriter.cc b/sdk/objc/components/video_codec/nalu_rewriter.cc
index 60382d2..b7330e1 100644
--- a/sdk/objc/components/video_codec/nalu_rewriter.cc
+++ b/sdk/objc/components/video_codec/nalu_rewriter.cc
@@ -111,7 +111,7 @@
   }
   size_t bytes_remaining = block_buffer_size;
   while (bytes_remaining > 0) {
-    // The size type here must match |nalu_header_size|, we expect 4 bytes.
+    // The size type here must match `nalu_header_size`; we expect 4 bytes.
     // Read the length of the next packet of data. Must convert from big endian
     // to host endian.
     RTC_DCHECK_GE(bytes_remaining, (size_t)nalu_header_size);
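
The 4-byte length read in that loop is the usual avcc framing step: each NALU is prefixed by its size as a big-endian 32-bit integer, which must be converted to host order before use. A sketch of just that read (assuming POSIX ntohl):

    #include <arpa/inet.h>  // ntohl

    #include <cstdint>
    #include <cstring>

    // Reads the 4-byte big-endian NALU length at `data`. The caller must
    // guarantee at least 4 readable bytes, mirroring the RTC_DCHECK above.
    uint32_t ReadNaluLengthSketch(const uint8_t* data) {
      uint32_t big_endian_length;
      std::memcpy(&big_endian_length, data, sizeof(big_endian_length));
      return ntohl(big_endian_length);  // Big endian -> host endian.
    }
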
diff --git a/sdk/objc/components/video_codec/nalu_rewriter.h b/sdk/objc/components/video_codec/nalu_rewriter.h
index d94ce7b..c647497 100644
--- a/sdk/objc/components/video_codec/nalu_rewriter.h
+++ b/sdk/objc/components/video_codec/nalu_rewriter.h
@@ -26,7 +26,7 @@
 
 // Converts a sample buffer emitted from the VideoToolbox encoder into a buffer
 // suitable for RTP. The sample buffer is in avcc format whereas the rtp buffer
-// needs to be in Annex B format. Data is written directly to |annexb_buffer|.
+// needs to be in Annex B format. Data is written directly to `annexb_buffer`.
 bool H264CMSampleBufferToAnnexBBuffer(CMSampleBufferRef avcc_sample_buffer,
                                       bool is_keyframe,
                                       rtc::Buffer* annexb_buffer);
@@ -34,8 +34,8 @@
 // Converts a buffer received from RTP into a sample buffer suitable for the
 // VideoToolbox decoder. The RTP buffer is in annex b format whereas the sample
 // buffer is in avcc format.
-// If |is_keyframe| is true then |video_format| is ignored since the format will
-// be read from the buffer. Otherwise |video_format| must be provided.
+// If `is_keyframe` is true then `video_format` is ignored since the format will
+// be read from the buffer. Otherwise `video_format` must be provided.
 // Caller is responsible for releasing the created sample buffer.
 bool H264AnnexBBufferToCMSampleBuffer(const uint8_t* annexb_buffer,
                                       size_t annexb_buffer_size,
diff --git a/sdk/objc/components/video_frame_buffer/RTCCVPixelBuffer.h b/sdk/objc/components/video_frame_buffer/RTCCVPixelBuffer.h
index 17eebd0..664d9bb 100644
--- a/sdk/objc/components/video_frame_buffer/RTCCVPixelBuffer.h
+++ b/sdk/objc/components/video_frame_buffer/RTCCVPixelBuffer.h
@@ -40,9 +40,9 @@
 - (BOOL)requiresScalingToWidth:(int)width height:(int)height;
 - (int)bufferSizeForCroppingAndScalingToWidth:(int)width height:(int)height;
 
-/** The minimum size of the |tmpBuffer| must be the number of bytes returned from the
+/** The minimum size of the `tmpBuffer` must be the number of bytes returned from the
  * bufferSizeForCroppingAndScalingToWidth:height: method.
- * If that size is 0, the |tmpBuffer| may be nil.
+ * If that size is 0, the `tmpBuffer` may be nil.
  */
 - (BOOL)cropAndScaleTo:(CVPixelBufferRef)outputPixelBuffer
         withTempBuffer:(nullable uint8_t *)tmpBuffer;
diff --git a/sdk/objc/native/api/audio_device_module.h b/sdk/objc/native/api/audio_device_module.h
index 8925f30..3405469 100644
--- a/sdk/objc/native/api/audio_device_module.h
+++ b/sdk/objc/native/api/audio_device_module.h
@@ -17,9 +17,9 @@
 
 namespace webrtc {
 
-// If |bypass_voice_processing| is true, WebRTC will attempt to disable hardware
+// If `bypass_voice_processing` is true, WebRTC will attempt to disable hardware
 // audio processing on iOS.
-// Warning: Setting |bypass_voice_processing| will have unpredictable
+// Warning: Setting `bypass_voice_processing` will have unpredictable
 // consequences for the audio path in the device. It is not advisable to use in
 // most scenarios.
 rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
diff --git a/sdk/objc/native/src/audio/audio_device_ios.h b/sdk/objc/native/src/audio/audio_device_ios.h
index a57e719..5afc49a 100644
--- a/sdk/objc/native/src/audio/audio_device_ios.h
+++ b/sdk/objc/native/src/audio/audio_device_ios.h
@@ -164,7 +164,7 @@
   bool IsInterrupted();
 
  private:
-  // Called by the relevant AudioSessionObserver methods on |thread_|.
+  // Called by the relevant AudioSessionObserver methods on `thread_`.
   void HandleInterruptionBegin();
   void HandleInterruptionEnd();
   void HandleValidRouteChange();
@@ -173,7 +173,7 @@
   void HandlePlayoutGlitchDetected();
   void HandleOutputVolumeChange();
 
-  // Uses current |playout_parameters_| and |record_parameters_| to inform the
+  // Uses current `playout_parameters_` and `record_parameters_` to inform the
   // audio device buffer (ADB) about our internal audio parameters.
   void UpdateAudioDeviceBuffer();
 
@@ -181,7 +181,7 @@
   // values may be different once the AVAudioSession has been activated.
   // This method asks for the current hardware parameters and takes actions
   // if they should differ from what we have asked for initially. It also
-  // defines |playout_parameters_| and |record_parameters_|.
+  // defines `playout_parameters_` and `record_parameters_`.
   void SetupAudioBuffersForActiveAudioSession();
 
   // Creates the audio unit.
diff --git a/sdk/objc/native/src/audio/audio_device_ios.mm b/sdk/objc/native/src/audio/audio_device_ios.mm
index f51714c..e3020ec 100644
--- a/sdk/objc/native/src/audio/audio_device_ios.mm
+++ b/sdk/objc/native/src/audio/audio_device_ios.mm
@@ -386,7 +386,7 @@
   // Allocate AudioBuffers to be used as storage for the received audio.
   // The AudioBufferList structure works as a placeholder for the
   // AudioBuffer structure, which holds a pointer to the actual data buffer
-  // in |record_audio_buffer_|. Recorded audio will be rendered into this memory
+  // in `record_audio_buffer_`. Recorded audio will be rendered into this memory
   // at each input callback when calling AudioUnitRender().
   AudioBufferList audio_buffer_list;
   audio_buffer_list.mNumberBuffers = 1;
@@ -397,7 +397,7 @@
   audio_buffer->mData = reinterpret_cast<int8_t*>(record_audio_buffer_.data());
 
   // Obtain the recorded audio samples by initiating a rendering cycle.
-  // Since it happens on the input bus, the |io_data| parameter is a reference
+  // Since it happens on the input bus, the `io_data` parameter is a reference
   // to the preallocated audio buffer list that the audio unit renders into.
   // We can make the audio unit provide a buffer in io_data instead, but we
   // currently just use our own.
@@ -467,7 +467,7 @@
 
   // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
   // the native I/O audio unit) and copy the result to the audio buffer in the
-  // |io_data| destination.
+  // `io_data` destination.
   fine_audio_buffer_->GetPlayoutData(
       rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_buffer->mData), num_frames),
       kFixedPlayoutDelayEstimate);
diff --git a/sdk/objc/native/src/network_monitor_observer.h b/sdk/objc/native/src/network_monitor_observer.h
index 85fd3b9..6fd126a 100644
--- a/sdk/objc/native/src/network_monitor_observer.h
+++ b/sdk/objc/native/src/network_monitor_observer.h
@@ -24,7 +24,7 @@
  public:
   // Called when a path update occurs, on network monitor dispatch queue.
   //
-  // |adapter_type_by_name| is a map from interface name (i.e. "pdp_ip0") to
+  // `adapter_type_by_name` is a map from interface name (e.g. "pdp_ip0") to
   // adapter type, for all available interfaces on the current path. If an
   // interface name isn't present it can be assumed to be unavailable.
   virtual void OnPathUpdate(
diff --git a/sdk/objc/unittests/RTCAudioDeviceModule_xctest.mm b/sdk/objc/unittests/RTCAudioDeviceModule_xctest.mm
index 094e246..f8ce844 100644
--- a/sdk/objc/unittests/RTCAudioDeviceModule_xctest.mm
+++ b/sdk/objc/unittests/RTCAudioDeviceModule_xctest.mm
@@ -118,7 +118,7 @@
 // Play out a test file during this time (unit is in seconds).
 static const NSUInteger kFilePlayTimeInSec = 15;
 // Run the full-duplex test during this time (unit is in seconds).
-// Note that first |kNumIgnoreFirstCallbacks| are ignored.
+// Note that the first `kNumIgnoreFirstCallbacks` callbacks are ignored.
 static const NSUInteger kFullDuplexTimeInSec = 10;
 // Wait for the callback sequence to stabilize by ignoring this number of
 // initial callbacks (avoids initial FIFO access).