Use backticks not vertical bars to denote variables in comments for /modules/audio_device
Bug: webrtc:12338
Change-Id: I27ad3a5fe6e765379e4e4f42783558c5522bab38
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227091
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34620}
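
For reference, a minimal before/after illustration of the comment style this change enforces (hypothetical lines, not taken from the diff below):

  // Old style: vertical bars delimit variable references in comments.
  //   Copies |num_frames| samples into |destination|.
  // New style: backticks, which review and doc tooling render as code.
  //   Copies `num_frames` samples into `destination`.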
diff --git a/modules/audio_device/android/aaudio_player.cc b/modules/audio_device/android/aaudio_player.cc
index 6d310ed..7f63512 100644
--- a/modules/audio_device/android/aaudio_player.cc
+++ b/modules/audio_device/android/aaudio_player.cc
@@ -184,7 +184,7 @@
}
// Read audio data from the WebRTC source using the FineAudioBuffer object
- // and write that data into |audio_data| to be played out by AAudio.
+ // and write that data into `audio_data` to be played out by AAudio.
// Prime output with zeros during a short initial phase to avoid distortion.
// TODO(henrika): do more work to figure out if the initial forced silence
// period is really needed.
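
A sketch of the priming idea mentioned above: the output callback serves zeros for an initial window before pulling real data. `kPrimeCallbacks`, `PullAudio`, and the free-function shape are illustrative assumptions, not the actual AAudioPlayer implementation.

  #include <cstdint>
  #include <cstring>

  constexpr int kPrimeCallbacks = 50;  // assumed ~0.5 s of 10 ms callbacks

  int32_t OnPlayoutCallback(int16_t* audio_data, int32_t num_samples,
                            int* callback_count,
                            void (*PullAudio)(int16_t*, int32_t)) {
    if ((*callback_count)++ < kPrimeCallbacks) {
      // Initial forced-silence period to avoid start-up distortion.
      std::memset(audio_data, 0, num_samples * sizeof(int16_t));
    } else {
      PullAudio(audio_data, num_samples);  // real data from the WebRTC source
    }
    return 0;  // would be AAUDIO_CALLBACK_RESULT_CONTINUE in the real API
  }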
diff --git a/modules/audio_device/android/aaudio_player.h b/modules/audio_device/android/aaudio_player.h
index 9e9182a..4bf3ee3 100644
--- a/modules/audio_device/android/aaudio_player.h
+++ b/modules/audio_device/android/aaudio_player.h
@@ -76,8 +76,8 @@
protected:
// AAudioObserverInterface implementation.
- // For an output stream, this function should render and write |num_frames|
- // of data in the streams current data format to the |audio_data| buffer.
+ // For an output stream, this function should render and write `num_frames`
+ // of data in the streams current data format to the `audio_data` buffer.
// Called on a real-time thread owned by AAudio.
aaudio_data_callback_result_t OnDataCallback(void* audio_data,
int32_t num_frames) override;
diff --git a/modules/audio_device/android/aaudio_recorder.cc b/modules/audio_device/android/aaudio_recorder.cc
index 95f1a1a..68c9cee 100644
--- a/modules/audio_device/android/aaudio_recorder.cc
+++ b/modules/audio_device/android/aaudio_recorder.cc
@@ -146,7 +146,7 @@
}
}
-// Read and process |num_frames| of data from the |audio_data| buffer.
+// Read and process `num_frames` of data from the `audio_data` buffer.
// TODO(henrika): possibly add trace here to be included in systrace.
// See https://developer.android.com/studio/profile/systrace-commandline.html.
aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
@@ -180,7 +180,7 @@
RTC_DLOG(INFO) << "input latency: " << latency_millis_
<< ", num_frames: " << num_frames;
}
- // Copy recorded audio in |audio_data| to the WebRTC sink using the
+ // Copy recorded audio in `audio_data` to the WebRTC sink using the
// FineAudioBuffer object.
fine_audio_buffer_->DeliverRecordedData(
rtc::MakeArrayView(static_cast<const int16_t*>(audio_data),
diff --git a/modules/audio_device/android/aaudio_recorder.h b/modules/audio_device/android/aaudio_recorder.h
index bbf2cac..d0ad6be 100644
--- a/modules/audio_device/android/aaudio_recorder.h
+++ b/modules/audio_device/android/aaudio_recorder.h
@@ -69,8 +69,8 @@
protected:
// AAudioObserverInterface implementation.
- // For an input stream, this function should read |num_frames| of recorded
- // data, in the stream's current data format, from the |audio_data| buffer.
+ // For an input stream, this function should read `num_frames` of recorded
+ // data, in the stream's current data format, from the `audio_data` buffer.
// Called on a real-time thread owned by AAudio.
aaudio_data_callback_result_t OnDataCallback(void* audio_data,
int32_t num_frames) override;
diff --git a/modules/audio_device/android/audio_device_unittest.cc b/modules/audio_device/android/audio_device_unittest.cc
index 20c36c7..11f747e 100644
--- a/modules/audio_device/android/audio_device_unittest.cc
+++ b/modules/audio_device/android/audio_device_unittest.cc
@@ -68,7 +68,7 @@
static const size_t kBitsPerSample = 16;
static const size_t kBytesPerSample = kBitsPerSample / 8;
// Run the full-duplex test during this time (unit is in seconds).
-// Note that first |kNumIgnoreFirstCallbacks| are ignored.
+// Note that the first `kNumIgnoreFirstCallbacks` callbacks are ignored.
static const int kFullDuplexTimeInSec = 5;
// Wait for the callback sequence to stabilize by ignoring this amount of the
// initial callbacks (avoids initial FIFO access).
@@ -127,7 +127,7 @@
void Write(const void* source, size_t num_frames) override {}
// Read samples from file stored in memory (at construction) and copy
- // |num_frames| (<=> 10ms) to the |destination| byte buffer.
+ // `num_frames` (<=> 10ms) to the `destination` byte buffer.
void Read(void* destination, size_t num_frames) override {
memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
num_frames * sizeof(int16_t));
@@ -171,7 +171,7 @@
~FifoAudioStream() { Flush(); }
- // Allocate new memory, copy |num_frames| samples from |source| into memory
+ // Allocate new memory, copy `num_frames` samples from `source` into memory
// and add pointer to the memory location to end of the list.
// Increases the size of the FIFO by one element.
void Write(const void* source, size_t num_frames) override {
@@ -192,8 +192,8 @@
total_written_elements_ += size;
}
- // Read pointer to data buffer from front of list, copy |num_frames| of stored
- // data into |destination| and delete the utilized memory allocation.
+ // Read pointer to data buffer from front of list, copy `num_frames` of stored
+ // data into `destination` and delete the utilized memory allocation.
// Decreases the size of the FIFO by one element.
void Read(void* destination, size_t num_frames) override {
ASSERT_EQ(num_frames, frames_per_buffer_);
@@ -251,7 +251,7 @@
rec_count_(0),
pulse_time_(0) {}
- // Insert periodic impulses in first two samples of |destination|.
+ // Insert periodic impulses in first two samples of `destination`.
void Read(void* destination, size_t num_frames) override {
ASSERT_EQ(num_frames, frames_per_buffer_);
if (play_count_ == 0) {
@@ -272,14 +272,14 @@
}
}
- // Detect received impulses in |source|, derive time between transmission and
+ // Detect received impulses in `source`, derive time between transmission and
// detection and add the calculated delay to list of latencies.
void Write(const void* source, size_t num_frames) override {
ASSERT_EQ(num_frames, frames_per_buffer_);
rec_count_++;
if (pulse_time_ == 0) {
// Avoid detection of new impulse response until a new impulse has
- // been transmitted (sets |pulse_time_| to value larger than zero).
+ // been transmitted (sets `pulse_time_` to value larger than zero).
return;
}
const int16_t* ptr16 = static_cast<const int16_t*>(source);
@@ -298,7 +298,7 @@
// Total latency is the difference between transmit time and detection
// time plus the extra delay within the buffer in which we detected the
// received impulse. It is transmitted at sample 0 but can be received
- // at sample N where N > 0. The term |extra_delay| accounts for N and it
+ // at sample N where N > 0. The term `extra_delay` accounts for N and it
// is a value between 0 and 10ms.
latencies_.push_back(now_time - pulse_time_ + extra_delay);
pulse_time_ = 0;
diff --git a/modules/audio_device/android/audio_manager.cc b/modules/audio_device/android/audio_manager.cc
index 9c8137b..7de2065 100644
--- a/modules/audio_device/android/audio_manager.cc
+++ b/modules/audio_device/android/audio_manager.cc
@@ -98,7 +98,7 @@
// The delay estimate can take one of two fixed values depending on if the
// device supports low-latency output or not. However, it is also possible
// that the user explicitly selects the high-latency audio path, hence we use
- // the selected |audio_layer| here to set the delay estimate.
+ // the selected `audio_layer` here to set the delay estimate.
delay_estimate_in_milliseconds_ =
(audio_layer == AudioDeviceModule::kAndroidJavaAudio)
? kHighLatencyModeDelayEstimateInMilliseconds
diff --git a/modules/audio_device/android/audio_record_jni.cc b/modules/audio_device/android/audio_record_jni.cc
index a3aa855..2c28ab2 100644
--- a/modules/audio_device/android/audio_record_jni.cc
+++ b/modules/audio_device/android/audio_record_jni.cc
@@ -270,8 +270,8 @@
audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
frames_per_buffer_);
// We provide one (combined) fixed delay estimate for the APM and use the
- // |playDelayMs| parameter only. Components like the AEC only sees the sum
- // of |playDelayMs| and |recDelayMs|, hence the distributions does not matter.
+ // `playDelayMs` parameter only. Components like the AEC only see the sum
+ // of `playDelayMs` and `recDelayMs`, hence the distribution does not matter.
audio_device_buffer_->SetVQEData(total_delay_in_milliseconds_, 0);
if (audio_device_buffer_->DeliverRecordedData() == -1) {
RTC_LOG(INFO) << "AudioDeviceBuffer::DeliverRecordedData failed";
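
The comment above boils down to the AEC consuming only a total delay; a trivial sketch of why the split does not matter (illustrative, not the AudioDeviceBuffer API):

  // (75, 25) and (100, 0) are equivalent inputs to the echo canceller,
  // which only ever aligns render and capture using the sum.
  int TotalDelayForAec(int play_delay_ms, int rec_delay_ms) {
    return play_delay_ms + rec_delay_ms;
  }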
diff --git a/modules/audio_device/android/audio_record_jni.h b/modules/audio_device/android/audio_record_jni.h
index c445360..66a6a89 100644
--- a/modules/audio_device/android/audio_record_jni.h
+++ b/modules/audio_device/android/audio_record_jni.h
@@ -87,8 +87,8 @@
private:
// Called from Java side so we can cache the address of the Java-managed
- // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
- // is also stored in |direct_buffer_capacity_in_bytes_|.
+ // `byte_buffer` in `direct_buffer_address_`. The size of the buffer
+ // is also stored in `direct_buffer_capacity_in_bytes_`.
// This method will be called by the WebRtcAudioRecord constructor, i.e.,
// on the same thread that this object is created on.
static void JNICALL CacheDirectBufferAddress(JNIEnv* env,
@@ -98,8 +98,8 @@
void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);
// Called periodically by the Java based WebRtcAudioRecord object when
- // recording has started. Each call indicates that there are |length| new
- // bytes recorded in the memory area |direct_buffer_address_| and it is
+ // recording has started. Each call indicates that there are `length` new
+ // bytes recorded in the memory area `direct_buffer_address_` and it is
// now time to send these to the consumer.
// This method is called on a high-priority thread from Java. The name of
// the thread is 'AudioRecordThread'.
@@ -142,10 +142,10 @@
// possible values. See audio_common.h for details.
int total_delay_in_milliseconds_;
- // Cached copy of address to direct audio buffer owned by |j_audio_record_|.
+ // Cached copy of address to direct audio buffer owned by `j_audio_record_`.
void* direct_buffer_address_;
- // Number of bytes in the direct audio buffer owned by |j_audio_record_|.
+ // Number of bytes in the direct audio buffer owned by `j_audio_record_`.
size_t direct_buffer_capacity_in_bytes_;
// Number audio frames per audio buffer. Each audio frame corresponds to
diff --git a/modules/audio_device/android/audio_track_jni.h b/modules/audio_device/android/audio_track_jni.h
index 62bcba4..7eb6908 100644
--- a/modules/audio_device/android/audio_track_jni.h
+++ b/modules/audio_device/android/audio_track_jni.h
@@ -88,8 +88,8 @@
private:
// Called from Java side so we can cache the address of the Java-managed
- // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
- // is also stored in |direct_buffer_capacity_in_bytes_|.
+ // `byte_buffer` in `direct_buffer_address_`. The size of the buffer
+ // is also stored in `direct_buffer_capacity_in_bytes_`.
// Called on the same thread as the creating thread.
static void JNICALL CacheDirectBufferAddress(JNIEnv* env,
jobject obj,
@@ -98,8 +98,8 @@
void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);
// Called periodically by the Java based WebRtcAudioTrack object when
- // playout has started. Each call indicates that |length| new bytes should
- // be written to the memory area |direct_buffer_address_| for playout.
+ // playout has started. Each call indicates that `length` new bytes should
+ // be written to the memory area `direct_buffer_address_` for playout.
// This method is called on a high-priority thread from Java. The name of
// the thread is 'AudioTrackThread'.
static void JNICALL GetPlayoutData(JNIEnv* env,
@@ -133,10 +133,10 @@
// AudioManager.
const AudioParameters audio_parameters_;
- // Cached copy of address to direct audio buffer owned by |j_audio_track_|.
+ // Cached copy of address to direct audio buffer owned by `j_audio_track_`.
void* direct_buffer_address_;
- // Number of bytes in the direct audio buffer owned by |j_audio_track_|.
+ // Number of bytes in the direct audio buffer owned by `j_audio_track_`.
size_t direct_buffer_capacity_in_bytes_;
// Number of audio frames per audio buffer. Each audio frame corresponds to
diff --git a/modules/audio_device/android/build_info.h b/modules/audio_device/android/build_info.h
index 2f27093..3647e56 100644
--- a/modules/audio_device/android/build_info.h
+++ b/modules/audio_device/android/build_info.h
@@ -64,7 +64,7 @@
SdkCode GetSdkVersion();
private:
- // Helper method which calls a static getter method with |name| and returns
+ // Helper method which calls a static getter method with `name` and returns
// a string from Java.
std::string GetStringFromJava(const char* name);
diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
index 5efc813..01e83ea 100644
--- a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
+++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
@@ -23,7 +23,7 @@
// This class wraps control of two platform effects. Supported
// effects are: AcousticEchoCanceler (AEC) and NoiseSuppressor (NS).
// Calling enable() will activate all effects that are
-// supported by the device if the corresponding |shouldEnableXXX| member is set.
+// supported by the device if the corresponding `shouldEnableXXX` member is set.
public class WebRtcAudioEffects {
private static final boolean DEBUG = false;
@@ -162,7 +162,7 @@
}
// Call this method to enable or disable the platform AEC. It modifies
- // |shouldEnableAec| which is used in enable() where the actual state
+ // `shouldEnableAec` which is used in enable() where the actual state
// of the AEC effect is modified. Returns true if HW AEC is supported and
// false otherwise.
public boolean setAEC(boolean enable) {
@@ -181,7 +181,7 @@
}
// Call this method to enable or disable the platform NS. It modifies
- // |shouldEnableNs| which is used in enable() where the actual state
+ // `shouldEnableNs` which is used in enable() where the actual state
// of the NS effect is modified. Returns true if HW NS is supported and
// false otherwise.
public boolean setNS(boolean enable) {
@@ -269,7 +269,7 @@
}
}
- // Returns true for effect types in |type| that are of "VoIP" types:
+ // Returns true for effect types in `type` that are of "VoIP" types:
// Acoustic Echo Canceler (AEC) or Automatic Gain Control (AGC) or
// Noise Suppressor (NS). Note that, an extra check for support is needed
// in each comparison since some devices includes effects in the
@@ -306,7 +306,7 @@
}
// Returns true if an effect of the specified type is available. Functionally
- // equivalent to (NoiseSuppressor|AutomaticGainControl|...).isAvailable(), but
+ // equivalent to (`NoiseSuppressor`|`AutomaticGainControl`|...).isAvailable(), but
// faster as it avoids the expensive OS call to enumerate effects.
private static boolean isEffectTypeAvailable(UUID effectType) {
Descriptor[] effects = getAvailableEffects();
diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
index c712a32..fa188be 100644
--- a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
+++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
@@ -366,7 +366,7 @@
return AudioSource.VOICE_COMMUNICATION;
}
- // Sets all recorded samples to zero if |mute| is true, i.e., ensures that
+ // Sets all recorded samples to zero if `mute` is true, i.e., ensures that
// the microphone is muted.
public static void setMicrophoneMute(boolean mute) {
Logging.w(TAG, "setMicrophoneMute(" + mute + ")");
diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
index 7e6ad5a..95fd2e0 100644
--- a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
+++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
@@ -78,7 +78,7 @@
private @Nullable AudioTrack audioTrack;
private @Nullable AudioTrackThread audioThread;
- // Samples to be played are replaced by zeros if |speakerMute| is set to true.
+ // Samples to be played are replaced by zeros if `speakerMute` is set to true.
// Can be used to ensure that the speaker is fully muted.
private static volatile boolean speakerMute;
private byte[] emptyBytes;
@@ -239,9 +239,9 @@
Logging.d(TAG, "minBufferSizeInBytes: " + minBufferSizeInBytes);
// For the streaming mode, data must be written to the audio sink in
// chunks of size (given by byteBuffer.capacity()) less than or equal
- // to the total buffer size |minBufferSizeInBytes|. But, we have seen
+ // to the total buffer size `minBufferSizeInBytes`. But, we have seen
// reports of "getMinBufferSize(): error querying hardware". Hence, it
- // can happen that |minBufferSizeInBytes| contains an invalid value.
+ // can happen that `minBufferSizeInBytes` contains an invalid value.
if (minBufferSizeInBytes < byteBuffer.capacity()) {
reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value.");
return -1;
@@ -481,7 +481,7 @@
private native void nativeGetPlayoutData(int bytes, long nativeAudioRecord);
- // Sets all samples to be played out to zero if |mute| is true, i.e.,
+ // Sets all samples to be played out to zero if `mute` is true, i.e.,
// ensures that the speaker is muted.
public static void setSpeakerMute(boolean mute) {
Logging.w(TAG, "setSpeakerMute(" + mute + ")");
diff --git a/modules/audio_device/android/opensles_player.h b/modules/audio_device/android/opensles_player.h
index 78af29b..41593a4 100644
--- a/modules/audio_device/android/opensles_player.h
+++ b/modules/audio_device/android/opensles_player.h
@@ -86,7 +86,7 @@
// Reads audio data in PCM format using the AudioDeviceBuffer.
// Can be called both on the main thread (during Start()) and from the
// internal audio thread while output streaming is active.
- // If the |silence| flag is set, the audio is filled with zeros instead of
+ // If the `silence` flag is set, the audio is filled with zeros instead of
// asking the WebRTC layer for real audio data. This procedure is also known
// as audio priming.
void EnqueuePlayoutData(bool silence);
@@ -97,7 +97,7 @@
// Obtains the SL Engine Interface from the existing global Engine object.
// The interface exposes creation methods of all the OpenSL ES object types.
- // This method defines the |engine_| member variable.
+ // This method defines the `engine_` member variable.
bool ObtainEngineInterface();
// Creates/destroys the output mix object.
diff --git a/modules/audio_device/android/opensles_recorder.h b/modules/audio_device/android/opensles_recorder.h
index 5f975d7..e659c3c 100644
--- a/modules/audio_device/android/opensles_recorder.h
+++ b/modules/audio_device/android/opensles_recorder.h
@@ -83,7 +83,7 @@
private:
// Obtains the SL Engine Interface from the existing global Engine object.
// The interface exposes creation methods of all the OpenSL ES object types.
- // This method defines the |engine_| member variable.
+ // This method defines the `engine_` member variable.
bool ObtainEngineInterface();
// Creates/destroys the audio recorder and the simple-buffer queue object.
@@ -104,7 +104,7 @@
// Wraps calls to SLAndroidSimpleBufferQueueState::Enqueue() and it can be
// called both on the main thread (but before recording has started) and from
// the internal audio thread while input streaming is active. It uses
- // |simple_buffer_queue_| but no lock is needed since the initial calls from
+ // `simple_buffer_queue_` but no lock is needed since the initial calls from
// the main thread and the native callback thread are mutually exclusive.
bool EnqueueAudioBuffer();
diff --git a/modules/audio_device/audio_device_buffer.cc b/modules/audio_device/audio_device_buffer.cc
index 9770454..572982e 100644
--- a/modules/audio_device/audio_device_buffer.cc
+++ b/modules/audio_device/audio_device_buffer.cc
@@ -160,10 +160,10 @@
// recorded. Measurements (max of absolute level) are taken twice per second,
// which means that if e.g. 10 seconds of audio have been recorded, a total of
// 20 level estimates must all be identical to zero to trigger the histogram.
- // |only_silence_recorded_| can only be cleared on the native audio thread
+ // `only_silence_recorded_` can only be cleared on the native audio thread
// that drives audio capture but we know by design that the audio has stopped
// when this method is called, hence there should not be any conflicts. Also,
- // the fact that |only_silence_recorded_| can be affected during the complete
+ // the fact that `only_silence_recorded_` can be affected during the complete
// call makes chances of conflicts with potentially one last callback very
// small.
const size_t time_since_start = rtc::TimeSince(rec_start_time_);
@@ -245,7 +245,7 @@
// Returns the largest absolute value in a signed 16-bit vector.
max_abs = WebRtcSpl_MaxAbsValueW16(rec_buffer_.data(), rec_buffer_.size());
rec_stat_count_ = 0;
- // Set |only_silence_recorded_| to false as soon as at least one detection
+ // Set `only_silence_recorded_` to false as soon as at least one detection
// of a non-zero audio packet is found. It can only be restored to true
// again by restarting the call.
if (max_abs > 0) {
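
A plain-C++ sketch of the silence bookkeeping described above, with `std::max`/`std::abs` standing in for WebRtcSpl_MaxAbsValueW16 (function name assumed):

  #include <algorithm>
  #include <cstdint>
  #include <cstdlib>
  #include <vector>

  void UpdateSilenceFlag(const std::vector<int16_t>& rec_buffer,
                         bool* only_silence_recorded) {
    int max_abs = 0;
    for (int16_t s : rec_buffer)
      max_abs = std::max(max_abs, std::abs(static_cast<int>(s)));
    // Cleared on the first non-zero level; only restarting the call can
    // set it back to true.
    if (max_abs > 0)
      *only_silence_recorded = false;
  }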
diff --git a/modules/audio_device/audio_device_unittest.cc b/modules/audio_device/audio_device_unittest.cc
index b0af952..a0ec149 100644
--- a/modules/audio_device/audio_device_unittest.cc
+++ b/modules/audio_device/audio_device_unittest.cc
@@ -162,14 +162,14 @@
// channel configuration. No conversion is needed.
std::copy(buffer.begin(), buffer.end(), destination.begin());
} else if (destination.size() == 2 * buffer.size()) {
- // Recorded input signal in |buffer| is in mono. Do channel upmix to
+ // Recorded input signal in `buffer` is in mono. Do channel upmix to
// match stereo output (1 -> 2).
for (size_t i = 0; i < buffer.size(); ++i) {
destination[2 * i] = buffer[i];
destination[2 * i + 1] = buffer[i];
}
} else if (buffer.size() == 2 * destination.size()) {
- // Recorded input signal in |buffer| is in stereo. Do channel downmix
+ // Recorded input signal in `buffer` is in stereo. Do channel downmix
// to match mono output (2 -> 1).
for (size_t i = 0; i < destination.size(); ++i) {
destination[i] =
@@ -219,7 +219,7 @@
write_thread_checker_.Detach();
}
- // Insert periodic impulses in first two samples of |destination|.
+ // Insert periodic impulses in first two samples of `destination`.
void Read(rtc::ArrayView<int16_t> destination) override {
RTC_DCHECK_RUN_ON(&read_thread_checker_);
if (read_count_ == 0) {
@@ -240,7 +240,7 @@
}
}
- // Detect received impulses in |source|, derive time between transmission and
+ // Detect received impulses in `source`, derive time between transmission and
// detection and add the calculated delay to list of latencies.
void Write(rtc::ArrayView<const int16_t> source) override {
RTC_DCHECK_RUN_ON(&write_thread_checker_);
@@ -249,7 +249,7 @@
write_count_++;
if (!pulse_time_) {
// Avoid detection of new impulse response until a new impulse has
- // been transmitted (sets |pulse_time_| to value larger than zero).
+ // been transmitted (sets `pulse_time_` to value larger than zero).
return;
}
// Find index (element position in vector) of the max element.
@@ -267,7 +267,7 @@
// Total latency is the difference between transmit time and detection
// time plus the extra delay within the buffer in which we detected the
// received impulse. It is transmitted at sample 0 but can be received
- // at sample N where N > 0. The term |extra_delay| accounts for N and it
+ // at sample N where N > 0. The term `extra_delay` accounts for N and it
// is a value between 0 and 10ms.
latencies_.push_back(now_time - *pulse_time_ + extra_delay);
pulse_time_.reset();
@@ -586,7 +586,7 @@
rtc::scoped_refptr<AudioDeviceModuleForTest> CreateAudioDevice() {
// Use the default factory for kPlatformDefaultAudio and a special factory
// CreateWindowsCoreAudioAudioDeviceModuleForTest() for kWindowsCoreAudio2.
- // The value of |audio_layer_| is set at construction by GetParam() and two
+ // The value of `audio_layer_` is set at construction by GetParam() and two
// different layers are tested on Windows only.
if (audio_layer_ == AudioDeviceModule::kPlatformDefaultAudio) {
return AudioDeviceModule::CreateForTest(audio_layer_,
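
The mono/stereo conversions in the fixture above follow a standard pattern; a self-contained sketch (function names are illustrative):

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // Upmix duplicates each mono sample into both channels (1 -> 2).
  void UpmixMonoToStereo(const std::vector<int16_t>& mono,
                         std::vector<int16_t>* stereo) {
    stereo->resize(2 * mono.size());
    for (size_t i = 0; i < mono.size(); ++i) {
      (*stereo)[2 * i] = mono[i];
      (*stereo)[2 * i + 1] = mono[i];
    }
  }

  // Downmix averages each interleaved stereo pair (2 -> 1).
  void DownmixStereoToMono(const std::vector<int16_t>& stereo,
                           std::vector<int16_t>* mono) {
    mono->resize(stereo.size() / 2);
    for (size_t i = 0; i < mono->size(); ++i)
      (*mono)[i] =
          static_cast<int16_t>((stereo[2 * i] + stereo[2 * i + 1]) / 2);
  }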
diff --git a/modules/audio_device/dummy/file_audio_device.h b/modules/audio_device/dummy/file_audio_device.h
index f4a6b765..4d6858f 100644
--- a/modules/audio_device/dummy/file_audio_device.h
+++ b/modules/audio_device/dummy/file_audio_device.h
@@ -28,8 +28,8 @@
// and plays out into a file.
class FileAudioDevice : public AudioDeviceGeneric {
public:
- // Constructs a file audio device with |id|. It will read audio from
- // |inputFilename| and record output audio to |outputFilename|.
+ // Constructs a file audio device with `id`. It will read audio from
+ // `inputFilename` and record output audio to `outputFilename`.
//
// The input file should be a readable 48k stereo raw file, and the output
// file should point to a writable location. The output format will also be
diff --git a/modules/audio_device/fine_audio_buffer.cc b/modules/audio_device/fine_audio_buffer.cc
index b4f3c37..4f3f48c 100644
--- a/modules/audio_device/fine_audio_buffer.cc
+++ b/modules/audio_device/fine_audio_buffer.cc
@@ -113,7 +113,7 @@
record_buffer_.AppendData(audio_buffer.data(), audio_buffer.size());
// Consume samples from buffer in chunks of 10ms until there is not
// enough data left. The number of remaining samples in the cache is given by
- // the new size of the internal |record_buffer_|.
+ // the new size of the internal `record_buffer_`.
const size_t num_elements_10ms =
record_channels_ * record_samples_per_channel_10ms_;
while (record_buffer_.size() >= num_elements_10ms) {
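
A sketch of the 10 ms chunking the comment describes, using a plain vector as the cache and an assumed `Deliver10ms` sink in place of the AudioDeviceBuffer:

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  void OnRecordedData(const std::vector<int16_t>& audio_buffer,
                      std::vector<int16_t>* cache, size_t num_elements_10ms,
                      void (*Deliver10ms)(const int16_t*, size_t)) {
    cache->insert(cache->end(), audio_buffer.begin(), audio_buffer.end());
    // Hand off exact 10 ms chunks; whatever is left stays cached until the
    // next device callback (the real code uses a growable buffer instead).
    while (cache->size() >= num_elements_10ms) {
      Deliver10ms(cache->data(), num_elements_10ms);
      cache->erase(cache->begin(), cache->begin() + num_elements_10ms);
    }
  }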
diff --git a/modules/audio_device/fine_audio_buffer.h b/modules/audio_device/fine_audio_buffer.h
index 210eda8..99f282c1 100644
--- a/modules/audio_device/fine_audio_buffer.h
+++ b/modules/audio_device/fine_audio_buffer.h
@@ -29,7 +29,7 @@
// accumulated 10ms worth of data to the ADB every second call.
class FineAudioBuffer {
public:
- // |device_buffer| is a buffer that provides 10ms of audio data.
+ // `device_buffer` is a buffer that provides 10ms of audio data.
FineAudioBuffer(AudioDeviceBuffer* audio_device_buffer);
~FineAudioBuffer();
@@ -42,18 +42,18 @@
bool IsReadyForPlayout() const;
bool IsReadyForRecord() const;
- // Copies audio samples into |audio_buffer| where number of requested
+ // Copies audio samples into `audio_buffer` where number of requested
// elements is specified by |audio_buffer.size()|. The producer will always
// fill up the audio buffer and if no audio exists, the buffer will contain
- // silence instead. The provided delay estimate in |playout_delay_ms| should
+ // silence instead. The provided delay estimate in `playout_delay_ms` should
// contain an estimate of the latency between when an audio frame is read from
// WebRTC and when it is played out on the speaker.
void GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
int playout_delay_ms);
- // Consumes the audio data in |audio_buffer| and sends it to the WebRTC layer
+ // Consumes the audio data in `audio_buffer` and sends it to the WebRTC layer
// in chunks of 10ms. The sum of the provided delay estimate in
- // |record_delay_ms| and the latest |playout_delay_ms| in GetPlayoutData()
+ // `record_delay_ms` and the latest `playout_delay_ms` in GetPlayoutData()
// are given to the AEC in the audio processing module.
// They can be fixed values on most platforms and they are ignored if an
// external (hardware/built-in) AEC is used.
@@ -72,11 +72,11 @@
// time of this object.
AudioDeviceBuffer* const audio_device_buffer_;
// Number of audio samples per channel per 10ms. Set once at construction
- // based on parameters in |audio_device_buffer|.
+ // based on parameters in `audio_device_buffer`.
const size_t playout_samples_per_channel_10ms_;
const size_t record_samples_per_channel_10ms_;
// Number of audio channels. Set once at construction based on parameters in
- // |audio_device_buffer|.
+ // `audio_device_buffer`.
const size_t playout_channels_;
const size_t record_channels_;
// Storage for output samples from which a consumer can read audio buffers
diff --git a/modules/audio_device/fine_audio_buffer_unittest.cc b/modules/audio_device/fine_audio_buffer_unittest.cc
index 2199067..36ea85f 100644
--- a/modules/audio_device/fine_audio_buffer_unittest.cc
+++ b/modules/audio_device/fine_audio_buffer_unittest.cc
@@ -36,7 +36,7 @@
// E.g. if there are two buffers of size 3, buffer 1 would contain 0,1,2 and
// buffer 2 would contain 3,4,5. Note that SCHAR_MAX is 127 so wrap-around
// will happen.
-// |buffer| is the audio buffer to verify.
+// `buffer` is the audio buffer to verify.
bool VerifyBuffer(const int16_t* buffer, int buffer_number, int size) {
int start_value = (buffer_number * size) % SCHAR_MAX;
for (int i = 0; i < size; ++i) {
@@ -51,9 +51,9 @@
// called (which is done implicitly when calling GetBufferData). It writes the
// sequence 0,1,..SCHAR_MAX-1,0,1,... to the buffer. Note that this is likely a
// buffer of different size than the one VerifyBuffer verifies.
-// |iteration| is the number of calls made to UpdateBuffer prior to this call.
-// |samples_per_10_ms| is the number of samples that should be written to the
-// buffer (|arg0|).
+// `iteration` is the number of calls made to UpdateBuffer prior to this call.
+// `samples_per_10_ms` is the number of samples that should be written to the
+// buffer (`arg0`).
ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) {
int16_t* buffer = static_cast<int16_t*>(arg0);
int start_value = (iteration * samples_per_10_ms) % SCHAR_MAX;
@@ -64,7 +64,7 @@
return samples_per_10_ms / kChannels;
}
-// Writes a periodic ramp pattern to the supplied |buffer|. See UpdateBuffer()
+// Writes a periodic ramp pattern to the supplied `buffer`. See UpdateBuffer()
// for details.
void UpdateInputBuffer(int16_t* buffer, int iteration, int size) {
int start_value = (iteration * size) % SCHAR_MAX;
@@ -74,7 +74,7 @@
}
// Action macro which verifies that the recorded 10ms chunk of audio data
-// (in |arg0|) contains the correct reference values even if they have been
+// (in `arg0`) contains the correct reference values even if they have been
// supplied using a buffer size that is smaller or larger than 10ms.
// See VerifyBuffer() for details.
ACTION_P2(VerifyInputBuffer, iteration, samples_per_10_ms) {
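
Both the playout and record checks rely on the same wrap-around ramp; a sketch of the writer/verifier pair, with the SCHAR_MAX wrap as described above:

  #include <climits>
  #include <cstdint>

  // Buffer k of size n holds (k*n)%SCHAR_MAX, (k*n+1)%SCHAR_MAX, ... so any
  // chunking of the stream can be verified independently of buffer size.
  void WriteRamp(int16_t* buffer, int buffer_number, int size) {
    int start_value = (buffer_number * size) % SCHAR_MAX;
    for (int i = 0; i < size; ++i)
      buffer[i] = static_cast<int16_t>((start_value + i) % SCHAR_MAX);
  }

  bool VerifyRamp(const int16_t* buffer, int buffer_number, int size) {
    int start_value = (buffer_number * size) % SCHAR_MAX;
    for (int i = 0; i < size; ++i)
      if (buffer[i] != (start_value + i) % SCHAR_MAX) return false;
    return true;
  }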
diff --git a/modules/audio_device/include/audio_device_factory.h b/modules/audio_device/include/audio_device_factory.h
index 9c19d61..edd7686 100644
--- a/modules/audio_device/include/audio_device_factory.h
+++ b/modules/audio_device/include/audio_device_factory.h
@@ -20,7 +20,7 @@
// Creates an AudioDeviceModule (ADM) for Windows based on the Core Audio API.
// The creating thread must be a COM thread; otherwise nullptr will be returned.
-// By default |automatic_restart| is set to true and it results in support for
+// By default `automatic_restart` is set to true and it results in support for
// automatic restart of audio if e.g. the existing device is removed. If set to
// false, no attempt to restart audio is performed under these conditions.
//
diff --git a/modules/audio_device/include/test_audio_device.cc b/modules/audio_device/include/test_audio_device.cc
index 8351e8a..d8ab22f 100644
--- a/modules/audio_device/include/test_audio_device.cc
+++ b/modules/audio_device/include/test_audio_device.cc
@@ -48,10 +48,10 @@
: public webrtc_impl::AudioDeviceModuleDefault<TestAudioDeviceModule> {
public:
// Creates a new TestAudioDeviceModule. When capturing or playing, 10 ms audio
- // frames will be processed every 10ms / |speed|.
- // |capturer| is an object that produces audio data. Can be nullptr if this
+ // frames will be processed every 10ms / `speed`.
+ // `capturer` is an object that produces audio data. Can be nullptr if this
// device is never used for recording.
- // |renderer| is an object that receives audio data that would have been
+ // `renderer` is an object that receives audio data that would have been
// played out. Can be nullptr if this device is never used for playing.
// Use one of the Create... functions to get these instances.
TestAudioDeviceModuleImpl(TaskQueueFactory* task_queue_factory,
@@ -142,13 +142,13 @@
}
// Blocks until the Renderer refuses to receive data.
- // Returns false if |timeout_ms| passes before that happens.
+ // Returns false if `timeout_ms` passes before that happens.
bool WaitForPlayoutEnd(int timeout_ms = rtc::Event::kForever) override {
return done_rendering_.Wait(timeout_ms);
}
// Blocks until the Recorder stops producing data.
- // Returns false if |timeout_ms| passes before that happens.
+ // Returns false if `timeout_ms` passes before that happens.
bool WaitForRecordingEnd(int timeout_ms = rtc::Event::kForever) override {
return done_capturing_.Wait(timeout_ms);
}
diff --git a/modules/audio_device/include/test_audio_device.h b/modules/audio_device/include/test_audio_device.h
index 48888a4..fd006a3 100644
--- a/modules/audio_device/include/test_audio_device.h
+++ b/modules/audio_device/include/test_audio_device.h
@@ -42,7 +42,7 @@
virtual int SamplingFrequency() const = 0;
// Returns the number of channels of captured audio data.
virtual int NumChannels() const = 0;
- // Replaces the contents of |buffer| with 10ms of captured audio data
+ // Replaces the contents of `buffer` with 10ms of captured audio data
// (see TestAudioDeviceModule::SamplesPerFrame). Returns true if the
// capturer can keep producing data, or false when the capture finishes.
virtual bool Capture(rtc::BufferT<int16_t>* buffer) = 0;
@@ -73,10 +73,10 @@
~TestAudioDeviceModule() override {}
// Creates a new TestAudioDeviceModule. When capturing or playing, 10 ms audio
- // frames will be processed every 10ms / |speed|.
- // |capturer| is an object that produces audio data. Can be nullptr if this
+ // frames will be processed every 10ms / `speed`.
+ // `capturer` is an object that produces audio data. Can be nullptr if this
// device is never used for recording.
- // |renderer| is an object that receives audio data that would have been
+ // `renderer` is an object that receives audio data that would have been
// played out. Can be nullptr if this device is never used for playing.
// Use one of the Create... functions to get these instances.
static rtc::scoped_refptr<TestAudioDeviceModule> Create(
@@ -85,9 +85,9 @@
std::unique_ptr<Renderer> renderer,
float speed = 1);
- // Returns a Capturer instance that generates a signal of |num_channels|
+ // Returns a Capturer instance that generates a signal of `num_channels`
// channels where every second frame is zero and every second frame is evenly
- // distributed random noise with max amplitude |max_amplitude|.
+ // distributed random noise with max amplitude `max_amplitude`.
static std::unique_ptr<PulsedNoiseCapturer> CreatePulsedNoiseCapturer(
int16_t max_amplitude,
int sampling_frequency_in_hz,
@@ -109,7 +109,7 @@
// Returns a Capturer instance that gets its data from a file.
// Automatically detects the sample rate and number of channels.
- // |repeat| - if true, the file will be replayed from the start when we reach
+ // `repeat` - if true, the file will be replayed from the start when we reach
// the end of file.
static std::unique_ptr<Capturer> CreateWavFileReader(std::string filename,
bool repeat = false);
@@ -140,10 +140,10 @@
bool Recording() const override = 0;
// Blocks until the Renderer refuses to receive data.
- // Returns false if |timeout_ms| passes before that happens.
+ // Returns false if `timeout_ms` passes before that happens.
virtual bool WaitForPlayoutEnd(int timeout_ms = rtc::Event::kForever) = 0;
// Blocks until the Recorder stops producing data.
- // Returns false if |timeout_ms| passes before that happens.
+ // Returns false if `timeout_ms` passes before that happens.
virtual bool WaitForRecordingEnd(int timeout_ms = rtc::Event::kForever) = 0;
};
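
A sketch of the pulsed-noise pattern described above: every second frame is silence and every second frame is uniform random noise bounded by `max_amplitude`. This is an illustrative stand-in for the PulsedNoiseCapturer, not its implementation.

  #include <algorithm>
  #include <cstdint>
  #include <random>
  #include <vector>

  void CapturePulsedNoise(std::vector<int16_t>* frame, int frame_index,
                          int16_t max_amplitude, std::mt19937* rng) {
    if (frame_index % 2 == 0) {
      std::fill(frame->begin(), frame->end(), 0);  // silent frame
    } else {
      std::uniform_int_distribution<int> dist(-max_amplitude, max_amplitude);
      for (int16_t& s : *frame) s = static_cast<int16_t>(dist(*rng));
    }
  }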
diff --git a/modules/audio_device/linux/audio_device_pulse_linux.cc b/modules/audio_device/linux/audio_device_pulse_linux.cc
index 7742420..4876c0f 100644
--- a/modules/audio_device/linux/audio_device_pulse_linux.cc
+++ b/modules/audio_device/linux/audio_device_pulse_linux.cc
@@ -1169,7 +1169,7 @@
_startPlay = true;
}
- // Both |_startPlay| and |_playing| needs protction since they are also
+ // Both `_startPlay` and `_playing` need protection since they are also
// accessed on the playout thread.
// The audio thread will signal when playout has started.
diff --git a/modules/audio_device/mac/audio_device_mac.cc b/modules/audio_device/mac/audio_device_mac.cc
index 2088b01..e0d4419 100644
--- a/modules/audio_device/mac/audio_device_mac.cc
+++ b/modules/audio_device/mac/audio_device_mac.cc
@@ -1365,7 +1365,7 @@
} else {
// We signal a stop for a shared device even when rendering has
// not yet ended. This is to ensure the IOProc will return early as
- // intended (by checking |_recording|) before accessing
+ // intended (by checking `_recording`) before accessing
// resources we free below (e.g. the capture converter).
//
// In the case of a shared device, the IOProc will verify
@@ -1476,7 +1476,7 @@
if (_playing && renderDeviceIsAlive == 1) {
// We signal a stop for a shared device even when capturing has not
// yet ended. This is to ensure the IOProc will return early as
- // intended (by checking |_playing|) before accessing resources we
+ // intended (by checking `_playing`) before accessing resources we
// free below (e.g. the render converter).
//
// In the case of a shared device, the IOProc will verify capturing
diff --git a/modules/audio_device/win/audio_device_core_win.cc b/modules/audio_device/win/audio_device_core_win.cc
index 8bfa0ea..41ed8fc 100644
--- a/modules/audio_device/win/audio_device_core_win.cc
+++ b/modules/audio_device/win/audio_device_core_win.cc
@@ -3000,8 +3000,8 @@
dmoBuffer.pBuffer->AddRef();
// Poll the DMO for AEC processed capture data. The DMO will
- // copy available data to |dmoBuffer|, and should only return
- // 10 ms frames. The value of |dwStatus| should be ignored.
+ // copy available data to `dmoBuffer`, and should only return
+ // 10 ms frames. The value of `dwStatus` should be ignored.
hr = _dmo->ProcessOutput(0, 1, &dmoBuffer, &dwStatus);
SAFE_RELEASE(dmoBuffer.pBuffer);
dwStatus = dmoBuffer.dwStatus;
diff --git a/modules/audio_device/win/audio_device_module_win.cc b/modules/audio_device/win/audio_device_module_win.cc
index 8cc4b7f..ad26953 100644
--- a/modules/audio_device/win/audio_device_module_win.cc
+++ b/modules/audio_device/win/audio_device_module_win.cc
@@ -499,7 +499,7 @@
// The AudioDeviceBuffer (ADB) instance is needed for sending/receiving audio
// to/from the WebRTC layer. Created and owned by this object. Used by
- // both |input_| and |output_| but they use orthogonal parts of the ADB.
+ // both `input_` and `output_` but they use orthogonal parts of the ADB.
std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;
// Set to true after a successful call to Init(). Cleared by Terminate().
diff --git a/modules/audio_device/win/core_audio_base_win.cc b/modules/audio_device/win/core_audio_base_win.cc
index 7d93fcb1..12c5146 100644
--- a/modules/audio_device/win/core_audio_base_win.cc
+++ b/modules/audio_device/win/core_audio_base_win.cc
@@ -35,7 +35,7 @@
// TODO(henrika): more research is needed before we can enable low-latency.
const bool kEnableLowLatencyIfSupported = false;
-// Each unit of reference time is 100 nanoseconds, hence |kReftimesPerSec|
+// Each unit of reference time is 100 nanoseconds, hence `kReferenceTimesPerSecond`
// corresponds to one second.
// TODO(henrika): possibly add usage in Init().
// const REFERENCE_TIME kReferenceTimesPerSecond = 10000000;
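
For the unit conversion this comment describes, one second is 10,000,000 reference-time units; a sketch with a stand-in for the Windows REFERENCE_TIME type:

  #include <cstdint>

  using ReferenceTime = int64_t;  // REFERENCE_TIME stand-in
  constexpr ReferenceTime kRefTimesPerSec = 10'000'000;  // 100 ns units

  ReferenceTime MillisecondsToRefTime(int64_t ms) {
    return ms * (kRefTimesPerSec / 1'000);  // 10,000 units per millisecond
  }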
@@ -230,9 +230,9 @@
}
bool CoreAudioBase::IsDefaultDeviceId(const std::string& device_id) const {
- // Returns true if |device_id| corresponds to the id of the default
+ // Returns true if `device_id` corresponds to the id of the default
// device. Note that, if only one device is available (or if the user has not
- // explicitly set a default device), |device_id| will also math
+ // explicitly set a default device), `device_id` will also match
// IsDefaultCommunicationsDeviceId().
return (IsInput() &&
(device_id == core_audio_utility::GetDefaultInputDeviceID())) ||
@@ -242,9 +242,9 @@
bool CoreAudioBase::IsDefaultCommunicationsDeviceId(
const std::string& device_id) const {
- // Returns true if |device_id| corresponds to the id of the default
+ // Returns true if `device_id` corresponds to the id of the default
// communication device. Note that, if only one device is available (or if
- // the user has not explicitly set a communication device), |device_id| will
+ // the user has not explicitly set a communication device), `device_id` will
// also match IsDefaultDeviceId().
return (IsInput() &&
(device_id ==
@@ -341,9 +341,9 @@
RTC_DCHECK(!audio_client_);
RTC_DCHECK(!audio_session_control_.Get());
- // Use an existing combination of |device_index_| and |device_id_| to set
+ // Use an existing combination of `device_index_` and `device_id_` to set
// parameters which are required to create an audio client. It is up to the
- // parent class to set |device_index_| and |device_id_|.
+ // parent class to set `device_index_` and `device_id_`.
std::string device_id = AudioDeviceName::kDefaultDeviceId;
ERole role = ERole();
if (IsDefaultDevice(device_index_)) {
@@ -400,7 +400,7 @@
return false;
}
- // Define the output WAVEFORMATEXTENSIBLE format in |format_|.
+ // Define the output WAVEFORMATEXTENSIBLE format in `format_`.
WAVEFORMATEX* format = &format_.Format;
format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
// Check the preferred channel configuration and request implicit channel
@@ -475,7 +475,7 @@
// Initialize the audio stream between the client and the device in shared
// mode using event-driven buffer handling. Also, using 0 as requested
// buffer size results in a default (minimum) endpoint buffer size.
- // TODO(henrika): possibly increase |requested_buffer_size| to add
+ // TODO(henrika): possibly increase `requested_buffer_size` to add
// robustness.
const REFERENCE_TIME requested_buffer_size = 0;
if (FAILED(core_audio_utility::SharedModeInitialize(
@@ -905,15 +905,15 @@
wait_array, false, INFINITE);
switch (wait_result) {
case WAIT_OBJECT_0 + 0:
- // |stop_event_| has been set.
+ // `stop_event_` has been set.
streaming = false;
break;
case WAIT_OBJECT_0 + 1:
- // |restart_event_| has been set.
+ // `restart_event_` has been set.
error = !HandleRestartEvent();
break;
case WAIT_OBJECT_0 + 2:
- // |audio_samples_event_| has been set.
+ // `audio_samples_event_` has been set.
error = !on_data_callback_(device_frequency);
break;
default:
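
A condensed sketch of this wait loop, keeping the same event priority (stop, restart, audio samples); the handler callbacks are assumed stand-ins for the member functions:

  #include <windows.h>

  void RunAudioThread(HANDLE stop_event, HANDLE restart_event,
                      HANDLE samples_event, bool (*OnData)(),
                      bool (*OnRestart)()) {
    const HANDLE wait_array[] = {stop_event, restart_event, samples_event};
    bool streaming = true;
    bool error = false;
    while (streaming && !error) {
      DWORD r = WaitForMultipleObjects(3, wait_array, FALSE, INFINITE);
      switch (r) {
        case WAIT_OBJECT_0 + 0: streaming = false; break;     // stop request
        case WAIT_OBJECT_0 + 1: error = !OnRestart(); break;  // device change
        case WAIT_OBJECT_0 + 2: error = !OnData(); break;     // buffer ready
        default: error = true; break;
      }
    }
  }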
diff --git a/modules/audio_device/win/core_audio_base_win.h b/modules/audio_device/win/core_audio_base_win.h
index afcc6a6..a9a769e 100644
--- a/modules/audio_device/win/core_audio_base_win.h
+++ b/modules/audio_device/win/core_audio_base_win.h
@@ -63,7 +63,7 @@
// Callback definition for notifications of run-time error messages. It can
// be called e.g. when an active audio device is removed and an audio stream
- // is disconnected (|error| is then set to kStreamDisconnected). Both input
+ // is disconnected (`error` is then set to kStreamDisconnected). Both input
// and output clients implements OnErrorCallback() and will trigger an
// internal restart sequence for kStreamDisconnected.
// This method is currently always called on the audio thread.
@@ -103,13 +103,13 @@
// Releases all allocated COM resources in the base class.
void ReleaseCOMObjects();
- // Returns number of active devices given the specified |direction_| set
+ // Returns number of active devices given the specified `direction_` set
// by the parent (input or output).
int NumberOfActiveDevices() const;
// Returns total number of enumerated audio devices which is the sum of all
// active devices plus two extra (one default and one default
- // communications). The value in |direction_| determines if capture or
+ // communications). The value in `direction_` determines if capture or
// render devices are counted.
int NumberOfEnumeratedDevices() const;
diff --git a/modules/audio_device/win/core_audio_input_win.cc b/modules/audio_device/win/core_audio_input_win.cc
index 8ea7426..be4aec8 100644
--- a/modules/audio_device/win/core_audio_input_win.cc
+++ b/modules/audio_device/win/core_audio_input_win.cc
@@ -105,17 +105,17 @@
RTC_DCHECK(!audio_capture_client_);
// Creates an IAudioClient instance and stores the valid interface pointer in
- // |audio_client3_|, |audio_client2_|, or |audio_client_| depending on
+ // `audio_client3_`, `audio_client2_`, or `audio_client_` depending on
// platform support. The base class will use optimal input parameters and do
// an event driven shared mode initialization. The utilized format will be
- // stored in |format_| and can be used for configuration and allocation of
+ // stored in `format_` and can be used for configuration and allocation of
// audio buffers.
if (!CoreAudioBase::Init()) {
return -1;
}
RTC_DCHECK(audio_client_);
- // Configure the recording side of the audio device buffer using |format_|
+ // Configure the recording side of the audio device buffer using `format_`
// after a trivial sanity check of the format structure.
RTC_DCHECK(audio_device_buffer_);
WAVEFORMATEX* format = &format_.Format;
@@ -353,7 +353,7 @@
format_.Format.nBlockAlign * num_frames_to_read);
RTC_DLOG(LS_WARNING) << "Captured audio is replaced by silence";
} else {
- // Copy recorded audio in |audio_data| to the WebRTC sink using the
+ // Copy recorded audio in `audio_data` to the WebRTC sink using the
// FineAudioBuffer object.
fine_audio_buffer_->DeliverRecordedData(
rtc::MakeArrayView(reinterpret_cast<const int16_t*>(audio_data),
@@ -397,13 +397,13 @@
if (!qpc_to_100ns_) {
return absl::nullopt;
}
- // Input parameter |capture_time_100ns| contains the performance counter at
+ // Input parameter `capture_time_100ns` contains the performance counter at
// the time that the audio endpoint device recorded the device position of
// the first audio frame in the data packet converted into 100ns units.
// We derive a delay estimate by:
// - sampling the current performance counter (qpc_now_raw),
// - converting it into 100ns time units (now_time_100ns), and
- // - subtracting |capture_time_100ns| from now_time_100ns.
+ // - subtracting `capture_time_100ns` from now_time_100ns.
LARGE_INTEGER perf_counter_now = {};
if (!::QueryPerformanceCounter(&perf_counter_now)) {
return absl::nullopt;
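
The conversion described above in one function, assuming `qpc_frequency` is the QueryPerformanceFrequency value in counts per second:

  #include <cstdint>

  int64_t CaptureDelay100ns(int64_t qpc_now_raw, int64_t qpc_frequency,
                            int64_t capture_time_100ns) {
    // 10,000,000 units of 100 ns per second; the real code caches this
    // factor (qpc_to_100ns_) once at initialization.
    const double qpc_to_100ns = 10000000.0 / qpc_frequency;
    const int64_t now_time_100ns =
        static_cast<int64_t>(qpc_now_raw * qpc_to_100ns);
    return now_time_100ns - capture_time_100ns;
  }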
diff --git a/modules/audio_device/win/core_audio_output_win.cc b/modules/audio_device/win/core_audio_output_win.cc
index 36ec703..bd4132a 100644
--- a/modules/audio_device/win/core_audio_output_win.cc
+++ b/modules/audio_device/win/core_audio_output_win.cc
@@ -102,17 +102,17 @@
RTC_DCHECK(!audio_render_client_);
// Creates an IAudioClient instance and stores the valid interface pointer in
- // |audio_client3_|, |audio_client2_|, or |audio_client_| depending on
+ // `audio_client3_`, `audio_client2_`, or `audio_client_` depending on
// platform support. The base class will use optimal output parameters and do
// an event driven shared mode initialization. The utilized format will be
- // stored in |format_| and can be used for configuration and allocation of
+ // stored in `format_` and can be used for configuration and allocation of
// audio buffers.
if (!CoreAudioBase::Init()) {
return -1;
}
RTC_DCHECK(audio_client_);
- // Configure the playout side of the audio device buffer using |format_|
+ // Configure the playout side of the audio device buffer using `format_`
// after a trivial sanity check of the format structure.
RTC_DCHECK(audio_device_buffer_);
WAVEFORMATEX* format = &format_.Format;
@@ -334,7 +334,7 @@
}
// Get audio data from WebRTC and write it to the allocated buffer in
- // |audio_data|. The playout latency is not updated for each callback.
+ // `audio_data`. The playout latency is not updated for each callback.
fine_audio_buffer_->GetPlayoutData(
rtc::MakeArrayView(reinterpret_cast<int16_t*>(audio_data),
num_requested_frames * format_.Format.nChannels),
@@ -360,7 +360,7 @@
UINT64 position = 0;
UINT64 qpc_position = 0;
int delay_ms = 0;
- // Get the device position through output parameter |position|. This is the
+ // Get the device position through output parameter `position`. This is the
// stream position of the sample that is currently playing through the
// speakers.
_com_error error = audio_clock_->GetPosition(&position, &qpc_position);
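
A sketch of turning the device position into a playout delay, assuming both positions have been normalized to frames at `sample_rate_hz` (GetPosition itself reports in device-clock units):

  #include <cstdint>

  int PlayoutDelayMs(uint64_t frames_written, uint64_t device_position_frames,
                     int sample_rate_hz) {
    // Frames queued between the write cursor and the sample currently
    // playing through the speakers.
    const uint64_t queued = frames_written - device_position_frames;
    return static_cast<int>(queued * 1000 / sample_rate_hz);
  }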
diff --git a/modules/audio_device/win/core_audio_utility_win.cc b/modules/audio_device/win/core_audio_utility_win.cc
index 289abe9..5950c8d 100644
--- a/modules/audio_device/win/core_audio_utility_win.cc
+++ b/modules/audio_device/win/core_audio_utility_win.cc
@@ -38,9 +38,9 @@
// Converts from channel mask to list of included channels.
// Each audio data format contains channels for one or more of the positions
// listed below. The number of channels simply equals the number of nonzero
-// flag bits in the |channel_mask|. The relative positions of the channels
+// flag bits in the `channel_mask`. The relative positions of the channels
// within each block of audio data always follow the same relative ordering
-// as the flag bits in the table below. For example, if |channel_mask| contains
+// as the flag bits in the table below. For example, if `channel_mask` contains
// the value 0x00000033, the format defines four audio channels that are
// assigned for playback to the front-left, front-right, back-left,
// and back-right speakers, respectively. The channel data should be interleaved
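
The counting rule in this comment is a population count; for example, 0x00000033 has four bits set:

  #include <bitset>
  #include <cstdint>

  int ChannelsFromMask(uint32_t channel_mask) {
    // 0x00000033 -> 4 channels: front-left, front-right, back-left,
    // back-right, matching the example above.
    return static_cast<int>(std::bitset<32>(channel_mask).count());
  }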
@@ -278,8 +278,8 @@
return SUCCEEDED(device->GetState(&state)) && (state & DEVICE_STATE_ACTIVE);
}
-// Retrieve an audio device specified by |device_id| or a default device
-// specified by data-flow direction and role if |device_id| is default.
+// Retrieve an audio device specified by `device_id` or a default device
+// specified by data-flow direction and role if `device_id` is default.
ComPtr<IMMDevice> CreateDeviceInternal(const std::string& device_id,
EDataFlow data_flow,
ERole role) {
@@ -500,7 +500,7 @@
}
// Loop over all active devices and add friendly name and unique id to the
- // |device_names| queue. For now, devices are added at indexes 0, 1, ..., N-1
+ // `device_names` queue. For now, devices are added at indexes 0, 1, ..., N-1
// but they will be moved to 2,3,..., N+1 at the next stage when default and
// default communication devices are added at index 0 and 1.
ComPtr<IMMDevice> audio_device;
@@ -611,7 +611,7 @@
return hr;
int sample_rate = mix_format.Format.nSamplesPerSec;
- // Override default sample rate if |fixed_sample_rate| is set and different
+ // Override default sample rate if `fixed_sample_rate` is set and different
// from the default rate.
if (fixed_sample_rate > 0 && fixed_sample_rate != sample_rate) {
RTC_DLOG(INFO) << "Using fixed sample rate instead of the preferred: "
@@ -909,7 +909,7 @@
props.eCategory = AudioCategory_Communications;
// Hardware-offloaded audio processing allows the main audio processing tasks
// to be performed outside the computer's main CPU. Check support and log the
- // result but hard-code |bIsOffload| to FALSE for now.
+ // result but hard-code `bIsOffload` to FALSE for now.
// TODO(henrika): evaluate hardware-offloading. Might complicate usage of
// IAudioClient::GetMixFormat().
BOOL supports_offload = FALSE;
@@ -989,7 +989,7 @@
// The GetMixFormat method retrieves the stream format that the audio engine
// uses for its internal processing of shared-mode streams. The method
// allocates the storage for the structure and this memory will be released
- // when |mix_format| goes out of scope. The GetMixFormat method retrieves a
+ // when `mix_format` goes out of scope. The GetMixFormat method retrieves a
// format descriptor that is in the form of a WAVEFORMATEXTENSIBLE structure
// instead of a standalone WAVEFORMATEX structure. The method outputs a
// pointer to the WAVEFORMATEX structure that is embedded at the start of
@@ -1017,7 +1017,7 @@
return AUDCLNT_E_UNSUPPORTED_FORMAT;
}
- // Log a warning for the rare case where |mix_format| only contains a
+ // Log a warning for the rare case where `mix_format` only contains a
// stand-alone WAVEFORMATEX structure but don't return.
if (!wrapped_format.IsExtensible()) {
RTC_DLOG(WARNING)
@@ -1079,8 +1079,8 @@
REFERENCE_TIME* device_period) {
RTC_DLOG(INFO) << "GetDevicePeriod";
RTC_DCHECK(client);
- // The |default_period| parameter specifies the default scheduling period
- // for a shared-mode stream. The |minimum_period| parameter specifies the
+ // The `default_period` parameter specifies the default scheduling period
+ // for a shared-mode stream. The `minimum_period` parameter specifies the
// minimum scheduling period for an exclusive-mode stream.
// The time is expressed in 100-nanosecond units.
REFERENCE_TIME default_period = 0;
@@ -1203,8 +1203,8 @@
}
RTC_DLOG(INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags);
- // Initialize the shared mode client for minimal delay if |buffer_duration|
- // is 0 or possibly a higher delay (more robust) if |buffer_duration| is
+ // Initialize the shared mode client for minimal delay if `buffer_duration`
+ // is 0 or possibly a higher delay (more robust) if `buffer_duration` is
// larger than 0. The actual size is given by IAudioClient::GetBufferSize().
_com_error error = client->Initialize(
AUDCLNT_SHAREMODE_SHARED, stream_flags, buffer_duration, 0,
@@ -1294,7 +1294,7 @@
// Initialize the shared mode client for lowest possible latency.
// It is assumed that GetSharedModeEnginePeriod() has been used to query the
- // smallest possible engine period and that it is given by |period_in_frames|.
+ // smallest possible engine period and that it is given by `period_in_frames`.
_com_error error = client->InitializeSharedAudioStream(
stream_flags, period_in_frames,
reinterpret_cast<const WAVEFORMATEX*>(format), nullptr);
diff --git a/modules/audio_device/win/core_audio_utility_win.h b/modules/audio_device/win/core_audio_utility_win.h
index 79203dc..95ed911 100644
--- a/modules/audio_device/win/core_audio_utility_win.h
+++ b/modules/audio_device/win/core_audio_utility_win.h
@@ -34,7 +34,7 @@
namespace webrtc_win {
// Utility class which registers a thread with MMCSS in the constructor and
-// deregisters MMCSS in the destructor. The task name is given by |task_name|.
+// deregisters MMCSS in the destructor. The task name is given by `task_name`.
// The Multimedia Class Scheduler service (MMCSS) enables multimedia
// applications to ensure that their time-sensitive processing receives
// prioritized access to CPU resources without denying CPU resources to
@@ -84,7 +84,7 @@
explicit ScopedMMCSSRegistration(const wchar_t* task_name) {
RTC_DLOG(INFO) << "ScopedMMCSSRegistration: " << rtc::ToUtf8(task_name);
- // Register the calling thread with MMCSS for the supplied |task_name|.
+ // Register the calling thread with MMCSS for the supplied `task_name`.
DWORD mmcss_task_index = 0;
mmcss_handle_ = AvSetMmThreadCharacteristicsW(task_name, &mmcss_task_index);
if (mmcss_handle_ == nullptr) {
@@ -304,7 +304,7 @@
// Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
// Number of active audio devices in the specified data flow direction.
-// Set |data_flow| to eAll to retrieve the total number of active audio
+// Set `data_flow` to eAll to retrieve the total number of active audio
// devices.
int NumberOfActiveDevices(EDataFlow data_flow);
@@ -327,7 +327,7 @@
std::string GetCommunicationsOutputDeviceID();
// Creates an IMMDevice interface corresponding to the unique device id in
-// |device_id|, or by data-flow direction and role if |device_id| is set to
+// `device_id`, or by data-flow direction and role if `device_id` is set to
// AudioDeviceName::kDefaultDeviceId.
Microsoft::WRL::ComPtr<IMMDevice> CreateDevice(const std::string& device_id,
EDataFlow data_flow,
@@ -339,8 +339,8 @@
webrtc::AudioDeviceName GetDeviceName(IMMDevice* device);
// Gets the user-friendly name of the endpoint device which is represented
-// by a unique id in |device_id|, or by data-flow direction and role if
-// |device_id| is set to AudioDeviceName::kDefaultDeviceId.
+// by a unique id in `device_id`, or by data-flow direction and role if
+// `device_id` is set to AudioDeviceName::kDefaultDeviceId.
std::string GetFriendlyName(const std::string& device_id,
EDataFlow data_flow,
ERole role);
@@ -349,11 +349,11 @@
EDataFlow GetDataFlow(IMMDevice* device);
// Enumerates all input devices and adds the names (friendly name and unique
-// device id) to the list in |device_names|.
+// device id) to the list in `device_names`.
bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names);
// Enumerates all output devices and adds the names (friendly name and unique
-// device id) to the list in |device_names|.
+// device id) to the list in `device_names`.
bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names);
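A usage sketch for the two enumeration helpers, assuming webrtc::AudioDeviceNames is a vector of AudioDeviceName with `device_name` and `unique_id` members, as used elsewhere in modules/audio_device:

```cpp
// Illustrative usage of the helpers declared above.
#include <iostream>

void PrintInputDevices() {
  webrtc::AudioDeviceNames device_names;
  if (webrtc_win::GetInputDeviceNames(&device_names)) {
    for (const auto& name : device_names) {
      std::cout << name.device_name << " [" << name.unique_id << "]\n";
    }
  }
}
```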
// The Windows Audio Session API (WASAPI) enables client applications to
@@ -361,18 +361,18 @@
// device. Header files Audioclient.h and Audiopolicy.h define the WASAPI
// interfaces.
-// Creates an IAudioSessionManager2 interface for the specified |device|.
+// Creates an IAudioSessionManager2 interface for the specified `device`.
// This interface provides access to, e.g., the IAudioSessionEnumerator.
Microsoft::WRL::ComPtr<IAudioSessionManager2> CreateSessionManager2(
IMMDevice* device);
-// Creates an IAudioSessionEnumerator interface for the specified |device|.
+// Creates an IAudioSessionEnumerator interface for the specified `device`.
// The client can use the interface to enumerate audio sessions on the audio
// device.
Microsoft::WRL::ComPtr<IAudioSessionEnumerator> CreateSessionEnumerator(
IMMDevice* device);
-// Number of active audio sessions for the given |device|. Expired or inactive
+// Number of active audio sessions for the given `device`. Expired or inactive
// sessions are not included.
int NumberOfActiveSessions(IMMDevice* device);
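A sketch of how the two factory functions above combine to count active sessions; expired and inactive sessions are skipped by checking the session state:

```cpp
// Illustrative only. IAudioSessionControl::GetState() distinguishes
// active, inactive and expired sessions.
#include <audiopolicy.h>
#include <wrl/client.h>

int CountActiveSessions(IAudioSessionEnumerator* session_enumerator) {
  int session_count = 0;
  if (FAILED(session_enumerator->GetCount(&session_count)))
    return 0;
  int num_active = 0;
  for (int i = 0; i < session_count; ++i) {
    Microsoft::WRL::ComPtr<IAudioSessionControl> session;
    if (FAILED(session_enumerator->GetSession(i, &session)))
      continue;
    AudioSessionState state;
    if (SUCCEEDED(session->GetState(&state)) &&
        state == AudioSessionStateActive) {
      ++num_active;
    }
  }
  return num_active;
}
```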
@@ -387,15 +387,15 @@
CreateClient3(const std::string& device_id, EDataFlow data_flow, ERole role);
// Sets the AudioCategory_Communications category. Should be called before
-// GetSharedModeMixFormat() and IsFormatSupported(). The |client| argument must
+// GetSharedModeMixFormat() and IsFormatSupported(). The `client` argument must
// be an IAudioClient2 or IAudioClient3 interface pointer, hence only supported
// on Windows 8 and above.
// TODO(henrika): evaluate effect (if any).
HRESULT SetClientProperties(IAudioClient2* client);
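A sketch of the documented WASAPI call for applying the communications category (Windows 8 and above):

```cpp
// Illustrative only; per the comment above, this should run before
// GetSharedModeMixFormat() and IsFormatSupported().
#include <audioclient.h>

HRESULT ApplyCommunicationsCategory(IAudioClient2* client2) {
  AudioClientProperties props = {};
  props.cbSize = sizeof(props);
  props.bIsOffload = FALSE;
  props.eCategory = AudioCategory_Communications;
  return client2->SetClientProperties(&props);
}
```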
// Returns the buffer size limits of the hardware audio engine in
-// 100-nanosecond units given a specified |format|. Does not require prior
-// audio stream initialization. The |client| argument must be an IAudioClient2
+// 100-nanosecond units given a specified `format`. Does not require prior
+// audio stream initialization. The `client` argument must be an IAudioClient2
// or IAudioClient3 interface pointer, hence only supported on Windows 8 and
// above.
// TODO(henrika): always fails with AUDCLNT_E_OFFLOAD_MODE_ONLY.
@@ -412,29 +412,29 @@
HRESULT GetSharedModeMixFormat(IAudioClient* client,
WAVEFORMATEXTENSIBLE* format);
-// Returns true if the specified |client| supports the format in |format|
-// for the given |share_mode| (shared or exclusive). The client can call this
+// Returns true if the specified `client` supports the format in `format`
+// for the given `share_mode` (shared or exclusive). The client can call this
// method before calling IAudioClient::Initialize.
bool IsFormatSupported(IAudioClient* client,
AUDCLNT_SHAREMODE share_mode,
const WAVEFORMATEXTENSIBLE* format);
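A sketch of the format check described above. In shared mode, WASAPI may return S_FALSE together with a closest-match suggestion; this sketch only reports exact support:

```cpp
// Illustrative only. A shared-mode check may produce a closest-match
// format, which must be freed with CoTaskMemFree().
#include <audioclient.h>
#include <objbase.h>

bool FormatIsSupported(IAudioClient* client,
                       AUDCLNT_SHAREMODE share_mode,
                       const WAVEFORMATEXTENSIBLE* format) {
  WAVEFORMATEX* closest_match = nullptr;
  HRESULT hr = client->IsFormatSupported(
      share_mode, reinterpret_cast<const WAVEFORMATEX*>(format),
      &closest_match);
  if (closest_match != nullptr)
    CoTaskMemFree(closest_match);
  return hr == S_OK;
}
```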
// For a shared-mode stream, the audio engine periodically processes the
-// data in the endpoint buffer at the period obtained in |device_period|.
-// For an exclusive mode stream, |device_period| corresponds to the minimum
+// data in the endpoint buffer at the period obtained in `device_period`.
+// For an exclusive mode stream, `device_period` corresponds to the minimum
// time interval between successive processing by the endpoint device.
// This period plus the stream latency between the buffer and endpoint device
// represents the minimum possible latency that an audio application can
-// achieve. The time in |device_period| is expressed in 100-nanosecond units.
+// achieve. The time in `device_period` is expressed in 100-nanosecond units.
HRESULT GetDevicePeriod(IAudioClient* client,
AUDCLNT_SHAREMODE share_mode,
REFERENCE_TIME* device_period);
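A sketch of the underlying IAudioClient::GetDevicePeriod() call, which returns both periods at once in 100-nanosecond units:

```cpp
// Illustrative only; the utility above presumably selects one of the two
// values based on `share_mode`.
#include <audioclient.h>

HRESULT QueryDevicePeriods(IAudioClient* client) {
  REFERENCE_TIME default_period = 0;  // Shared-mode scheduling period.
  REFERENCE_TIME minimum_period = 0;  // Exclusive-mode minimum period.
  return client->GetDevicePeriod(&default_period, &minimum_period);
}
```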
// Returns the range of periodicities supported by the engine for the specified
-// stream |format|. The periodicity of the engine is the rate at which the
+// stream `format`. The periodicity of the engine is the rate at which the
// engine wakes an event-driven audio client to transfer audio data to or from
// the engine. Can be used for low-latency support on some devices.
-// The |client| argument must be an IAudioClient3 interface pointer, hence only
+// The `client` argument must be an IAudioClient3 interface pointer, hence only
// supported on Windows 10 and above.
HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3,
const WAVEFORMATEXTENSIBLE* format,
@@ -443,14 +443,14 @@
uint32_t* min_period_in_frames,
uint32_t* max_period_in_frames);
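A sketch of the Windows 10 low-latency path combining this query with InitializeSharedAudioStream(); names and error handling are illustrative:

```cpp
// Illustrative only. Note that the periods returned here are in audio
// frames, unlike the 100-nanosecond units used by GetDevicePeriod().
#include <audioclient.h>

HRESULT InitializeLowLatency(IAudioClient3* client3,
                             const WAVEFORMATEX* format,
                             DWORD stream_flags) {
  UINT32 default_period = 0;
  UINT32 fundamental_period = 0;
  UINT32 min_period = 0;
  UINT32 max_period = 0;
  HRESULT hr = client3->GetSharedModeEnginePeriod(
      format, &default_period, &fundamental_period, &min_period, &max_period);
  if (FAILED(hr))
    return hr;
  return client3->InitializeSharedAudioStream(stream_flags, min_period, format,
                                              nullptr);
}
```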
-// Get the preferred audio parameters for the given |client| corresponding to
+// Get the preferred audio parameters for the given `client` corresponding to
// the stream format that the audio engine uses for its internal processing of
// shared-mode streams. The acquired values should only be used for
// shared-mode streams since there are no preferred settings for an
// exclusive-mode stream.
HRESULT GetPreferredAudioParameters(IAudioClient* client,
webrtc::AudioParameters* params);
-// As above but override the preferred sample rate and use |sample_rate|
+// As above but override the preferred sample rate and use `sample_rate`
// instead. Intended mainly for testing purposes and in combination with rate
// conversion.
HRESULT GetPreferredAudioParameters(IAudioClient* client,
@@ -461,20 +461,20 @@
// the client must initialize it once, and only once, to initialize the audio
// stream between the client and the device. In shared mode, the client
// connects indirectly through the audio engine which does the mixing.
-// If a valid event is provided in |event_handle|, the client will be
-// initialized for event-driven buffer handling. If |event_handle| is set to
+// If a valid event is provided in `event_handle`, the client will be
+// initialized for event-driven buffer handling. If `event_handle` is set to
// nullptr, event-driven buffer handling is not utilized. To achieve the
// minimum stream latency between the client application and audio endpoint
-// device, set |buffer_duration| to 0. A client has the option of requesting a
+// device, set `buffer_duration` to 0. A client has the option of requesting a
// buffer size that is larger than what is strictly necessary to make timing
// glitches rare or nonexistent. Increasing the buffer size does not necessarily
// increase the stream latency. Each unit of reference time is 100 nanoseconds.
-// The |auto_convert_pcm| parameter can be used for testing purposes to ensure
+// The `auto_convert_pcm` parameter can be used for testing purposes to ensure
// that the sample rate of the client side does not have to match the audio
-// engine mix format. If |auto_convert_pcm| is set to true, a rate converter
-// will be inserted to convert between the sample rate in |format| and the
+// engine mix format. If `auto_convert_pcm` is set to true, a rate converter
+// will be inserted to convert between the sample rate in `format` and the
// preferred rate given by GetPreferredAudioParameters().
-// The output parameter |endpoint_buffer_size| contains the size of the
+// The output parameter `endpoint_buffer_size` contains the size of the
// endpoint buffer and it is expressed as the number of audio frames the
// buffer can hold.
HRESULT SharedModeInitialize(IAudioClient* client,
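A sketch of the flag and event plumbing described in the comment block above; the stream flags are the documented WASAPI values, while the function itself is illustrative and not the WebRTC implementation:

```cpp
// Illustrative only: event-driven buffering plus optional automatic
// sample-rate conversion between `format` and the engine mix format.
#include <audioclient.h>
#include <cstdint>

HRESULT InitializeWithEvent(IAudioClient* client,
                            const WAVEFORMATEX* format,
                            HANDLE event_handle,
                            bool auto_convert_pcm,
                            uint32_t* endpoint_buffer_size) {
  DWORD flags = 0;
  if (event_handle != nullptr)
    flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
  if (auto_convert_pcm) {
    flags |= AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM |
             AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY;
  }
  HRESULT hr = client->Initialize(AUDCLNT_SHAREMODE_SHARED, flags,
                                  0,  // 0 => minimum possible buffer.
                                  0, format, nullptr);
  if (SUCCEEDED(hr) && event_handle != nullptr)
    hr = client->SetEventHandle(event_handle);
  UINT32 buffer_size = 0;
  if (SUCCEEDED(hr))
    hr = client->GetBufferSize(&buffer_size);
  *endpoint_buffer_size = buffer_size;
  return hr;
}
```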
@@ -486,7 +486,7 @@
// Works as SharedModeInitialize() but adds support for using smaller engine
// periods than the default period.
-// The |client| argument must be an IAudioClient3 interface pointer, hence only
+// The `client` argument must be an IAudioClient3 interface pointer, hence only
// supported on Windows 10 and above.
// TODO(henrika): can probably be merged into SharedModeInitialize() to avoid
// duplicating code. Keeping as separate method for now until decided if we
@@ -499,43 +499,43 @@
uint32_t* endpoint_buffer_size);
// Creates an IAudioRenderClient client for an existing IAudioClient given by
-// |client|. The IAudioRenderClient interface enables a client to write
+// `client`. The IAudioRenderClient interface enables a client to write
// output data to a rendering endpoint buffer. The methods in this interface
// manage the movement of data packets that contain audio-rendering data.
Microsoft::WRL::ComPtr<IAudioRenderClient> CreateRenderClient(
IAudioClient* client);
// Creates an IAudioCaptureClient client for an existing IAudioClient given by
-// |client|. The IAudioCaptureClient interface enables a client to read
+// `client`. The IAudioCaptureClient interface enables a client to read
// input data from a capture endpoint buffer. The methods in this interface
// manage the movement of data packets that contain capture data.
Microsoft::WRL::ComPtr<IAudioCaptureClient> CreateCaptureClient(
IAudioClient* client);
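Both factories above typically reduce to IAudioClient::GetService() on an initialized client; a sketch for the render case (only the interface matching the stream's direction will be available):

```cpp
// Illustrative only; GetService() fails unless Initialize() has already
// succeeded on `client`.
#include <audioclient.h>
#include <wrl/client.h>

Microsoft::WRL::ComPtr<IAudioRenderClient> GetRenderClient(
    IAudioClient* client) {
  Microsoft::WRL::ComPtr<IAudioRenderClient> render_client;
  client->GetService(IID_PPV_ARGS(&render_client));
  return render_client;
}
```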
// Creates an IAudioClock interface for an existing IAudioClient given by
-// |client|. The IAudioClock interface enables a client to monitor a stream's
+// `client`. The IAudioClock interface enables a client to monitor a stream's
// data rate and the current position in the stream.
Microsoft::WRL::ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client);
// Creates an AudioSessionControl interface for an existing IAudioClient given
-// by |client|. The IAudioControl interface enables a client to configure the
+// by `client`. The IAudioSessionControl interface enables a client to
// configure the control parameters for an audio session and to monitor
// events in the session.
Microsoft::WRL::ComPtr<IAudioSessionControl> CreateAudioSessionControl(
IAudioClient* client);
// Creates an ISimpleAudioVolume interface for an existing IAudioClient given by
-// |client|. This interface enables a client to control the master volume level
+// `client`. This interface enables a client to control the master volume level
// of an active audio session.
Microsoft::WRL::ComPtr<ISimpleAudioVolume> CreateSimpleAudioVolume(
IAudioClient* client);
// Fills up the endpoint rendering buffer with silence for an existing
-// IAudioClient given by |client| and a corresponding IAudioRenderClient
-// given by |render_client|.
+// IAudioClient given by `client` and a corresponding IAudioRenderClient
+// given by `render_client`.
bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
IAudioRenderClient* render_client);
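A sketch of the silence-fill pattern behind this helper: request the unused portion of the endpoint buffer and release it flagged as silent so the device plays zeros instead of stale data:

```cpp
// Illustrative only; error handling is reduced to early returns.
#include <audioclient.h>

bool FillWithSilence(IAudioClient* client, IAudioRenderClient* render_client) {
  UINT32 endpoint_buffer_size = 0;
  if (FAILED(client->GetBufferSize(&endpoint_buffer_size)))
    return false;
  UINT32 padding = 0;  // Frames already queued and not yet played.
  if (FAILED(client->GetCurrentPadding(&padding)))
    return false;
  const UINT32 num_frames = endpoint_buffer_size - padding;
  BYTE* data = nullptr;
  if (FAILED(render_client->GetBuffer(num_frames, &data)))
    return false;
  // AUDCLNT_BUFFERFLAGS_SILENT tells the engine to treat the buffer as
  // silence regardless of its contents, so no memset is needed.
  return SUCCEEDED(
      render_client->ReleaseBuffer(num_frames, AUDCLNT_BUFFERFLAGS_SILENT));
}
```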
-// Prints/logs all fields of the format structure in |format|.
+// Prints/logs all fields of the format structure in `format`.
// Also supports extended versions (WAVEFORMATEXTENSIBLE).
std::string WaveFormatToString(const WaveFormatWrapper format);
@@ -543,8 +543,8 @@
// generic webrtc::TimeDelta which then can be converted to any time unit.
webrtc::TimeDelta ReferenceTimeToTimeDelta(REFERENCE_TIME time);
-// Converts size expressed in number of audio frames, |num_frames|, into
-// milliseconds given a specified |sample_rate|.
+// Converts size expressed in number of audio frames, `num_frames`, into
+// milliseconds given a specified `sample_rate`.
double FramesToMilliseconds(uint32_t num_frames, uint16_t sample_rate);
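The conversion itself is simple arithmetic; a sketch matching the declaration above (e.g. 480 frames at 48000 Hz is 10 ms):

```cpp
// Illustrative only: one frame lasts 1000.0 / sample_rate milliseconds.
#include <cstdint>

double FramesToMillisecondsSketch(uint32_t num_frames, uint16_t sample_rate) {
  return static_cast<double>(num_frames) * 1000.0 / sample_rate;
}
```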
// Converts a COM error into a human-readable string.
diff --git a/modules/audio_device/win/core_audio_utility_win_unittest.cc b/modules/audio_device/win/core_audio_utility_win_unittest.cc
index 9f1ce5e..277f54e 100644
--- a/modules/audio_device/win/core_audio_utility_win_unittest.cc
+++ b/modules/audio_device/win/core_audio_utility_win_unittest.cc
@@ -107,7 +107,7 @@
TEST_F(CoreAudioUtilityWinTest, WaveFormatWrapperExtended) {
// Use default constructor for WAVEFORMATEXTENSIBLE and verify that it
- // results in same size as for WAVEFORMATEX even if the size of |format_ex|
+ // results in the same size as for WAVEFORMATEX even if the size of `format_ex`
// equals the size of WAVEFORMATEXTENSIBLE.
WAVEFORMATEXTENSIBLE format_ex = {};
core_audio_utility::WaveFormatWrapper wave_format_ex(&format_ex);
@@ -319,7 +319,7 @@
EDataFlow data_flow[] = {eRender, eCapture};
// Obtain reference to an IAudioSessionManager2 interface for a default audio
- // endpoint device specified by two different data flows and the |eConsole|
+ // endpoint device specified by two different data flows and the `eConsole`
// role.
for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
@@ -339,7 +339,7 @@
// Obtain reference to an IAudioSessionEnumerator interface for a default
// audio endpoint device specified by two different data flows and the
- // |eConsole| role.
+ // `eConsole` role.
for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IMMDevice> device(core_audio_utility::CreateDevice(
AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole));
@@ -364,7 +364,7 @@
EDataFlow data_flow[] = {eRender, eCapture};
// Count the number of active audio sessions for a default audio endpoint device
- // specified by two different data flows and the |eConsole| role.
+ // specified by two different data flows and the `eConsole` role.
// Ensure that the number of active audio sessions is less than or equal to
// the total number of audio sessions on that same device.
for (size_t i = 0; i < arraysize(data_flow); ++i) {
@@ -394,7 +394,7 @@
EDataFlow data_flow[] = {eRender, eCapture};
// Obtain reference to an IAudioClient interface for a default audio endpoint
- // device specified by two different data flows and the |eConsole| role.
+ // device specified by two different data flows and the `eConsole` role.
for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IAudioClient> client = core_audio_utility::CreateClient(
AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
@@ -409,7 +409,7 @@
EDataFlow data_flow[] = {eRender, eCapture};
// Obtain reference to an IAudioClient2 interface for a default audio endpoint
- // device specified by two different data flows and the |eConsole| role.
+ // device specified by two different data flows and the `eConsole` role.
for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IAudioClient2> client2 = core_audio_utility::CreateClient2(
AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);
@@ -424,7 +424,7 @@
EDataFlow data_flow[] = {eRender, eCapture};
// Obtain reference to an IAudioClient3 interface for a default audio endpoint
- // device specified by two different data flows and the |eConsole| role.
+ // device specified by two different data flows and the `eConsole` role.
for (size_t i = 0; i < arraysize(data_flow); ++i) {
ComPtr<IAudioClient3> client3 = core_audio_utility::CreateClient3(
AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole);