Start using ArrayView in AudioFrame, update PushResampler
Start introducing ArrayView to AudioFrame and code that flows down
from there. In this first step:
* Add `data_view()`, which returns a read-only ArrayView for the
audio buffer. Note, however, that when the AudioFrame has not been
initialized, the view returned by data_view() is empty and its data()
pointer is nullptr, whereas the existing data() method never returns
nullptr.
* Add a `mutable_data()` overload that takes two arguments, samples per
channel and number of channels, which are needed to correctly size the
returned mutable ArrayView.
A notable behavior change is that if the requested number of channels
exceeds the supported maximum, or the calculated buffer size is too
large, the function hits a CHECK.
* Add TODOs for follow-up work.
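A minimal usage sketch of the new accessors (an illustrative call site,
not part of this change; the function name, the 48 kHz mono setup and
the commented-out resampler call are hypothetical):

    #include <algorithm>

    #include "api/array_view.h"
    #include "api/audio/audio_frame.h"

    void FillFrameWithSilence() {
      // 48 kHz mono; samples_per_channel() defaults to 480 (10 ms).
      webrtc::AudioFrame frame(48000, /*num_channels=*/1);

      // Write access: the view is sized from the two dimensions, which
      // are also stored on the frame. A muted frame is zeroed and
      // unmuted before the view is returned.
      rtc::ArrayView<int16_t> writable = frame.mutable_data(
          frame.samples_per_channel(), frame.num_channels());
      std::fill(writable.begin(), writable.end(), 0);

      // Read access: for a muted frame the view refers to a static
      // zeroed buffer; if the dimensions are 0 the view is empty and
      // its data() is nullptr.
      rtc::ArrayView<const int16_t> samples = frame.data_view();
      for (int16_t s : samples) {
        (void)s;  // ... process sample ...
      }

      // PushResampler<int16_t>::Resample() similarly takes ArrayViews:
      //   resampler.Resample(frame.data_view(), destination_view);
    }
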
Bug: chromium:335805780
Change-Id: I2937de800422589ebe6a3840b3caadf3d9ff8b00
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/347982
Reviewed-by: Per Åhgren <peah@webrtc.org>
Commit-Queue: Tomas Gunnarsson <tommi@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#42202}
diff --git a/api/audio/BUILD.gn b/api/audio/BUILD.gn
index de654c7..749a0d9 100644
--- a/api/audio/BUILD.gn
+++ b/api/audio/BUILD.gn
@@ -34,6 +34,7 @@
]
deps = [
+ "..:array_view",
"..:rtp_packet_info",
"../../rtc_base:checks",
"../../rtc_base:logging",
diff --git a/api/audio/audio_frame.cc b/api/audio/audio_frame.cc
index 4ddaaf6..375e1b5 100644
--- a/api/audio/audio_frame.cc
+++ b/api/audio/audio_frame.cc
@@ -22,6 +22,20 @@
static_assert(sizeof(data_) == kMaxDataSizeBytes, "kMaxDataSizeBytes");
}
+AudioFrame::AudioFrame(int sample_rate_hz,
+ size_t num_channels,
+ ChannelLayout layout /*= CHANNEL_LAYOUT_UNSUPPORTED*/)
+ : samples_per_channel_(SampleRateToDefaultChannelSize(sample_rate_hz)),
+ sample_rate_hz_(sample_rate_hz),
+ num_channels_(num_channels),
+ channel_layout_(layout == CHANNEL_LAYOUT_UNSUPPORTED
+ ? GuessChannelLayout(num_channels)
+ : layout) {
+ RTC_DCHECK_LE(num_channels_, kMaxConcurrentChannels);
+ RTC_DCHECK_GT(sample_rate_hz_, 0);
+ RTC_DCHECK_GT(samples_per_channel_, 0u);
+}
+
void AudioFrame::Reset() {
ResetWithoutMuting();
muted_ = true;
@@ -51,6 +65,7 @@
SpeechType speech_type,
VADActivity vad_activity,
size_t num_channels) {
+ RTC_CHECK_LE(num_channels, kMaxConcurrentChannels);
timestamp_ = timestamp;
samples_per_channel_ = samples_per_channel;
sample_rate_hz_ = sample_rate_hz;
@@ -110,12 +125,26 @@
}
const int16_t* AudioFrame::data() const {
- return muted_ ? empty_data() : data_;
+ return muted_ ? zeroed_data().begin() : data_;
}
-// TODO(henrik.lundin) Can we skip zeroing the buffer?
-// See https://bugs.chromium.org/p/webrtc/issues/detail?id=5647.
+rtc::ArrayView<const int16_t> AudioFrame::data_view() const {
+ const auto samples = samples_per_channel_ * num_channels_;
+  // If you get a nullptr from `data_view()`, it's likely because
+  // samples_per_channel_ and/or num_channels_ haven't been properly set.
+  // Since `data_view()` returns an rtc::ArrayView<>, we inherit ArrayView's
+  // behavior that when the view size is 0, ArrayView<>::data() returns
+  // nullptr. So even when an AudioFrame is muted and we would otherwise
+  // return `zeroed_data()`, if samples_per_channel_ or num_channels_ is 0,
+  // the view will point to nullptr.
+ return muted_ ? zeroed_data().subview(0, samples)
+ : rtc::ArrayView<const int16_t>(&data_[0], samples);
+}
+
int16_t* AudioFrame::mutable_data() {
+  // TODO: bugs.webrtc.org/5647 - Can we skip zeroing the buffer?
+  // Consider instead zeroing the buffer when `muted_` is set to `true`.
if (muted_) {
memset(data_, 0, kMaxDataSizeBytes);
muted_ = false;
@@ -123,6 +152,29 @@
return data_;
}
+rtc::ArrayView<int16_t> AudioFrame::mutable_data(size_t samples_per_channel,
+ size_t num_channels) {
+ const size_t total_samples = samples_per_channel * num_channels;
+ RTC_CHECK_LE(total_samples, kMaxDataSizeSamples);
+ RTC_CHECK_LE(num_channels, kMaxConcurrentChannels);
+  // Sanity check of argument values during development.
+  // If `samples_per_channel` is greater than 0 but not greater than
+  // kMaxConcurrentChannels, chances are the arguments have been swapped.
+ RTC_DCHECK((samples_per_channel == 0 && num_channels == 0) ||
+ samples_per_channel > kMaxConcurrentChannels);
+
+  // TODO: bugs.webrtc.org/5647 - Can we skip zeroing the buffer?
+  // Consider instead zeroing the whole buffer when `muted_` is set to `true`.
+ if (muted_) {
+ memset(data_, 0, total_samples * sizeof(int16_t));
+ muted_ = false;
+ }
+ samples_per_channel_ = samples_per_channel;
+ num_channels_ = num_channels;
+ return rtc::ArrayView<int16_t>(&data_[0], total_samples);
+}
+
void AudioFrame::Mute() {
muted_ = true;
}
@@ -146,10 +198,20 @@
RTC_CHECK_LE(samples_per_channel_ * num_channels_, kMaxDataSizeSamples);
}
+void AudioFrame::SetSampleRateAndChannelSize(int sample_rate) {
+ sample_rate_hz_ = sample_rate;
+ // We could call `AudioProcessing::GetFrameSize()` here, but that requires
+ // adding a dependency on the ":audio_processing" build target, which can
+ // complicate the dependency tree. Some refactoring is probably in order to
+ // get some consistency around this since there are many places across the
+ // code that assume this default buffer size.
+ samples_per_channel_ = SampleRateToDefaultChannelSize(sample_rate_hz_);
+}
+
// static
-const int16_t* AudioFrame::empty_data() {
+rtc::ArrayView<const int16_t> AudioFrame::zeroed_data() {
static int16_t* null_data = new int16_t[kMaxDataSizeSamples]();
- return &null_data[0];
+ return rtc::ArrayView<const int16_t>(null_data, kMaxDataSizeSamples);
}
} // namespace webrtc
diff --git a/api/audio/audio_frame.h b/api/audio/audio_frame.h
index 81d1255..665127e 100644
--- a/api/audio/audio_frame.h
+++ b/api/audio/audio_frame.h
@@ -14,11 +14,30 @@
#include <stddef.h>
#include <stdint.h>
+#include "api/array_view.h"
#include "api/audio/channel_layout.h"
#include "api/rtp_packet_infos.h"
+#include "rtc_base/checks.h"
namespace webrtc {
+// Default WebRTC audio buffer length in milliseconds.
+constexpr size_t kDefaultAudioBufferLengthMs = 10u;
+
+// Default total number of audio buffers per second based on the default length.
+constexpr size_t kDefaultAudioBuffersPerSec =
+ 1000u / kDefaultAudioBufferLengthMs;
+
+// Returns the number of samples a buffer needs to hold for ~10ms of a single
+// audio channel at a given sample rate.
+// See also `AudioProcessing::GetFrameSize()`.
+inline size_t SampleRateToDefaultChannelSize(size_t sample_rate) {
+ // Basic sanity check. 192kHz is the highest supported input sample rate.
+ RTC_DCHECK_LE(sample_rate, 192000);
+ return sample_rate / kDefaultAudioBuffersPerSec;
+}
+/////////////////////////////////////////////////////////////////////
+
/* This class holds up to 120 ms of super-wideband (32 kHz) stereo audio. It
* allows for adding and subtracting frames while keeping track of the resulting
* states.
@@ -57,6 +76,15 @@
AudioFrame();
+  // Constructs an audio frame with frame length properties and channel
+  // information. `samples_per_channel()` is initialized to a 10ms buffer
+  // size. If `layout` is not specified (i.e. the default value
+  // CHANNEL_LAYOUT_UNSUPPORTED is used), the channel layout is derived
+  // (guessed) from `num_channels`.
+ AudioFrame(int sample_rate_hz,
+ size_t num_channels,
+ ChannelLayout layout = CHANNEL_LAYOUT_UNSUPPORTED);
+
AudioFrame(const AudioFrame&) = delete;
AudioFrame& operator=(const AudioFrame&) = delete;
@@ -68,6 +96,7 @@
// ResetWithoutMuting() to skip this wasteful zeroing.
void ResetWithoutMuting();
+ // TODO: b/335805780 - Accept ArrayView.
void UpdateFrame(uint32_t timestamp,
const int16_t* data,
size_t samples_per_channel,
@@ -90,11 +119,29 @@
int64_t ElapsedProfileTimeMs() const;
// data() returns a zeroed static buffer if the frame is muted.
- // mutable_frame() always returns a non-static buffer; the first call to
- // mutable_frame() zeros the non-static buffer and marks the frame unmuted.
+ // TODO: b/335805780 - Return ArrayView.
const int16_t* data() const;
+
+ // Returns a read-only view of all the valid samples held by the AudioFrame.
+ // Note that for a muted AudioFrame, the size of the returned view will be
+ // 0u and the contained data will be nullptr.
+ rtc::ArrayView<const int16_t> data_view() const;
+
+  // mutable_data() always returns a non-static buffer; the first call to
+  // mutable_data() zeros the buffer and marks the frame as unmuted.
+ // TODO: b/335805780 - Return ArrayView based on the current values for
+ // samples per channel and num channels.
int16_t* mutable_data();
+  // Grants write access to the audio buffer. The size of the returned
+  // writable view is determined by the `samples_per_channel` and
+  // `num_channels` dimensions, which the function checks for correctness and
+  // stores in the corresponding member variables, exposed via
+  // `samples_per_channel()` and `num_channels()`.
+  // If the frame is currently muted, the returned view is zeroed out.
+ rtc::ArrayView<int16_t> mutable_data(size_t samples_per_channel,
+ size_t num_channels);
+
// Prefer to mute frames using AudioFrameOperations::Mute.
void Mute();
// Frame is muted by default.
@@ -119,6 +166,10 @@
return absolute_capture_timestamp_ms_;
}
+  // Sets the sample_rate_hz member and derives a default 10ms
+  // samples_per_channel value from the given sample rate.
+ void SetSampleRateAndChannelSize(int sample_rate);
+
// RTP timestamp of the first sample in the AudioFrame.
uint32_t timestamp_ = 0;
// Time since the first frame in milliseconds.
@@ -157,9 +208,9 @@
private:
// A permanently zeroed out buffer to represent muted frames. This is a
- // header-only class, so the only way to avoid creating a separate empty
+ // header-only class, so the only way to avoid creating a separate zeroed
// buffer per translation unit is to wrap a static in an inline function.
- static const int16_t* empty_data();
+ static rtc::ArrayView<const int16_t> zeroed_data();
int16_t data_[kMaxDataSizeSamples];
bool muted_ = true;
diff --git a/api/audio/test/audio_frame_unittest.cc b/api/audio/test/audio_frame_unittest.cc
index dbf45ce..52d7e42 100644
--- a/api/audio/test/audio_frame_unittest.cc
+++ b/api/audio/test/audio_frame_unittest.cc
@@ -19,10 +19,27 @@
namespace {
+bool AllSamplesAre(int16_t sample, rtc::ArrayView<const int16_t> samples) {
+ for (const auto s : samples) {
+ if (s != sample) {
+ return false;
+ }
+ }
+ return true;
+}
+
bool AllSamplesAre(int16_t sample, const AudioFrame& frame) {
- const int16_t* frame_data = frame.data();
- for (size_t i = 0; i < frame.max_16bit_samples(); i++) {
- if (frame_data[i] != sample) {
+ return AllSamplesAre(sample, frame.data_view());
+}
+
+// Checks the values of all samples in the AudioFrame buffer, regardless of
+// whether they're valid or not, disregarding the `muted()` state of the frame.
+// I.e. uses `max_16bit_samples()` instead of the audio properties
+// `num_channels * samples_per_channel`.
+bool AllBufferSamplesAre(int16_t sample, const AudioFrame& frame) {
+ const auto* data = frame.data_view().data();
+ for (size_t i = 0; i < frame.max_16bit_samples(); ++i) {
+ if (data[i] != sample) {
return false;
}
}
@@ -38,29 +55,46 @@
} // namespace
-TEST(AudioFrameTest, FrameStartsMuted) {
+TEST(AudioFrameTest, FrameStartsZeroedAndMuted) {
AudioFrame frame;
EXPECT_TRUE(frame.muted());
+ EXPECT_TRUE(frame.data_view().empty());
EXPECT_TRUE(AllSamplesAre(0, frame));
}
+// TODO: b/335805780 - Delete test when `mutable_data()` returns ArrayView.
+TEST(AudioFrameTest, UnmutedFrameIsInitiallyZeroedLegacy) {
+ AudioFrame frame(kSampleRateHz, kNumChannelsMono, CHANNEL_LAYOUT_NONE);
+ frame.mutable_data();
+ EXPECT_FALSE(frame.muted());
+ EXPECT_TRUE(AllSamplesAre(0, frame));
+ EXPECT_TRUE(AllBufferSamplesAre(0, frame));
+}
+
TEST(AudioFrameTest, UnmutedFrameIsInitiallyZeroed) {
AudioFrame frame;
- frame.mutable_data();
+ auto data = frame.mutable_data(kSamplesPerChannel, kNumChannelsMono);
EXPECT_FALSE(frame.muted());
+ EXPECT_EQ(frame.data_view().size(), kSamplesPerChannel);
+ EXPECT_EQ(data.size(), kSamplesPerChannel);
EXPECT_TRUE(AllSamplesAre(0, frame));
}
TEST(AudioFrameTest, MutedFrameBufferIsZeroed) {
AudioFrame frame;
- int16_t* frame_data = frame.mutable_data();
+ int16_t* frame_data =
+ frame.mutable_data(kSamplesPerChannel, kNumChannelsMono).begin();
+ EXPECT_FALSE(frame.muted());
+  // Fill the entire buffer with non-zero data.
for (size_t i = 0; i < frame.max_16bit_samples(); i++) {
frame_data[i] = 17;
}
ASSERT_TRUE(AllSamplesAre(17, frame));
+ ASSERT_TRUE(AllBufferSamplesAre(17, frame));
frame.Mute();
EXPECT_TRUE(frame.muted());
EXPECT_TRUE(AllSamplesAre(0, frame));
+ ASSERT_TRUE(AllBufferSamplesAre(0, frame));
}
TEST(AudioFrameTest, UpdateFrameMono) {
@@ -95,11 +129,17 @@
EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
EXPECT_EQ(kNumChannelsStereo, frame.num_channels());
EXPECT_EQ(CHANNEL_LAYOUT_STEREO, frame.channel_layout());
+ EXPECT_TRUE(frame.muted());
- frame.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
+ // Initialize the frame with valid `kNumChannels5_1` data to make sure we
+ // get an unmuted frame with valid samples.
+ int16_t samples[kSamplesPerChannel * kNumChannels5_1] = {17};
+ frame.UpdateFrame(kTimestamp, samples /* data */, kSamplesPerChannel,
kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
kNumChannels5_1);
+ EXPECT_FALSE(frame.muted());
EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
+ EXPECT_EQ(kSamplesPerChannel * kNumChannels5_1, frame.data_view().size());
EXPECT_EQ(kNumChannels5_1, frame.num_channels());
EXPECT_EQ(CHANNEL_LAYOUT_5_1, frame.channel_layout());
}
@@ -121,6 +161,7 @@
EXPECT_EQ(frame2.vad_activity_, frame1.vad_activity_);
EXPECT_EQ(frame2.num_channels_, frame1.num_channels_);
+ EXPECT_EQ(frame2.data_view().size(), frame1.data_view().size());
EXPECT_EQ(frame2.muted(), frame1.muted());
EXPECT_EQ(0, memcmp(frame2.data(), frame1.data(), sizeof(samples)));
diff --git a/audio/audio_transport_impl.cc b/audio/audio_transport_impl.cc
index 42a81d5..d1ecb0f 100644
--- a/audio/audio_transport_impl.cc
+++ b/audio/audio_transport_impl.cc
@@ -70,20 +70,21 @@
int Resample(const AudioFrame& frame,
const int destination_sample_rate,
PushResampler<int16_t>* resampler,
- int16_t* destination) {
+ rtc::ArrayView<int16_t> destination) {
TRACE_EVENT2("webrtc", "Resample", "frame sample rate", frame.sample_rate_hz_,
"destination_sample_rate", destination_sample_rate);
const int number_of_channels = static_cast<int>(frame.num_channels_);
const int target_number_of_samples_per_channel =
destination_sample_rate / 100;
+ RTC_CHECK_EQ(destination.size(),
+ frame.num_channels_ * target_number_of_samples_per_channel);
+
resampler->InitializeIfNeeded(frame.sample_rate_hz_, destination_sample_rate,
number_of_channels);
// TODO(yujo): make resampler take an AudioFrame, and add special case
// handling of muted frames.
- return resampler->Resample(
- frame.data(), frame.samples_per_channel_ * number_of_channels,
- destination, number_of_channels * target_number_of_samples_per_channel);
+ return resampler->Resample(frame.data_view(), destination);
}
} // namespace
@@ -232,8 +233,10 @@
RTC_DCHECK_EQ(error, AudioProcessing::kNoError);
}
- nSamplesOut = Resample(mixed_frame_, samplesPerSec, &render_resampler_,
- static_cast<int16_t*>(audioSamples));
+ nSamplesOut =
+ Resample(mixed_frame_, samplesPerSec, &render_resampler_,
+ rtc::ArrayView<int16_t>(static_cast<int16_t*>(audioSamples),
+ nSamples * nChannels));
RTC_DCHECK_EQ(nSamplesOut, nChannels * nSamples);
return 0;
}
@@ -263,8 +266,10 @@
*elapsed_time_ms = mixed_frame_.elapsed_time_ms_;
*ntp_time_ms = mixed_frame_.ntp_time_ms_;
- auto output_samples = Resample(mixed_frame_, sample_rate, &render_resampler_,
- static_cast<int16_t*>(audio_data));
+ int output_samples =
+ Resample(mixed_frame_, sample_rate, &render_resampler_,
+ rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_data),
+ number_of_channels * number_of_frames));
RTC_DCHECK_EQ(output_samples, number_of_channels * number_of_frames);
}
diff --git a/audio/remix_resample.cc b/audio/remix_resample.cc
index 178af62..a0cf7cc 100644
--- a/audio/remix_resample.cc
+++ b/audio/remix_resample.cc
@@ -14,6 +14,7 @@
#include "audio/utility/audio_frame_operations.h"
#include "common_audio/resampler/include/push_resampler.h"
#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
namespace webrtc {
namespace voe {
@@ -67,15 +68,22 @@
// how much to zero here; or 2) make resampler accept a hint that the input is
// zeroed.
const size_t src_length = samples_per_channel * audio_ptr_num_channels;
- int out_length =
- resampler->Resample(audio_ptr, src_length, dst_frame->mutable_data(),
- AudioFrame::kMaxDataSizeSamples);
+  // Ensure the `samples_per_channel_` member is set correctly based on the
+  // destination sample rate and the assumed 10ms buffer size.
+  // TODO(tommi): Could we assume that the caller has already done this?
+ dst_frame->SetSampleRateAndChannelSize(dst_frame->sample_rate_hz_);
+
+ int out_length = resampler->Resample(
+ rtc::ArrayView<const int16_t>(audio_ptr, src_length),
+ dst_frame->mutable_data(dst_frame->samples_per_channel_,
+ dst_frame->num_channels_));
if (out_length == -1) {
RTC_FATAL() << "Resample failed: audio_ptr = " << audio_ptr
<< ", src_length = " << src_length
<< ", dst_frame->mutable_data() = "
<< dst_frame->mutable_data();
}
+
dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels;
// Upmix after resampling.
diff --git a/audio/remix_resample.h b/audio/remix_resample.h
index bd8da76..580ba40 100644
--- a/audio/remix_resample.h
+++ b/audio/remix_resample.h
@@ -17,6 +17,8 @@
namespace webrtc {
namespace voe {
+// Note: The RemixAndResample methods assume 10ms buffer sizes.
+
// Upmix or downmix and resample the audio to `dst_frame`. Expects `dst_frame`
// to have its sample rate and channels members set to the desired values.
// Updates the `samples_per_channel_` member accordingly.
diff --git a/common_audio/resampler/include/push_resampler.h b/common_audio/resampler/include/push_resampler.h
index 3da6712..35783b6 100644
--- a/common_audio/resampler/include/push_resampler.h
+++ b/common_audio/resampler/include/push_resampler.h
@@ -14,11 +14,14 @@
#include <memory>
#include <vector>
+#include "api/array_view.h"
+
namespace webrtc {
class PushSincResampler;
// Wraps PushSincResampler to provide stereo support.
+// Note: This implementation assumes 10ms buffer sizes throughout.
// TODO(ajm): add support for an arbitrary number of channels.
template <typename T>
class PushResampler {
@@ -34,7 +37,7 @@
// Returns the total number of samples provided in destination (e.g. 32 kHz,
// 2 channel audio gives 640 samples).
- int Resample(const T* src, size_t src_length, T* dst, size_t dst_capacity);
+ int Resample(rtc::ArrayView<const T> src, rtc::ArrayView<T> dst);
private:
int src_sample_rate_hz_;
diff --git a/common_audio/resampler/push_resampler.cc b/common_audio/resampler/push_resampler.cc
index 810d778..0af5ec7 100644
--- a/common_audio/resampler/push_resampler.cc
+++ b/common_audio/resampler/push_resampler.cc
@@ -73,32 +73,31 @@
}
template <typename T>
-int PushResampler<T>::Resample(const T* src,
- size_t src_length,
- T* dst,
- size_t dst_capacity) {
+int PushResampler<T>::Resample(rtc::ArrayView<const T> src,
+ rtc::ArrayView<T> dst) {
// These checks used to be factored out of this template function due to
// Windows debug build issues with clang. http://crbug.com/615050
const size_t src_size_10ms = (src_sample_rate_hz_ / 100) * num_channels_;
const size_t dst_size_10ms = (dst_sample_rate_hz_ / 100) * num_channels_;
- RTC_DCHECK_EQ(src_length, src_size_10ms);
- RTC_DCHECK_GE(dst_capacity, dst_size_10ms);
+ RTC_DCHECK_EQ(src.size(), src_size_10ms);
+ RTC_DCHECK_GE(dst.size(), dst_size_10ms);
if (src_sample_rate_hz_ == dst_sample_rate_hz_) {
// The old resampler provides this memcpy facility in the case of matching
// sample rates, so reproduce it here for the sinc resampler.
- memcpy(dst, src, src_length * sizeof(T));
- return static_cast<int>(src_length);
+ memcpy(dst.data(), src.data(), src.size() * sizeof(T));
+ return static_cast<int>(src.size());
}
- const size_t src_length_mono = src_length / num_channels_;
- const size_t dst_capacity_mono = dst_capacity / num_channels_;
+ const size_t src_length_mono = src.size() / num_channels_;
+ const size_t dst_capacity_mono = dst.size() / num_channels_;
for (size_t ch = 0; ch < num_channels_; ++ch) {
channel_data_array_[ch] = channel_resamplers_[ch].source.data();
}
- Deinterleave(src, src_length_mono, num_channels_, channel_data_array_.data());
+ Deinterleave(src.data(), src_length_mono, num_channels_,
+ channel_data_array_.data());
size_t dst_length_mono = 0;
@@ -112,7 +111,8 @@
channel_data_array_[ch] = channel_resamplers_[ch].destination.data();
}
- Interleave(channel_data_array_.data(), dst_length_mono, num_channels_, dst);
+ Interleave(channel_data_array_.data(), dst_length_mono, num_channels_,
+ dst.data());
return static_cast<int>(dst_length_mono * num_channels_);
}
diff --git a/modules/audio_coding/acm2/acm_resampler.cc b/modules/audio_coding/acm2/acm_resampler.cc
index e307c6c..bcac7b6 100644
--- a/modules/audio_coding/acm2/acm_resampler.cc
+++ b/modules/audio_coding/acm2/acm_resampler.cc
@@ -45,8 +45,9 @@
return -1;
}
- int out_length =
- resampler_.Resample(in_audio, in_length, out_audio, out_capacity_samples);
+ int out_length = resampler_.Resample(
+ rtc::ArrayView<const int16_t>(in_audio, in_length),
+ rtc::ArrayView<int16_t>(out_audio, out_capacity_samples));
if (out_length == -1) {
RTC_LOG(LS_ERROR) << "Resample(" << in_audio << ", " << in_length << ", "
<< out_audio << ", " << out_capacity_samples
diff --git a/modules/audio_mixer/audio_mixer_impl_unittest.cc b/modules/audio_mixer/audio_mixer_impl_unittest.cc
index 2044cb9..b04b706 100644
--- a/modules/audio_mixer/audio_mixer_impl_unittest.cc
+++ b/modules/audio_mixer/audio_mixer_impl_unittest.cc
@@ -517,13 +517,8 @@
other_frame->samples_per_channel_ = kSamplesPerChannel;
mixer->AddSource(&other_source);
-#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+#if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
EXPECT_DEATH(mixer->Mix(kNumberOfChannels, &frame_for_mixing), "");
-#elif !RTC_DCHECK_IS_ON
- mixer->Mix(kNumberOfChannels, &frame_for_mixing);
- EXPECT_EQ(frame_for_mixing.num_channels_, kNumberOfChannels);
- EXPECT_EQ(frame_for_mixing.sample_rate_hz_,
- HighOutputRateCalculator::kDefaultFrequency);
#endif
}
diff --git a/modules/audio_mixer/frame_combiner_unittest.cc b/modules/audio_mixer/frame_combiner_unittest.cc
index 6c64d08..486f551 100644
--- a/modules/audio_mixer/frame_combiner_unittest.cc
+++ b/modules/audio_mixer/frame_combiner_unittest.cc
@@ -139,8 +139,9 @@
}
}
-// There are DCHECKs in place to check for invalid parameters.
-TEST(FrameCombinerDeathTest, DebugBuildCrashesWithManyChannels) {
+#if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// There are CHECKs in place to guard against invalid parameters.
+TEST(FrameCombinerDeathTest, BuildCrashesWithManyChannels) {
FrameCombiner combiner(true);
for (const int rate : {8000, 18000, 34000, 48000}) {
for (const int number_of_channels : {10, 20, 21}) {
@@ -149,7 +150,9 @@
continue;
}
const std::vector<AudioFrame*> all_frames = {&frame1, &frame2};
- SetUpFrames(rate, number_of_channels);
+ // With an unsupported channel count, this will crash in
+ // `AudioFrame::UpdateFrame`.
+ EXPECT_DEATH(SetUpFrames(rate, number_of_channels), "");
const int number_of_frames = 2;
SCOPED_TRACE(
@@ -157,18 +160,14 @@
const std::vector<AudioFrame*> frames_to_combine(
all_frames.begin(), all_frames.begin() + number_of_frames);
AudioFrame audio_frame_for_mixing;
-#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
EXPECT_DEATH(
combiner.Combine(frames_to_combine, number_of_channels, rate,
frames_to_combine.size(), &audio_frame_for_mixing),
"");
-#elif !RTC_DCHECK_IS_ON
- combiner.Combine(frames_to_combine, number_of_channels, rate,
- frames_to_combine.size(), &audio_frame_for_mixing);
-#endif
}
}
}
+#endif // GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
TEST(FrameCombinerDeathTest, DebugBuildCrashesWithHighRate) {
FrameCombiner combiner(true);
@@ -249,7 +248,8 @@
TEST(FrameCombiner, CombiningOneFrameShouldNotChangeFrame) {
FrameCombiner combiner(false);
for (const int rate : {8000, 10000, 11000, 32000, 44100}) {
- for (const int number_of_channels : {1, 2, 4, 8, 10}) {
+ // kMaxConcurrentChannels is 8.
+ for (const int number_of_channels : {1, 2, 4, kMaxConcurrentChannels}) {
SCOPED_TRACE(ProduceDebugText(rate, number_of_channels, 1));
AudioFrame audio_frame_for_mixing;
diff --git a/modules/audio_processing/agc2/vad_wrapper.cc b/modules/audio_processing/agc2/vad_wrapper.cc
index af6325d..b391224 100644
--- a/modules/audio_processing/agc2/vad_wrapper.cc
+++ b/modules/audio_processing/agc2/vad_wrapper.cc
@@ -104,8 +104,7 @@
}
// Resample the first channel of `frame`.
RTC_DCHECK_EQ(frame.samples_per_channel(), frame_size_);
- resampler_.Resample(frame.channel(0).data(), frame_size_,
- resampled_buffer_.data(), resampled_buffer_.size());
+ resampler_.Resample(frame.channel(0), resampled_buffer_);
return vad_->Analyze(resampled_buffer_);
}
diff --git a/modules/audio_processing/audio_processing_unittest.cc b/modules/audio_processing/audio_processing_unittest.cc
index 4d3fc65..819e980 100644
--- a/modules/audio_processing/audio_processing_unittest.cc
+++ b/modules/audio_processing/audio_processing_unittest.cc
@@ -2198,7 +2198,8 @@
// necessary.
ASSERT_EQ(ref_length,
static_cast<size_t>(resampler.Resample(
- out_ptr, out_length, cmp_data.get(), ref_length)));
+ rtc::ArrayView<const float>(out_ptr, out_length),
+ rtc::ArrayView<float>(cmp_data.get(), ref_length))));
out_ptr = cmp_data.get();
}