Add audio view classes

From the new header file:
* MonoView<>: A single channel contiguous buffer of samples.
* InterleavedView<>: Channel samples are interleaved (side-by-side) in
  the buffer. A single channel InterleavedView<> is the same thing as a
  MonoView<>
* DeinterleavedView<>: Each channel's samples are contiguous within the
  buffer. Channels can be enumerated and accessing the
  individual channel data is done via MonoView<>.

There are also a few utility functions that offer a unified way to check
the properties regardless of what view type is in use.

Bug: chromium:335805780
Change-Id: I28196f8f4ded4fadc72ee32b62af304c62f4fc47
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/349300
Reviewed-by: Per Åhgren <peah@webrtc.org>
Commit-Queue: Tomas Gunnarsson <tommi@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#42377}
diff --git a/api/audio/BUILD.gn b/api/audio/BUILD.gn
index ac399d9..10c96d0 100644
--- a/api/audio/BUILD.gn
+++ b/api/audio/BUILD.gn
@@ -29,6 +29,7 @@
   sources = [
     "audio_frame.cc",
     "audio_frame.h",
+    "audio_view.h",
     "channel_layout.cc",
     "channel_layout.h",
   ]
diff --git a/api/audio/audio_frame.cc b/api/audio/audio_frame.cc
index ce89323..b7fcede 100644
--- a/api/audio/audio_frame.cc
+++ b/api/audio/audio_frame.cc
@@ -137,17 +137,17 @@
   return muted_ ? zeroed_data().begin() : data_;
 }
 
-rtc::ArrayView<const int16_t> AudioFrame::data_view() const {
-  const auto samples = samples_per_channel_ * num_channels_;
+InterleavedView<const int16_t> AudioFrame::data_view() const {
   // If you get a nullptr from `data_view()`, it's likely because the
-  // samples_per_channel_ and/or num_channels_ haven't been properly set.
-  // Since `data_view()` returns an rtc::ArrayView<>, we inherit the behavior
-  // in ArrayView when the view size is 0 that ArrayView<>::data() will always
-  // return nullptr. So, even when an AudioFrame is muted and we want to
-  // return `zeroed_data()`, if samples_per_channel_ or  num_channels_ is 0,
-  // the view will point to nullptr.
-  return muted_ ? zeroed_data().subview(0, samples)
-                : rtc::ArrayView<const int16_t>(&data_[0], samples);
+  // samples_per_channel_ and/or num_channels_ members haven't been properly
+  // set. Since `data_view()` returns an InterleavedView<> (which internally
+  // uses rtc::ArrayView<>), we inherit the behavior in InterleavedView when the
+  // view size is 0 that ArrayView<>::data() returns nullptr. So, even when an
+  // AudioFrame is muted and we want to return `zeroed_data()`, if
+  // samples_per_channel_ or num_channels_ is 0, the view will point to
+  // nullptr.
+  return InterleavedView<const int16_t>(muted_ ? &zeroed_data()[0] : &data_[0],
+                                        samples_per_channel_, num_channels_);
 }
 
 int16_t* AudioFrame::mutable_data() {
@@ -161,8 +161,8 @@
   return data_;
 }
 
-rtc::ArrayView<int16_t> AudioFrame::mutable_data(size_t samples_per_channel,
-                                                 size_t num_channels) {
+InterleavedView<int16_t> AudioFrame::mutable_data(size_t samples_per_channel,
+                                                  size_t num_channels) {
   const size_t total_samples = samples_per_channel * num_channels;
   RTC_CHECK_LE(total_samples, kMaxDataSizeSamples);
   RTC_CHECK_LE(num_channels, kMaxConcurrentChannels);
@@ -183,7 +183,7 @@
   }
   samples_per_channel_ = samples_per_channel;
   num_channels_ = num_channels;
-  return rtc::ArrayView<int16_t>(&data_[0], total_samples);
+  return InterleavedView<int16_t>(&data_[0], samples_per_channel, num_channels);
 }
 
 void AudioFrame::Mute() {
diff --git a/api/audio/audio_frame.h b/api/audio/audio_frame.h
index 665127e..fa4c96c 100644
--- a/api/audio/audio_frame.h
+++ b/api/audio/audio_frame.h
@@ -15,6 +15,7 @@
 #include <stdint.h>
 
 #include "api/array_view.h"
+#include "api/audio/audio_view.h"
 #include "api/audio/channel_layout.h"
 #include "api/rtp_packet_infos.h"
 #include "rtc_base/checks.h"
@@ -96,7 +97,7 @@
   // ResetWithoutMuting() to skip this wasteful zeroing.
   void ResetWithoutMuting();
 
-  // TODO: b/335805780 - Accept ArrayView.
+  // TODO: b/335805780 - Accept InterleavedView.
   void UpdateFrame(uint32_t timestamp,
                    const int16_t* data,
                    size_t samples_per_channel,
@@ -119,18 +120,16 @@
   int64_t ElapsedProfileTimeMs() const;
 
   // data() returns a zeroed static buffer if the frame is muted.
-  // TODO: b/335805780 - Return ArrayView.
+  // TODO: b/335805780 - Return InterleavedView.
   const int16_t* data() const;
 
   // Returns a read-only view of all the valid samples held by the AudioFrame.
-  // Note that for a muted AudioFrame, the size of the returned view will be
-  // 0u and the contained data will be nullptr.
-  rtc::ArrayView<const int16_t> data_view() const;
+  // For a muted AudioFrame, the samples will all be 0.
+  InterleavedView<const int16_t> data_view() const;
 
   // mutable_frame() always returns a non-static buffer; the first call to
   // mutable_frame() zeros the buffer and marks the frame as unmuted.
-  // TODO: b/335805780 - Return ArrayView based on the current values for
-  // samples per channel and num channels.
+  // TODO: b/335805780 - Return an InterleavedView.
   int16_t* mutable_data();
 
   // Grants write access to the audio buffer. The size of the returned writable
@@ -139,8 +138,8 @@
   // internal member variables; `samples_per_channel()` and `num_channels()`
   // respectively.
   // If the state is currently muted, the returned view will be zeroed out.
-  rtc::ArrayView<int16_t> mutable_data(size_t samples_per_channel,
-                                       size_t num_channels);
+  InterleavedView<int16_t> mutable_data(size_t samples_per_channel,
+                                        size_t num_channels);
 
   // Prefer to mute frames using AudioFrameOperations::Mute.
   void Mute();
diff --git a/api/audio/audio_view.h b/api/audio/audio_view.h
new file mode 100644
index 0000000..ba5682b
--- /dev/null
+++ b/api/audio/audio_view.h
@@ -0,0 +1,253 @@
+/*
+ *  Copyright (c) 2024 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_AUDIO_AUDIO_VIEW_H_
+#define API_AUDIO_AUDIO_VIEW_H_
+
+#include "api/array_view.h"
+#include "api/audio/channel_layout.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// This file contains 3 types of view classes:
+//
+// * MonoView<>: A single channel contiguous buffer of samples.
+//
+// * InterleavedView<>: Channel samples are interleaved (side-by-side) in
+//   the buffer. A single channel InterleavedView<> is the same thing as a
+//   MonoView<>
+//
+// * DeinterleavedView<>: Each channel's samples are contiguous within the
+//   buffer. Channels can be enumerated and accessing the individual channel
+//   data is done via MonoView<>.
+//
+// The views are comparable to and built on rtc::ArrayView<> but add
+// audio specific properties for the dimensions of the buffer and the above
+// specialized [de]interleaved support.
+//
+// There are also a few generic utility functions that can simplify
+// generic code for supporting more than one type of view.
+
+// MonoView<> represents a view over a single contiguous, audio buffer. This
+// can be either an single channel (mono) interleaved buffer (e.g. AudioFrame),
+// or a de-interleaved channel (e.g. from AudioBuffer).
+template <typename T>
+using MonoView = rtc::ArrayView<T>;
+
+// InterleavedView<> is a view over an interleaved audio buffer (e.g. from
+// AudioFrame).
+template <typename T>
+class InterleavedView {
+ public:
+  using value_type = T;
+
+  InterleavedView() = default;
+
+  template <typename U>
+  InterleavedView(U* data, size_t samples_per_channel, size_t num_channels)
+      : num_channels_(num_channels),
+        samples_per_channel_(samples_per_channel),
+        data_(data, num_channels * samples_per_channel) {
+    RTC_DCHECK_LE(num_channels_, kMaxConcurrentChannels);
+    RTC_DCHECK(num_channels_ == 0u || samples_per_channel_ != 0u);
+  }
+
+  // Construct an InterleavedView from a C-style array. Samples per channels
+  // is calculated based on the array size / num_channels.
+  template <typename U, size_t N>
+  InterleavedView(U (&array)[N],  // NOLINT
+                  size_t num_channels)
+      : InterleavedView(array, N / num_channels, num_channels) {
+    RTC_DCHECK_EQ(N % num_channels, 0u);
+  }
+
+  template <typename U>
+  InterleavedView(const InterleavedView<U>& other)
+      : num_channels_(other.num_channels()),
+        samples_per_channel_(other.samples_per_channel()),
+        data_(other.data()) {}
+
+  size_t num_channels() const { return num_channels_; }
+  size_t samples_per_channel() const { return samples_per_channel_; }
+  rtc::ArrayView<T> data() const { return data_; }
+  bool empty() const { return data_.empty(); }
+  size_t size() const { return data_.size(); }
+
+  MonoView<T> AsMono() const {
+    RTC_DCHECK_EQ(num_channels(), 1u);
+    RTC_DCHECK_EQ(data_.size(), samples_per_channel_);
+    return data_;
+  }
+
+  // A simple wrapper around memcpy that includes checks for properties.
+  // TODO(tommi): Consider if this can be utility function for both interleaved
+  // and deinterleaved views.
+  template <typename U>
+  void CopyFrom(const InterleavedView<U>& source) {
+    static_assert(sizeof(T) == sizeof(U), "");
+    RTC_DCHECK_EQ(num_channels(), source.num_channels());
+    RTC_DCHECK_EQ(samples_per_channel(), source.samples_per_channel());
+    RTC_DCHECK_GE(data_.size(), source.data().size());
+    const auto data = source.data();
+    memcpy(&data_[0], &data[0], data.size() * sizeof(U));
+  }
+
+  T& operator[](size_t idx) const { return data_[idx]; }
+  T* begin() const { return data_.begin(); }
+  T* end() const { return data_.end(); }
+  const T* cbegin() const { return data_.cbegin(); }
+  const T* cend() const { return data_.cend(); }
+  std::reverse_iterator<T*> rbegin() const { return data_.rbegin(); }
+  std::reverse_iterator<T*> rend() const { return data_.rend(); }
+  std::reverse_iterator<const T*> crbegin() const { return data_.crbegin(); }
+  std::reverse_iterator<const T*> crend() const { return data_.crend(); }
+
+ private:
+  // TODO(tommi): Consider having these both be stored as uint16_t to
+  // save a few bytes per view. Use `dchecked_cast` to support size_t during
+  // construction.
+  size_t num_channels_ = 0u;
+  size_t samples_per_channel_ = 0u;
+  rtc::ArrayView<T> data_;
+};
+
+template <typename T>
+class DeinterleavedView {
+ public:
+  using value_type = T;
+
+  DeinterleavedView() = default;
+
+  template <typename U>
+  DeinterleavedView(U* data, size_t samples_per_channel, size_t num_channels)
+      : num_channels_(num_channels),
+        samples_per_channel_(samples_per_channel),
+        data_(data, num_channels * samples_per_channel_) {}
+
+  template <typename U>
+  DeinterleavedView(const DeinterleavedView<U>& other)
+      : num_channels_(other.num_channels()),
+        samples_per_channel_(other.samples_per_channel()),
+        data_(other.data()) {}
+
+  // Returns a deinterleaved channel where `idx` is the zero based index,
+  // in the range [0 .. num_channels()-1].
+  MonoView<T> operator[](size_t idx) const {
+    RTC_DCHECK_LT(idx, num_channels_);
+    return MonoView<T>(&data_[idx * samples_per_channel_],
+                       samples_per_channel_);
+  }
+
+  size_t num_channels() const { return num_channels_; }
+  size_t samples_per_channel() const { return samples_per_channel_; }
+  rtc::ArrayView<T> data() const { return data_; }
+  bool empty() const { return data_.empty(); }
+  size_t size() const { return data_.size(); }
+
+  // Returns the first (and possibly only) channel.
+  MonoView<T> AsMono() const {
+    RTC_DCHECK_GE(num_channels(), 1u);
+    return (*this)[0];
+  }
+
+ private:
+  // TODO(tommi): Consider having these be stored as uint16_t to save a few
+  // bytes per view. Use `dchecked_cast` to support size_t during construction.
+  size_t num_channels_ = 0u;
+  size_t samples_per_channel_ = 0u;
+  rtc::ArrayView<T> data_;
+};
+
+template <typename T>
+constexpr size_t NumChannels(const MonoView<T>& view) {
+  return 1u;
+}
+
+template <typename T>
+size_t NumChannels(const InterleavedView<T>& view) {
+  return view.num_channels();
+}
+
+template <typename T>
+size_t NumChannels(const DeinterleavedView<T>& view) {
+  return view.num_channels();
+}
+
+template <typename T>
+constexpr bool IsMono(const MonoView<T>& view) {
+  return true;
+}
+
+template <typename T>
+constexpr bool IsInterleavedView(const MonoView<T>& view) {
+  return true;
+}
+
+template <typename T>
+constexpr bool IsInterleavedView(const InterleavedView<T>& view) {
+  return true;
+}
+
+// Returns false for deinterleaved views. The parameter must be spelled
+// `DeinterleavedView<T>` (not `DeinterleavedView<const T>`) so that template
+// argument deduction succeeds for both const and non-const element types;
+// with `const T` a call with e.g. DeinterleavedView<int16_t> would not match.
+template <typename T>
+constexpr bool IsInterleavedView(const DeinterleavedView<T>& view) {
+  return false;
+}
+
+template <typename T>
+bool IsMono(const InterleavedView<T>& view) {
+  return NumChannels(view) == 1u;
+}
+
+template <typename T>
+bool IsMono(const DeinterleavedView<T>& view) {
+  return NumChannels(view) == 1u;
+}
+
+template <typename T>
+size_t SamplesPerChannel(const MonoView<T>& view) {
+  return view.size();
+}
+
+template <typename T>
+size_t SamplesPerChannel(const InterleavedView<T>& view) {
+  return view.samples_per_channel();
+}
+
+template <typename T>
+size_t SamplesPerChannel(const DeinterleavedView<T>& view) {
+  return view.samples_per_channel();
+}
+// A simple wrapper around memcpy that includes checks for properties.
+// The parameter order is the same as for memcpy(), first destination then
+// source.
+template <typename D, typename S>
+void CopySamples(D& destination, const S& source) {
+  static_assert(
+      sizeof(typename D::value_type) == sizeof(typename S::value_type), "");
+  // Here we'd really like to do
+  // static_assert(IsInterleavedView(destination) == IsInterleavedView(source),
+  //               "");
+  // but the compiler doesn't like it inside this template function for
+  // some reason. The following check is an approximation but unfortunately
+  // means that copying between a MonoView and single channel interleaved or
+  // deinterleaved views wouldn't work.
+  // static_assert(sizeof(destination) == sizeof(source),
+  //               "Incompatible view types");
+  RTC_DCHECK_EQ(NumChannels(destination), NumChannels(source));
+  RTC_DCHECK_EQ(SamplesPerChannel(destination), SamplesPerChannel(source));
+  RTC_DCHECK_GE(destination.data().size(), source.data().size());
+  memcpy(&destination[0], &source[0],
+         source.size() * sizeof(typename S::value_type));
+}
+
+}  // namespace webrtc
+
+#endif  // API_AUDIO_AUDIO_VIEW_H_
diff --git a/api/audio/test/BUILD.gn b/api/audio/test/BUILD.gn
index 8d5822f..91b8d31 100644
--- a/api/audio/test/BUILD.gn
+++ b/api/audio/test/BUILD.gn
@@ -17,6 +17,7 @@
     testonly = true
     sources = [
       "audio_frame_unittest.cc",
+      "audio_view_unittest.cc",
       "echo_canceller3_config_unittest.cc",
     ]
     deps = [
diff --git a/api/audio/test/audio_frame_unittest.cc b/api/audio/test/audio_frame_unittest.cc
index 52d7e42..f962838 100644
--- a/api/audio/test/audio_frame_unittest.cc
+++ b/api/audio/test/audio_frame_unittest.cc
@@ -19,7 +19,7 @@
 
 namespace {
 
-bool AllSamplesAre(int16_t sample, rtc::ArrayView<const int16_t> samples) {
+bool AllSamplesAre(int16_t sample, InterleavedView<const int16_t> samples) {
   for (const auto s : samples) {
     if (s != sample) {
       return false;
@@ -34,10 +34,11 @@
 
 // Checks the values of samples in the AudioFrame buffer, regardless of whether
 // they're valid or not, and disregard the `muted()` state of the frame.
-// I.e. use `max_16bit_samples()` instead of the audio properties
-// `num_samples * samples_per_channel`.
+// I.e. use `max_16bit_samples()` instead of `data_view().size()`
 bool AllBufferSamplesAre(int16_t sample, const AudioFrame& frame) {
-  const auto* data = frame.data_view().data();
+  auto view = frame.data_view();
+  RTC_DCHECK(!view.empty());
+  const int16_t* data = &view.data()[0];
   for (size_t i = 0; i < frame.max_16bit_samples(); ++i) {
     if (data[i] != sample) {
       return false;
@@ -75,8 +76,9 @@
   AudioFrame frame;
   auto data = frame.mutable_data(kSamplesPerChannel, kNumChannelsMono);
   EXPECT_FALSE(frame.muted());
+  EXPECT_TRUE(IsMono(data));
   EXPECT_EQ(frame.data_view().size(), kSamplesPerChannel);
-  EXPECT_EQ(data.size(), kSamplesPerChannel);
+  EXPECT_EQ(SamplesPerChannel(data), kSamplesPerChannel);
   EXPECT_TRUE(AllSamplesAre(0, frame));
 }
 
diff --git a/api/audio/test/audio_view_unittest.cc b/api/audio/test/audio_view_unittest.cc
new file mode 100644
index 0000000..62749ab
--- /dev/null
+++ b/api/audio/test/audio_view_unittest.cc
@@ -0,0 +1,158 @@
+/*
+ *  Copyright 2024 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/audio/audio_view.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr const float kFloatStepIncrease = 0.5f;
+constexpr const int16_t kIntStepIncrease = 1;
+
+template <typename T>
+void Increment(float& t) {
+  t += kFloatStepIncrease;
+}
+
+template <typename T>
+void Increment(int16_t& t) {
+  t += kIntStepIncrease;
+}
+
+// Fills a given buffer with monotonically increasing values.
+template <typename T>
+void FillBuffer(rtc::ArrayView<T> buffer) {
+  T value = {};
+  for (T& t : buffer) {
+    Increment<T>(value);
+    t = value;
+  }
+}
+
+}  // namespace
+
+TEST(AudioViewTest, MonoView) {
+  const size_t kArraySize = 100u;
+  int16_t arr[kArraySize];
+  FillBuffer(rtc::ArrayView<int16_t>(arr));
+
+  MonoView<int16_t> mono(arr);
+  MonoView<const int16_t> const_mono(arr);
+  EXPECT_EQ(mono.size(), kArraySize);
+  EXPECT_EQ(const_mono.size(), kArraySize);
+  EXPECT_EQ(&mono[0], &const_mono[0]);
+  EXPECT_EQ(mono[0], arr[0]);
+
+  EXPECT_EQ(1u, NumChannels(mono));
+  EXPECT_EQ(1u, NumChannels(const_mono));
+  EXPECT_EQ(100u, SamplesPerChannel(mono));
+  EXPECT_TRUE(IsMono(mono));
+  EXPECT_TRUE(IsMono(const_mono));
+}
+
+TEST(AudioViewTest, InterleavedView) {
+  const size_t kArraySize = 100u;
+  int16_t arr[kArraySize];
+  FillBuffer(rtc::ArrayView<int16_t>(arr));
+
+  InterleavedView<int16_t> interleaved(arr, kArraySize, 1);
+  EXPECT_EQ(NumChannels(interleaved), 1u);
+  EXPECT_TRUE(IsMono(interleaved));
+  EXPECT_EQ(SamplesPerChannel(interleaved), kArraySize);
+  EXPECT_EQ(interleaved.AsMono().size(), kArraySize);
+  EXPECT_EQ(&interleaved.AsMono()[0], &arr[0]);
+  EXPECT_EQ(interleaved.AsMono(), interleaved.data());
+
+  // Basic iterator test.
+  int i = 0;
+  for (auto s : interleaved) {
+    EXPECT_EQ(s, arr[i++]);
+  }
+
+  interleaved = InterleavedView<int16_t>(arr, kArraySize / 2, 2);
+  InterleavedView<const int16_t> const_interleaved(arr, 50, 2);
+  EXPECT_EQ(NumChannels(interleaved), 2u);
+  EXPECT_EQ(NumChannels(const_interleaved), 2u);
+  EXPECT_EQ(&const_interleaved[0], &interleaved[0]);
+  EXPECT_TRUE(!IsMono(interleaved));
+  EXPECT_TRUE(!IsMono(const_interleaved));
+  EXPECT_EQ(SamplesPerChannel(interleaved), 50u);
+  EXPECT_EQ(SamplesPerChannel(const_interleaved), 50u);
+
+  interleaved = InterleavedView<int16_t>(arr, 4);
+  EXPECT_EQ(NumChannels(interleaved), 4u);
+  InterleavedView<const int16_t> const_interleaved2(interleaved);
+  EXPECT_EQ(NumChannels(const_interleaved2), 4u);
+  EXPECT_EQ(SamplesPerChannel(interleaved), 25u);
+
+  const_interleaved2 = interleaved;
+  EXPECT_EQ(NumChannels(const_interleaved2), 4u);
+  EXPECT_EQ(&const_interleaved2[0], &interleaved[0]);
+}
+
+TEST(AudioViewTest, DeinterleavedView) {
+  const size_t kArraySize = 100u;
+  int16_t arr[kArraySize] = {};
+  DeinterleavedView<int16_t> di(arr, 10, 10);
+  DeinterleavedView<const int16_t> const_di(arr, 10, 10);
+  EXPECT_EQ(NumChannels(di), 10u);
+  EXPECT_EQ(SamplesPerChannel(di), 10u);
+  EXPECT_TRUE(!IsMono(di));
+  EXPECT_EQ(const_di[5][1], di[5][1]);  // Spot check.
+  // For deinterleaved views, although they may hold multiple channels,
+  // the AsMono() method is still available and returns the first channel
+  // in the view.
+  auto mono_ch = di.AsMono();
+  EXPECT_EQ(NumChannels(mono_ch), 1u);
+  EXPECT_EQ(SamplesPerChannel(mono_ch), 10u);
+  EXPECT_EQ(di[0], mono_ch);  // first channel should be same as mono.
+
+  di = DeinterleavedView<int16_t>(arr, 50, 2);
+  // Test assignment.
+  const_di = di;
+  EXPECT_EQ(&di.AsMono()[0], &const_di.AsMono()[0]);
+
+  // Access the second channel in the deinterleaved view.
+  // The start of the second channel should be directly after the first channel.
+  // The memory width of each channel is held by the `stride()` member which
+  // by default is the same value as samples per channel.
+  mono_ch = di[1];
+  EXPECT_EQ(SamplesPerChannel(mono_ch), 50u);
+  EXPECT_EQ(&mono_ch[0], &arr[di.samples_per_channel()]);
+}
+
+TEST(AudioViewTest, CopySamples) {
+  const size_t kArraySize = 100u;
+  int16_t source_arr[kArraySize] = {};
+  int16_t dest_arr[kArraySize] = {};
+  FillBuffer(rtc::ArrayView<int16_t>(source_arr));
+
+  InterleavedView<const int16_t> source(source_arr, 2);
+  InterleavedView<int16_t> destination(dest_arr, 2);
+
+  static_assert(IsInterleavedView(source) == IsInterleavedView(destination),
+                "");
+
+  // Values in `dest_arr` should all be 0, none of the values in `source_arr`
+  // should be 0.
+  for (size_t i = 0; i < kArraySize; ++i) {
+    ASSERT_EQ(dest_arr[i], 0);
+    ASSERT_NE(source_arr[i], 0);
+  }
+
+  CopySamples(destination, source);
+  for (size_t i = 0; i < kArraySize; ++i) {
+    ASSERT_EQ(dest_arr[i], source_arr[i]) << "i == " << i;
+  }
+}
+}  // namespace webrtc
diff --git a/audio/audio_transport_impl.cc b/audio/audio_transport_impl.cc
index d1ecb0f..d7bdd40 100644
--- a/audio/audio_transport_impl.cc
+++ b/audio/audio_transport_impl.cc
@@ -70,20 +70,21 @@
 int Resample(const AudioFrame& frame,
              const int destination_sample_rate,
              PushResampler<int16_t>* resampler,
-             rtc::ArrayView<int16_t> destination) {
+             InterleavedView<int16_t> destination) {
   TRACE_EVENT2("webrtc", "Resample", "frame sample rate", frame.sample_rate_hz_,
                "destination_sample_rate", destination_sample_rate);
-  const int number_of_channels = static_cast<int>(frame.num_channels_);
-  const int target_number_of_samples_per_channel =
-      destination_sample_rate / 100;
-  RTC_CHECK_EQ(destination.size(),
+  const size_t target_number_of_samples_per_channel =
+      SampleRateToDefaultChannelSize(destination_sample_rate);
+  RTC_DCHECK_EQ(NumChannels(destination), frame.num_channels_);
+  RTC_DCHECK_EQ(SamplesPerChannel(destination),
+                target_number_of_samples_per_channel);
+  RTC_CHECK_EQ(destination.data().size(),
                frame.num_channels_ * target_number_of_samples_per_channel);
 
   resampler->InitializeIfNeeded(frame.sample_rate_hz_, destination_sample_rate,
-                                number_of_channels);
+                                static_cast<int>(frame.num_channels()));
 
-  // TODO(yujo): make resampler take an AudioFrame, and add special case
-  // handling of muted frames.
+  // TODO(yujo): Add special case handling of muted frames.
   return resampler->Resample(frame.data_view(), destination);
 }
 }  // namespace
@@ -235,8 +236,8 @@
 
   nSamplesOut =
       Resample(mixed_frame_, samplesPerSec, &render_resampler_,
-               rtc::ArrayView<int16_t>(static_cast<int16_t*>(audioSamples),
-                                       nSamples * nChannels));
+               InterleavedView<int16_t>(static_cast<int16_t*>(audioSamples),
+                                        nSamples, nChannels));
   RTC_DCHECK_EQ(nSamplesOut, nChannels * nSamples);
   return 0;
 }
@@ -268,8 +269,8 @@
 
   int output_samples =
       Resample(mixed_frame_, sample_rate, &render_resampler_,
-               rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_data),
-                                       number_of_channels * number_of_frames));
+               InterleavedView<int16_t>(static_cast<int16_t*>(audio_data),
+                                        number_of_frames, number_of_channels));
   RTC_DCHECK_EQ(output_samples, number_of_channels * number_of_frames);
 }
 
diff --git a/audio/remix_resample.cc b/audio/remix_resample.cc
index 06752c2..26d0c85 100644
--- a/audio/remix_resample.cc
+++ b/audio/remix_resample.cc
@@ -50,11 +50,10 @@
         << "dst_frame->num_channels_: " << dst_frame->num_channels_;
 
     AudioFrameOperations::DownmixChannels(
-        rtc::ArrayView<const int16_t>(src_data,
-                                      num_channels * samples_per_channel),
-        num_channels, samples_per_channel, dst_frame->num_channels_,
-        rtc::ArrayView<int16_t>(&downmixed_audio[0], dst_frame->num_channels_ *
-                                                         samples_per_channel));
+        InterleavedView<const int16_t>(src_data, samples_per_channel,
+                                       num_channels),
+        InterleavedView<int16_t>(&downmixed_audio[0], samples_per_channel,
+                                 dst_frame->num_channels_));
     audio_ptr = downmixed_audio;
     audio_ptr_num_channels = dst_frame->num_channels_;
   }
@@ -71,30 +70,32 @@
   // resampler to return output length without doing the resample, so we know
   // how much to zero here; or 2) make resampler accept a hint that the input is
   // zeroed.
-  const size_t src_length = samples_per_channel * audio_ptr_num_channels;
+
   // Ensure the `samples_per_channel_` member is set correctly based on the
   // destination sample rate, number of channels and assumed 10ms buffer size.
   // TODO(tommi): Could we rather assume that this has been done by the caller?
   dst_frame->SetSampleRateAndChannelSize(dst_frame->sample_rate_hz_);
 
+  InterleavedView<const int16_t> src_view(audio_ptr, samples_per_channel,
+                                          audio_ptr_num_channels);
+  // Stash away the originally requested number of channels. Then provide
+  // `dst_frame` as a target buffer with the same number of channels as the
+  // source.
+  auto original_dst_number_of_channels = dst_frame->num_channels_;
   int out_length = resampler->Resample(
-      rtc::ArrayView<const int16_t>(audio_ptr, src_length),
-      dst_frame->mutable_data(dst_frame->samples_per_channel_,
-                              dst_frame->num_channels_));
-  if (out_length == -1) {
-    RTC_FATAL() << "Resample failed: audio_ptr = " << audio_ptr
-                << ", src_length = " << src_length
-                << ", dst_frame->mutable_data() = "
-                << dst_frame->mutable_data();
-  }
+      src_view, dst_frame->mutable_data(dst_frame->samples_per_channel_,
+                                        src_view.num_channels()));
+  RTC_CHECK_NE(out_length, -1) << "Resample failed: audio_ptr = " << audio_ptr
+                               << ", src_length = " << src_view.data().size();
 
-  dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels;
+  RTC_DCHECK_EQ(dst_frame->samples_per_channel(),
+                out_length / audio_ptr_num_channels);
 
   // Upmix after resampling.
-  if (num_channels == 1 && dst_frame->num_channels_ == 2) {
+  if (num_channels == 1 && original_dst_number_of_channels == 2) {
     // The audio in dst_frame really is mono at this point; MonoToStereo will
     // set this back to stereo.
-    dst_frame->num_channels_ = 1;
+    RTC_DCHECK_EQ(dst_frame->num_channels_, 1);
     AudioFrameOperations::UpmixChannels(2, dst_frame);
   }
 }
diff --git a/audio/utility/audio_frame_operations.cc b/audio/utility/audio_frame_operations.cc
index c4582a4..cf709e7 100644
--- a/audio/utility/audio_frame_operations.cc
+++ b/audio/utility/audio_frame_operations.cc
@@ -29,15 +29,17 @@
 
 }  // namespace
 
-void AudioFrameOperations::QuadToStereo(rtc::ArrayView<const int16_t> src_audio,
-                                        size_t samples_per_channel,
-                                        rtc::ArrayView<int16_t> dst_audio) {
-  RTC_DCHECK_EQ(src_audio.size(), samples_per_channel * 4);
-  RTC_DCHECK_EQ(dst_audio.size(), samples_per_channel * 2);
-  for (size_t i = 0; i < samples_per_channel; i++) {
-    dst_audio[i * 2] =
+void AudioFrameOperations::QuadToStereo(
+    InterleavedView<const int16_t> src_audio,
+    InterleavedView<int16_t> dst_audio) {
+  RTC_DCHECK_EQ(NumChannels(src_audio), 4);
+  RTC_DCHECK_EQ(NumChannels(dst_audio), 2);
+  RTC_DCHECK_EQ(SamplesPerChannel(src_audio), SamplesPerChannel(dst_audio));
+  for (size_t i = 0; i < SamplesPerChannel(src_audio); ++i) {
+    auto dst_frame = i * 2;
+    dst_audio[dst_frame] =
         (static_cast<int32_t>(src_audio[4 * i]) + src_audio[4 * i + 1]) >> 1;
-    dst_audio[i * 2 + 1] =
+    dst_audio[dst_frame + 1] =
         (static_cast<int32_t>(src_audio[4 * i + 2]) + src_audio[4 * i + 3]) >>
         1;
   }
@@ -52,9 +54,12 @@
                 AudioFrame::kMaxDataSizeSamples);
 
   if (!frame->muted()) {
-    auto current_data = frame->data_view();
-    QuadToStereo(current_data, frame->samples_per_channel_,
-                 frame->mutable_data(frame->samples_per_channel_, 2));
+    // Note that `src` and `dst` will map in to the same buffer, but the call
+    // to `mutable_data()` changes the layout of `frame`, so `src` and `dst`
+    // will have different dimensions (important to call `data_view()` first).
+    auto src = frame->data_view();
+    auto dst = frame->mutable_data(frame->samples_per_channel_, 2);
+    QuadToStereo(src, dst);
   } else {
     frame->num_channels_ = 2;
   }
@@ -63,21 +68,19 @@
 }
 
 void AudioFrameOperations::DownmixChannels(
-    rtc::ArrayView<const int16_t> src_audio,
-    size_t src_channels,
-    size_t samples_per_channel,
-    size_t dst_channels,
-    rtc::ArrayView<int16_t> dst_audio) {
-  RTC_DCHECK_EQ(src_audio.size(), src_channels * samples_per_channel);
-  RTC_DCHECK_EQ(dst_audio.size(), dst_channels * samples_per_channel);
-  if (src_channels > 1 && dst_channels == 1) {
-    DownmixInterleavedToMono(src_audio.data(), samples_per_channel,
-                             src_channels, &dst_audio[0]);
-  } else if (src_channels == 4 && dst_channels == 2) {
-    QuadToStereo(src_audio, samples_per_channel, dst_audio);
+    InterleavedView<const int16_t> src_audio,
+    InterleavedView<int16_t> dst_audio) {
+  RTC_DCHECK_EQ(SamplesPerChannel(src_audio), SamplesPerChannel(dst_audio));
+  if (NumChannels(src_audio) > 1 && IsMono(dst_audio)) {
+    // TODO(tommi): change DownmixInterleavedToMono to support InterleavedView
+    // and MonoView.
+    DownmixInterleavedToMono(&src_audio.data()[0], SamplesPerChannel(src_audio),
+                             NumChannels(src_audio), &dst_audio.data()[0]);
+  } else if (NumChannels(src_audio) == 4 && NumChannels(dst_audio) == 2) {
+    QuadToStereo(src_audio, dst_audio);
   } else {
-    RTC_DCHECK_NOTREACHED() << "src_channels: " << src_channels
-                            << ", dst_channels: " << dst_channels;
+    RTC_DCHECK_NOTREACHED() << "src_channels: " << NumChannels(src_audio)
+                            << ", dst_channels: " << NumChannels(dst_audio);
   }
 }
 
diff --git a/audio/utility/audio_frame_operations.h b/audio/utility/audio_frame_operations.h
index 3d1e996..41ea05e 100644
--- a/audio/utility/audio_frame_operations.h
+++ b/audio/utility/audio_frame_operations.h
@@ -28,9 +28,8 @@
   // Downmixes 4 channels `src_audio` to stereo `dst_audio`. This is an in-place
   // operation, meaning `src_audio` and `dst_audio` may point to the same
   // buffer.
-  static void QuadToStereo(rtc::ArrayView<const int16_t> src_audio,
-                           size_t samples_per_channel,
-                           rtc::ArrayView<int16_t> dst_audio);
+  static void QuadToStereo(InterleavedView<const int16_t> src_audio,
+                           InterleavedView<int16_t> dst_audio);
 
   // `frame.num_channels_` will be updated. This version checks that
   // `num_channels_` is 4 channels.
@@ -40,11 +39,8 @@
   // This is an in-place operation, meaning `src_audio` and `dst_audio`
   // may point to the same buffer. Supported channel combinations are
   // Stereo to Mono, Quad to Mono, and Quad to Stereo.
-  static void DownmixChannels(rtc::ArrayView<const int16_t> src_audio,
-                              size_t src_channels,
-                              size_t samples_per_channel,
-                              size_t dst_channels,
-                              rtc::ArrayView<int16_t> dst_audio);
+  static void DownmixChannels(InterleavedView<const int16_t> src_audio,
+                              InterleavedView<int16_t> dst_audio);
 
   // `frame.num_channels_` will be updated. This version checks that
   // `num_channels_` and `dst_channels` are valid and performs relevant downmix.
diff --git a/audio/utility/audio_frame_operations_unittest.cc b/audio/utility/audio_frame_operations_unittest.cc
index d50b685..feb86e8 100644
--- a/audio/utility/audio_frame_operations_unittest.cc
+++ b/audio/utility/audio_frame_operations_unittest.cc
@@ -31,7 +31,7 @@
                   int16_t ch3,
                   int16_t ch4,
                   AudioFrame* frame) {
-  rtc::ArrayView<int16_t> frame_data =
+  InterleavedView<int16_t> frame_data =
       frame->mutable_data(frame->samples_per_channel_, 4);
   for (size_t i = 0; i < frame->samples_per_channel_ * 4; i += 4) {
     frame_data[i] = ch1;
@@ -42,7 +42,7 @@
 }
 
 void SetFrameData(int16_t left, int16_t right, AudioFrame* frame) {
-  rtc::ArrayView<int16_t> frame_data =
+  InterleavedView<int16_t> frame_data =
       frame->mutable_data(frame->samples_per_channel_, 2);
   for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
     frame_data[i] = left;
@@ -51,7 +51,7 @@
 }
 
 void SetFrameData(int16_t data, AudioFrame* frame) {
-  rtc::ArrayView<int16_t> frame_data =
+  InterleavedView<int16_t> frame_data =
       frame->mutable_data(frame->samples_per_channel_, 1);
   for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
        i++) {
@@ -167,7 +167,7 @@
   SetFrameData(4, 2, &frame_);
 
   AudioFrameOperations::DownmixChannels(
-      frame_.data_view(), 2, frame_.samples_per_channel_, 1,
+      frame_.data_view(),
       target_frame.mutable_data(frame_.samples_per_channel_, 1));
 
   AudioFrame mono_frame;
@@ -211,7 +211,7 @@
   SetFrameData(4, 2, 6, 8, &frame_);
 
   AudioFrameOperations::DownmixChannels(
-      frame_.data_view(), 4, frame_.samples_per_channel_, 1,
+      frame_.data_view(),
       target_frame.mutable_data(frame_.samples_per_channel_, 1));
   AudioFrame mono_frame;
   mono_frame.samples_per_channel_ = 320;
@@ -259,7 +259,7 @@
   SetFrameData(4, 2, 6, 8, &frame_);
 
   AudioFrameOperations::QuadToStereo(
-      frame_.data_view(), frame_.samples_per_channel_,
+      frame_.data_view(),
       target_frame.mutable_data(frame_.samples_per_channel_, 2));
   AudioFrame stereo_frame;
   stereo_frame.samples_per_channel_ = 320;
diff --git a/common_audio/BUILD.gn b/common_audio/BUILD.gn
index 9b7b55e..3603fdf 100644
--- a/common_audio/BUILD.gn
+++ b/common_audio/BUILD.gn
@@ -46,6 +46,7 @@
     ":common_audio_c",
     ":sinc_resampler",
     "../api:array_view",
+    "../api/audio:audio_frame_api",
     "../rtc_base:checks",
     "../rtc_base:gtest_prod",
     "../rtc_base:logging",
diff --git a/common_audio/resampler/include/push_resampler.h b/common_audio/resampler/include/push_resampler.h
index 35783b6..a899655 100644
--- a/common_audio/resampler/include/push_resampler.h
+++ b/common_audio/resampler/include/push_resampler.h
@@ -14,7 +14,7 @@
 #include <memory>
 #include <vector>
 
-#include "api/array_view.h"
+#include "api/audio/audio_view.h"
 
 namespace webrtc {
 
@@ -37,7 +37,7 @@
 
   // Returns the total number of samples provided in destination (e.g. 32 kHz,
   // 2 channel audio gives 640 samples).
-  int Resample(rtc::ArrayView<const T> src, rtc::ArrayView<T> dst);
+  int Resample(InterleavedView<const T> src, InterleavedView<T> dst);
 
  private:
   int src_sample_rate_hz_;
diff --git a/common_audio/resampler/push_resampler.cc b/common_audio/resampler/push_resampler.cc
index 0af5ec7..adcd518 100644
--- a/common_audio/resampler/push_resampler.cc
+++ b/common_audio/resampler/push_resampler.cc
@@ -15,6 +15,7 @@
 
 #include <memory>
 
+#include "api/audio/audio_frame.h"
 #include "common_audio/include/audio_util.h"
 #include "common_audio/resampler/push_sinc_resampler.h"
 #include "rtc_base/checks.h"
@@ -73,47 +74,45 @@
 }
 
 template <typename T>
-int PushResampler<T>::Resample(rtc::ArrayView<const T> src,
-                               rtc::ArrayView<T> dst) {
-  // These checks used to be factored out of this template function due to
-  // Windows debug build issues with clang. http://crbug.com/615050
-  const size_t src_size_10ms = (src_sample_rate_hz_ / 100) * num_channels_;
-  const size_t dst_size_10ms = (dst_sample_rate_hz_ / 100) * num_channels_;
-  RTC_DCHECK_EQ(src.size(), src_size_10ms);
-  RTC_DCHECK_GE(dst.size(), dst_size_10ms);
+int PushResampler<T>::Resample(InterleavedView<const T> src,
+                               InterleavedView<T> dst) {
+  RTC_DCHECK_EQ(NumChannels(src), num_channels_);
+  RTC_DCHECK_EQ(NumChannels(dst), num_channels_);
+  RTC_DCHECK_EQ(SamplesPerChannel(src),
+                SampleRateToDefaultChannelSize(src_sample_rate_hz_));
+  RTC_DCHECK_EQ(SamplesPerChannel(dst),
+                SampleRateToDefaultChannelSize(dst_sample_rate_hz_));
 
   if (src_sample_rate_hz_ == dst_sample_rate_hz_) {
     // The old resampler provides this memcpy facility in the case of matching
     // sample rates, so reproduce it here for the sinc resampler.
-    memcpy(dst.data(), src.data(), src.size() * sizeof(T));
-    return static_cast<int>(src.size());
+    CopySamples(dst, src);
+    return static_cast<int>(src.data().size());
   }
 
-  const size_t src_length_mono = src.size() / num_channels_;
-  const size_t dst_capacity_mono = dst.size() / num_channels_;
-
   for (size_t ch = 0; ch < num_channels_; ++ch) {
     channel_data_array_[ch] = channel_resamplers_[ch].source.data();
   }
 
-  Deinterleave(src.data(), src_length_mono, num_channels_,
+  // TODO: b/335805780 - Deinterleave should accept InterleavedView<> as input.
+  Deinterleave(&src.data()[0], src.samples_per_channel(), src.num_channels(),
                channel_data_array_.data());
 
-  size_t dst_length_mono = 0;
-
   for (auto& resampler : channel_resamplers_) {
-    dst_length_mono = resampler.resampler->Resample(
-        resampler.source.data(), src_length_mono, resampler.destination.data(),
-        dst_capacity_mono);
+    size_t dst_length_mono = resampler.resampler->Resample(
+        resampler.source.data(), src.samples_per_channel(),
+        resampler.destination.data(), dst.samples_per_channel());
+    RTC_DCHECK_EQ(dst_length_mono, dst.samples_per_channel());
   }
 
   for (size_t ch = 0; ch < num_channels_; ++ch) {
     channel_data_array_[ch] = channel_resamplers_[ch].destination.data();
   }
 
-  Interleave(channel_data_array_.data(), dst_length_mono, num_channels_,
-             dst.data());
-  return static_cast<int>(dst_length_mono * num_channels_);
+  // TODO: b/335805780 - Interleave should accept InterleavedView<> as dst.
+  Interleave(channel_data_array_.data(), dst.samples_per_channel(),
+             num_channels_, &dst[0]);
+  return static_cast<int>(dst.size());
 }
 
 // Explictly generate required instantiations.
diff --git a/modules/audio_coding/acm2/acm_remixing_unittest.cc b/modules/audio_coding/acm2/acm_remixing_unittest.cc
index a1a816f..7c0c0ef 100644
--- a/modules/audio_coding/acm2/acm_remixing_unittest.cc
+++ b/modules/audio_coding/acm2/acm_remixing_unittest.cc
@@ -28,11 +28,8 @@
 TEST(AcmRemixing, DownMixFrame) {
   std::vector<int16_t> out(480, 0);
   AudioFrame in;
-  in.num_channels_ = 2;
-  in.samples_per_channel_ = 480;
-
-  int16_t* const in_data = in.mutable_data();
-  for (size_t k = 0; k < in.samples_per_channel_; ++k) {
+  InterleavedView<int16_t> const in_data = in.mutable_data(480, 2);
+  for (size_t k = 0; k < in_data.samples_per_channel(); ++k) {
     in_data[2 * k] = 2;
     in_data[2 * k + 1] = 0;
   }
diff --git a/modules/audio_coding/acm2/acm_resampler.cc b/modules/audio_coding/acm2/acm_resampler.cc
index bcac7b6..6e2a3bc 100644
--- a/modules/audio_coding/acm2/acm_resampler.cc
+++ b/modules/audio_coding/acm2/acm_resampler.cc
@@ -12,6 +12,7 @@
 
 #include <string.h>
 
+#include "api/audio/audio_frame.h"
 #include "rtc_base/logging.h"
 
 namespace webrtc {
@@ -27,14 +28,20 @@
                                  size_t num_audio_channels,
                                  size_t out_capacity_samples,
                                  int16_t* out_audio) {
-  size_t in_length = in_freq_hz * num_audio_channels / 100;
+  InterleavedView<const int16_t> src(
+      in_audio, SampleRateToDefaultChannelSize(in_freq_hz), num_audio_channels);
+  InterleavedView<int16_t> dst(out_audio,
+                               SampleRateToDefaultChannelSize(out_freq_hz),
+                               num_audio_channels);
+  RTC_DCHECK_GE(out_capacity_samples, dst.size());
   if (in_freq_hz == out_freq_hz) {
-    if (out_capacity_samples < in_length) {
+    if (out_capacity_samples < src.data().size()) {
       RTC_DCHECK_NOTREACHED();
       return -1;
     }
-    memcpy(out_audio, in_audio, in_length * sizeof(int16_t));
-    return static_cast<int>(in_length / num_audio_channels);
+    CopySamples(dst, src);
+    RTC_DCHECK_EQ(dst.samples_per_channel(), src.samples_per_channel());
+    return static_cast<int>(dst.samples_per_channel());
   }
 
   if (resampler_.InitializeIfNeeded(in_freq_hz, out_freq_hz,
@@ -45,17 +52,16 @@
     return -1;
   }
 
-  int out_length = resampler_.Resample(
-      rtc::ArrayView<const int16_t>(in_audio, in_length),
-      rtc::ArrayView<int16_t>(out_audio, out_capacity_samples));
+  int out_length = resampler_.Resample(src, dst);
   if (out_length == -1) {
-    RTC_LOG(LS_ERROR) << "Resample(" << in_audio << ", " << in_length << ", "
-                      << out_audio << ", " << out_capacity_samples
+    RTC_LOG(LS_ERROR) << "Resample(" << in_audio << ", " << src.data().size()
+                      << ", " << out_audio << ", " << out_capacity_samples
                       << ") failed.";
     return -1;
   }
-
-  return static_cast<int>(out_length / num_audio_channels);
+  RTC_DCHECK_EQ(out_length, dst.size());
+  RTC_DCHECK_EQ(out_length / num_audio_channels, dst.samples_per_channel());
+  return static_cast<int>(dst.samples_per_channel());
 }
 
 }  // namespace acm2
diff --git a/modules/audio_coding/acm2/acm_resampler.h b/modules/audio_coding/acm2/acm_resampler.h
index 96ba93a..cc323ce 100644
--- a/modules/audio_coding/acm2/acm_resampler.h
+++ b/modules/audio_coding/acm2/acm_resampler.h
@@ -24,6 +24,7 @@
   ACMResampler();
   ~ACMResampler();
 
+  // TODO: b/335805780 - Change to accept InterleavedView<>.
   int Resample10Msec(const int16_t* in_audio,
                      int in_freq_hz,
                      int out_freq_hz,
diff --git a/modules/audio_coding/acm2/audio_coding_module_unittest.cc b/modules/audio_coding/acm2/audio_coding_module_unittest.cc
index 498e5aa..75e38bc 100644
--- a/modules/audio_coding/acm2/audio_coding_module_unittest.cc
+++ b/modules/audio_coding/acm2/audio_coding_module_unittest.cc
@@ -263,8 +263,7 @@
 TEST_F(AudioCodingModuleTestOldApiDeathTest, FailOnZeroDesiredFrequency) {
   AudioFrame audio_frame;
   bool muted;
-  RTC_EXPECT_DEATH(acm_receiver_->GetAudio(0, &audio_frame, &muted),
-                   "dst_sample_rate_hz");
+  RTC_EXPECT_DEATH(acm_receiver_->GetAudio(0, &audio_frame, &muted), "");
 }
 #endif
 
diff --git a/modules/audio_processing/agc2/vad_wrapper.cc b/modules/audio_processing/agc2/vad_wrapper.cc
index b391224..331fd36 100644
--- a/modules/audio_processing/agc2/vad_wrapper.cc
+++ b/modules/audio_processing/agc2/vad_wrapper.cc
@@ -104,7 +104,16 @@
   }
   // Resample the first channel of `frame`.
   RTC_DCHECK_EQ(frame.samples_per_channel(), frame_size_);
-  resampler_.Resample(frame.channel(0), resampled_buffer_);
+
+  // TODO: b/335805780 - channel() should return a MonoView<>, for which
+  // there should be a Resample() implementation. There's no need to
+  // "deinterleave" a mono buffer, which is what Resample() currently does,
+  // so here we should be able to directly resample the channel buffer.
+  auto channel = frame.channel(0);
+  InterleavedView<const float> src(channel.data(), channel.size(), 1);
+  InterleavedView<float> dst(resampled_buffer_.data(), resampled_buffer_.size(),
+                             1);
+  resampler_.Resample(src, dst);
 
   return vad_->Analyze(resampled_buffer_);
 }
diff --git a/modules/audio_processing/audio_processing_unittest.cc b/modules/audio_processing/audio_processing_unittest.cc
index 819e980..02e87ec 100644
--- a/modules/audio_processing/audio_processing_unittest.cc
+++ b/modules/audio_processing/audio_processing_unittest.cc
@@ -2150,10 +2150,12 @@
       ASSERT_TRUE(out_file != NULL);
       ASSERT_TRUE(ref_file != NULL);
 
-      const size_t ref_length =
-          AudioProcessing::GetFrameSize(ref_rate) * out_num;
-      const size_t out_length =
-          AudioProcessing::GetFrameSize(out_rate) * out_num;
+      const size_t ref_samples_per_channel =
+          AudioProcessing::GetFrameSize(ref_rate);
+      const size_t ref_length = ref_samples_per_channel * out_num;
+      const size_t out_samples_per_channel =
+          AudioProcessing::GetFrameSize(out_rate);
+      const size_t out_length = out_samples_per_channel * out_num;
       // Data from the reference file.
       std::unique_ptr<float[]> ref_data(new float[ref_length]);
       // Data from the output file.
@@ -2196,10 +2198,12 @@
         if (out_rate != ref_rate) {
           // Resample the output back to its internal processing rate if
           // necessary.
+          InterleavedView<const float> src(out_ptr, out_samples_per_channel,
+                                           out_num);
+          InterleavedView<float> dst(cmp_data.get(), ref_samples_per_channel,
+                                     out_num);
           ASSERT_EQ(ref_length,
-                    static_cast<size_t>(resampler.Resample(
-                        rtc::ArrayView<const float>(out_ptr, out_length),
-                        rtc::ArrayView<float>(cmp_data.get(), ref_length))));
+                    static_cast<size_t>(resampler.Resample(src, dst)));
           out_ptr = cmp_data.get();
         }