Align Int16FrameData test class with AudioFrame

This updates the test code for interleaved audio frames so that
Int16FrameData exposes the same properties and types as AudioFrame
instead of duplicating them.

The CL also moves the helper code in audio_processing_unittest.cc that
modifies the buffer owned by Int16FrameData into Int16FrameData itself.

Bug: none
Change-Id: Iab37227deb302bf4fc832633d312262e5249caad
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/355960
Reviewed-by: Per Åhgren <peah@webrtc.org>
Commit-Queue: Tomas Gunnarsson <tommi@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#43424}
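
For context, a minimal usage sketch of the updated Int16FrameData API
(illustrative only and not part of the patch; the Example() wrapper is
hypothetical, while the member names all come from test_utils.h in the
diff below):

  #include "modules/audio_processing/test/test_utils.h"

  void Example() {
    webrtc::Int16FrameData frame;
    // SetProperties() implies the sample rate: 320 samples/channel -> 32 kHz.
    frame.SetProperties(/*samples_per_channel=*/320, /*num_channels=*/2);
    frame.FillStereoData(1000, 2000);  // Replaces SetFrameTo(&frame, 1000, 2000).
    frame.Scale(0.25f);                // Replaces ScaleFrame(&frame, 0.25).

    webrtc::Int16FrameData copy;
    copy.CopyFrom(frame);              // Copies layout, sample rate and samples.
    bool equal = frame.IsEqual(copy);  // Replaces FrameDataAreEqual().

    // Interleaved access now goes through InterleavedView rather than the
    // GetFrameData()/GetMutableFrameData() free functions.
    webrtc::InterleavedView<int16_t> view = frame.view();
    size_t total_samples = view.size();  // samples_per_channel() * num_channels().
    (void)equal;
    (void)total_samples;
  }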
diff --git a/modules/audio_processing/audio_processing_unittest.cc b/modules/audio_processing/audio_processing_unittest.cc
index db4a819..266cc56 100644
--- a/modules/audio_processing/audio_processing_unittest.cc
+++ b/modules/audio_processing/audio_processing_unittest.cc
@@ -113,56 +113,6 @@
   }
 }
 
-void SetFrameTo(Int16FrameData* frame, int16_t value) {
-  for (size_t i = 0; i < frame->samples_per_channel * frame->num_channels;
-       ++i) {
-    frame->data[i] = value;
-  }
-}
-
-void SetFrameTo(Int16FrameData* frame, int16_t left, int16_t right) {
-  ASSERT_EQ(2u, frame->num_channels);
-  for (size_t i = 0; i < frame->samples_per_channel * 2; i += 2) {
-    frame->data[i] = left;
-    frame->data[i + 1] = right;
-  }
-}
-
-void ScaleFrame(Int16FrameData* frame, float scale) {
-  for (size_t i = 0; i < frame->samples_per_channel * frame->num_channels;
-       ++i) {
-    frame->data[i] = FloatS16ToS16(frame->data[i] * scale);
-  }
-}
-
-bool FrameDataAreEqual(const Int16FrameData& frame1,
-                       const Int16FrameData& frame2) {
-  if (frame1.samples_per_channel != frame2.samples_per_channel) {
-    return false;
-  }
-  if (frame1.num_channels != frame2.num_channels) {
-    return false;
-  }
-  if (memcmp(
-          frame1.data.data(), frame2.data.data(),
-          frame1.samples_per_channel * frame1.num_channels * sizeof(int16_t))) {
-    return false;
-  }
-  return true;
-}
-
-rtc::ArrayView<int16_t> GetMutableFrameData(Int16FrameData* frame) {
-  int16_t* ptr = frame->data.data();
-  const size_t len = frame->samples_per_channel * frame->num_channels;
-  return rtc::ArrayView<int16_t>(ptr, len);
-}
-
-rtc::ArrayView<const int16_t> GetFrameData(const Int16FrameData& frame) {
-  const int16_t* ptr = frame.data.data();
-  const size_t len = frame.samples_per_channel * frame.num_channels;
-  return rtc::ArrayView<const int16_t>(ptr, len);
-}
-
 void EnableAllAPComponents(AudioProcessing* ap) {
   AudioProcessing::Config apm_config = ap->GetConfig();
   apm_config.echo_canceller.enabled = true;
@@ -194,9 +144,8 @@
 }
 
 int16_t MaxAudioFrame(const Int16FrameData& frame) {
-  const size_t length = frame.samples_per_channel * frame.num_channels;
   int16_t max_data = AbsValue(frame.data[0]);
-  for (size_t i = 1; i < length; i++) {
+  for (size_t i = 1; i < frame.size(); i++) {
     max_data = std::max(max_data, AbsValue(frame.data[i]));
   }
 
@@ -508,11 +457,11 @@
 
 void ApmTest::Init(AudioProcessing* ap) {
   ASSERT_EQ(
-      kNoErr,
-      ap->Initialize({{{frame_.sample_rate_hz, frame_.num_channels},
+      AudioProcessing::kNoError,
+      ap->Initialize({{{frame_.sample_rate_hz, frame_.num_channels()},
                        {output_sample_rate_hz_, num_output_channels_},
-                       {revframe_.sample_rate_hz, revframe_.num_channels},
-                       {revframe_.sample_rate_hz, revframe_.num_channels}}}));
+                       {revframe_.sample_rate_hz, revframe_.num_channels()},
+                       {revframe_.sample_rate_hz, revframe_.num_channels()}}}));
 }
 
 void ApmTest::Init(int sample_rate_hz,
@@ -566,7 +515,7 @@
                         Int16FrameData* frame,
                         ChannelBuffer<float>* cb) {
   // The files always contain stereo audio.
-  size_t frame_size = frame->samples_per_channel * 2;
+  size_t frame_size = frame->samples_per_channel() * 2;
   size_t read_count =
       fread(frame->data.data(), sizeof(int16_t), frame_size, file);
   if (read_count != frame_size) {
@@ -575,9 +524,9 @@
     return false;  // This is expected.
   }
 
-  if (frame->num_channels == 1) {
+  if (frame->num_channels() == 1) {
     MixStereoToMono(frame->data.data(), frame->data.data(),
-                    frame->samples_per_channel);
+                    frame->samples_per_channel());
   }
 
   if (cb) {
@@ -609,13 +558,13 @@
   if (format == kIntFormat) {
     return apm_->ProcessStream(
         frame_.data.data(),
-        StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-        StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+        StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+        StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
         frame_.data.data());
   }
   return apm_->ProcessStream(
       float_cb_->channels(),
-      StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+      StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
       StreamConfig(output_sample_rate_hz_, num_output_channels_),
       float_cb_->channels());
 }
@@ -624,13 +573,13 @@
   if (format == kIntFormat) {
     return apm_->ProcessReverseStream(
         revframe_.data.data(),
-        StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
-        StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
+        StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels()),
+        StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels()),
         revframe_.data.data());
   }
   return apm_->AnalyzeReverseStream(
       revfloat_cb_->channels(),
-      StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels));
+      StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels()));
 }
 
 void ApmTest::ProcessDelayVerificationTest(int delay_ms,
@@ -644,9 +593,9 @@
   bool causal = true;
 
   tmp_frame.CopyFrom(revframe_);
-  SetFrameTo(&tmp_frame, 0);
+  tmp_frame.FillData(0);
 
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+  EXPECT_EQ(AudioProcessing::kNoError, apm_->Initialize());
   // Initialize the `frame_queue` with empty frames.
   int frame_delay = delay_ms / 10;
   while (frame_delay < 0) {
@@ -682,20 +631,22 @@
       process_frame = &tmp_frame;
       process_frame->CopyFrom(*frame);
     }
-    EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(
-                                  reverse_frame->data.data(),
-                                  StreamConfig(reverse_frame->sample_rate_hz,
-                                               reverse_frame->num_channels),
-                                  StreamConfig(reverse_frame->sample_rate_hz,
-                                               reverse_frame->num_channels),
-                                  reverse_frame->data.data()));
-    EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(system_delay_ms));
-    EXPECT_EQ(apm_->kNoError,
+    EXPECT_EQ(
+        AudioProcessing::kNoError,
+        apm_->ProcessReverseStream(reverse_frame->data.data(),
+                                   StreamConfig(reverse_frame->sample_rate_hz,
+                                                reverse_frame->num_channels()),
+                                   StreamConfig(reverse_frame->sample_rate_hz,
+                                                reverse_frame->num_channels()),
+                                   reverse_frame->data.data()));
+    EXPECT_EQ(AudioProcessing::kNoError,
+              apm_->set_stream_delay_ms(system_delay_ms));
+    EXPECT_EQ(AudioProcessing::kNoError,
               apm_->ProcessStream(process_frame->data.data(),
                                   StreamConfig(process_frame->sample_rate_hz,
-                                               process_frame->num_channels),
+                                               process_frame->num_channels()),
                                   StreamConfig(process_frame->sample_rate_hz,
-                                               process_frame->num_channels),
+                                               process_frame->num_channels()),
                                   process_frame->data.data()));
     frame = frame_queue.front();
     frame_queue.pop();
@@ -716,7 +667,7 @@
   // Calculate expected delay estimate and acceptable regions. Further,
   // limit them w.r.t. AEC delay estimation support.
   const size_t samples_per_ms =
-      rtc::SafeMin<size_t>(16u, frame_.samples_per_channel / 10);
+      rtc::SafeMin<size_t>(16u, frame_.samples_per_channel() / 10);
   const int expected_median =
       rtc::SafeClamp<int>(delay_ms - system_delay_ms, delay_min, delay_max);
   const int expected_median_high = rtc::SafeClamp<int>(
@@ -735,7 +686,7 @@
 
 void ApmTest::StreamParametersTest(Format format) {
   // No errors when the components are disabled.
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(format));
 
   // -- Missing AGC level --
   AudioProcessing::Config apm_config = apm_->GetConfig();
@@ -745,43 +696,43 @@
 
   // Resets after successful ProcessStream().
   apm_->set_stream_analog_level(127);
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(format));
   EXPECT_EQ(apm_->kStreamParameterNotSetError, ProcessStreamChooser(format));
 
   // Other stream parameters set correctly.
   apm_config.echo_canceller.enabled = true;
   apm_config.echo_canceller.mobile_mode = false;
   apm_->ApplyConfig(apm_config);
-  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+  EXPECT_EQ(AudioProcessing::kNoError, apm_->set_stream_delay_ms(100));
   EXPECT_EQ(apm_->kStreamParameterNotSetError, ProcessStreamChooser(format));
   apm_config.gain_controller1.enabled = false;
   apm_->ApplyConfig(apm_config);
 
   // -- Missing delay --
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(format));
 
   // Resets after successful ProcessStream().
-  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(AudioProcessing::kNoError, apm_->set_stream_delay_ms(100));
+  EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(format));
 
   // Other stream parameters set correctly.
   apm_config.gain_controller1.enabled = true;
   apm_->ApplyConfig(apm_config);
   apm_->set_stream_analog_level(127);
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(format));
   apm_config.gain_controller1.enabled = false;
   apm_->ApplyConfig(apm_config);
 
   // -- No stream parameters --
-  EXPECT_EQ(apm_->kNoError, AnalyzeReverseStreamChooser(format));
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(AudioProcessing::kNoError, AnalyzeReverseStreamChooser(format));
+  EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(format));
 
   // -- All there --
-  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+  EXPECT_EQ(AudioProcessing::kNoError, apm_->set_stream_delay_ms(100));
   apm_->set_stream_analog_level(127);
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(format));
 }
 
 TEST_F(ApmTest, StreamParametersInt) {
@@ -795,19 +746,19 @@
 void ApmTest::TestChangingChannelsInt16Interface(
     size_t num_channels,
     AudioProcessing::Error expected_return) {
-  frame_.num_channels = num_channels;
+  frame_.set_num_channels(num_channels);
 
   EXPECT_EQ(expected_return,
             apm_->ProcessStream(
                 frame_.data.data(),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                 frame_.data.data()));
   EXPECT_EQ(expected_return,
             apm_->ProcessReverseStream(
                 frame_.data.data(),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                 frame_.data.data()));
 }
 
@@ -846,7 +797,7 @@
   TestChangingChannelsInt16Interface(0, apm_->kBadNumberChannelsError);
 
   for (size_t i = 1; i < 4; i++) {
-    TestChangingChannelsInt16Interface(i, kNoErr);
+    TestChangingChannelsInt16Interface(i, AudioProcessing::kNoError);
     EXPECT_EQ(i, apm_->num_input_channels());
   }
 }
@@ -862,8 +813,8 @@
     for (size_t j = 0; j < 1; ++j) {
      // Output channels must be one or match input channels.
       if (j == 1 || i == j) {
-        TestChangingForwardChannels(i, j, kNoErr);
-        TestChangingReverseChannels(i, kNoErr);
+        TestChangingForwardChannels(i, j, AudioProcessing::kNoError);
+        TestChangingReverseChannels(i, AudioProcessing::kNoError);
 
         EXPECT_EQ(i, apm_->num_input_channels());
         EXPECT_EQ(j, apm_->num_output_channels());
@@ -889,10 +840,10 @@
 // number of frames, and checks that output signal has the right level.
 TEST_F(ApmTest, PreAmplifier) {
   // Fill the audio frame with a sawtooth pattern.
-  rtc::ArrayView<int16_t> frame_data = GetMutableFrameData(&frame_);
-  const size_t samples_per_channel = frame_.samples_per_channel;
+  InterleavedView<int16_t> frame_data = frame_.view();
+  const size_t samples_per_channel = frame_.samples_per_channel();
   for (size_t i = 0; i < samples_per_channel; i++) {
-    for (size_t ch = 0; ch < frame_.num_channels; ++ch) {
+    for (size_t ch = 0; ch < frame_.num_channels(); ++ch) {
       frame_data[i + ch * samples_per_channel] = 10000 * ((i % 3) - 1);
     }
   }
@@ -901,7 +852,7 @@
   tmp_frame.CopyFrom(frame_);
 
   auto compute_power = [](const Int16FrameData& frame) {
-    rtc::ArrayView<const int16_t> data = GetFrameData(frame);
+    rtc::ArrayView<const int16_t> data = frame.view().data();
     return std::accumulate(data.begin(), data.end(), 0.0f,
                            [](float a, float b) { return a + b * b; }) /
            data.size() / 32768 / 32768;
@@ -920,7 +871,7 @@
 
   for (int i = 0; i < 20; ++i) {
     frame_.CopyFrom(tmp_frame);
-    EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kIntFormat));
+    EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(kIntFormat));
   }
   float output_power = compute_power(frame_);
   EXPECT_NEAR(output_power, input_power, kEpsilon);
@@ -933,7 +884,7 @@
 
   for (int i = 0; i < 20; ++i) {
     frame_.CopyFrom(tmp_frame);
-    EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kIntFormat));
+    EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(kIntFormat));
   }
   output_power = compute_power(frame_);
   EXPECT_NEAR(output_power, 4 * input_power, kEpsilon);
@@ -946,7 +897,7 @@
 
   for (int i = 0; i < 20; ++i) {
     frame_.CopyFrom(tmp_frame);
-    EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kIntFormat));
+    EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(kIntFormat));
   }
   output_power = compute_power(frame_);
   EXPECT_NEAR(output_power, 2.25 * input_power, kEpsilon);
@@ -958,10 +909,10 @@
 // crashing.
 TEST_F(ApmTest, AnalogMicGainEmulation) {
   // Fill the audio frame with a sawtooth pattern.
-  rtc::ArrayView<int16_t> frame_data = GetMutableFrameData(&frame_);
-  const size_t samples_per_channel = frame_.samples_per_channel;
+  InterleavedView<int16_t> frame_data = frame_.view();
+  const size_t samples_per_channel = frame_.samples_per_channel();
   for (size_t i = 0; i < samples_per_channel; i++) {
-    for (size_t ch = 0; ch < frame_.num_channels; ++ch) {
+    for (size_t ch = 0; ch < frame_.num_channels(); ++ch) {
       frame_data[i + ch * samples_per_channel] = 100 * ((i % 3) - 1);
     }
   }
@@ -983,7 +934,7 @@
   // Process a number of frames to ensure that the code runs without crashes.
   for (int i = 0; i < 20; ++i) {
     frame_.CopyFrom(tmp_frame);
-    EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kIntFormat));
+    EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(kIntFormat));
   }
 }
 
@@ -992,10 +943,10 @@
 // right level.
 TEST_F(ApmTest, CaptureLevelAdjustment) {
   // Fill the audio frame with a sawtooth pattern.
-  rtc::ArrayView<int16_t> frame_data = GetMutableFrameData(&frame_);
-  const size_t samples_per_channel = frame_.samples_per_channel;
+  InterleavedView<int16_t> frame_data = frame_.view();
+  const size_t samples_per_channel = frame_.samples_per_channel();
   for (size_t i = 0; i < samples_per_channel; i++) {
-    for (size_t ch = 0; ch < frame_.num_channels; ++ch) {
+    for (size_t ch = 0; ch < frame_.num_channels(); ++ch) {
       frame_data[i + ch * samples_per_channel] = 100 * ((i % 3) - 1);
     }
   }
@@ -1004,7 +955,7 @@
   tmp_frame.CopyFrom(frame_);
 
   auto compute_power = [](const Int16FrameData& frame) {
-    rtc::ArrayView<const int16_t> data = GetFrameData(frame);
+    rtc::ArrayView<const int16_t> data = frame.view().data();
     return std::accumulate(data.begin(), data.end(), 0.0f,
                            [](float a, float b) { return a + b * b; }) /
            data.size() / 32768 / 32768;
@@ -1029,7 +980,7 @@
 
   for (int i = 0; i < 20; ++i) {
     frame_.CopyFrom(tmp_frame);
-    EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kIntFormat));
+    EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(kIntFormat));
   }
   float output_power = compute_power(frame_);
   EXPECT_NEAR(output_power, expected_output_power1, kEpsilon);
@@ -1049,7 +1000,7 @@
 
   for (int i = 0; i < 20; ++i) {
     frame_.CopyFrom(tmp_frame);
-    EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kIntFormat));
+    EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(kIntFormat));
   }
   output_power = compute_power(frame_);
   EXPECT_NEAR(output_power, expected_output_power2, kEpsilon);
@@ -1070,7 +1021,7 @@
 
   for (int i = 0; i < 20; ++i) {
     frame_.CopyFrom(tmp_frame);
-    EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kIntFormat));
+    EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(kIntFormat));
   }
   output_power = compute_power(frame_);
   EXPECT_NEAR(output_power, expected_output_power3, kEpsilon);
@@ -1094,7 +1045,7 @@
     config.gain_controller1.mode = mode;
     apm_->ApplyConfig(config);
     apm_->set_stream_analog_level(100);
-    EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat));
+    EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(kFloatFormat));
   }
 
   // Testing target levels
@@ -1102,7 +1053,7 @@
     config.gain_controller1.target_level_dbfs = target_level_dbfs;
     apm_->ApplyConfig(config);
     apm_->set_stream_analog_level(100);
-    EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat));
+    EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(kFloatFormat));
   }
 
   // Testing compression gains
@@ -1110,7 +1061,7 @@
     config.gain_controller1.compression_gain_db = compression_gain_db;
     apm_->ApplyConfig(config);
     apm_->set_stream_analog_level(100);
-    EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat));
+    EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(kFloatFormat));
   }
 
   // Testing limiter off/on
@@ -1118,18 +1069,18 @@
     config.gain_controller1.enable_limiter = enable;
     apm_->ApplyConfig(config);
     apm_->set_stream_analog_level(100);
-    EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat));
+    EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(kFloatFormat));
   }
 
   // Testing level limits.
   constexpr int kMinLevel = 0;
   constexpr int kMaxLevel = 255;
   apm_->set_stream_analog_level(kMinLevel);
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat));
+  EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(kFloatFormat));
   apm_->set_stream_analog_level((kMinLevel + kMaxLevel) / 2);
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat));
+  EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(kFloatFormat));
   apm_->set_stream_analog_level(kMaxLevel);
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat));
+  EXPECT_EQ(AudioProcessing::kNoError, ProcessStreamChooser(kFloatFormat));
 }
 
 #if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
@@ -1190,15 +1141,15 @@
   for (int i = 0; i < 2000; ++i) {
     ReadFrameWithRewind(near_file_, &frame_);
     // Ensure the audio is at a low level, so the AGC will try to increase it.
-    ScaleFrame(&frame_, 0.25);
+    frame_.Scale(0.25f);
 
     // Always pass in the same volume.
     apm_->set_stream_analog_level(100);
-    EXPECT_EQ(apm_->kNoError,
+    EXPECT_EQ(AudioProcessing::kNoError,
               apm_->ProcessStream(
                   frame_.data.data(),
-                  StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                  StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                  StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                  StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                   frame_.data.data()));
     out_analog_level = apm_->recommended_stream_analog_level();
   }
@@ -1228,14 +1179,14 @@
   for (int i = 0; i < 1000; ++i) {
     ReadFrameWithRewind(near_file_, &frame_);
     // Ensure the audio is at a low level, so the AGC will try to increase it.
-    ScaleFrame(&frame_, 0.25);
+    frame_.Scale(0.25f);
 
     apm_->set_stream_analog_level(out_analog_level);
-    EXPECT_EQ(apm_->kNoError,
+    EXPECT_EQ(AudioProcessing::kNoError,
               apm_->ProcessStream(
                   frame_.data.data(),
-                  StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                  StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                  StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                  StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                   frame_.data.data()));
     out_analog_level = apm_->recommended_stream_analog_level();
   }
@@ -1248,14 +1199,14 @@
 
   for (int i = 0; i < 300; ++i) {
     ReadFrameWithRewind(near_file_, &frame_);
-    ScaleFrame(&frame_, 0.25);
+    frame_.Scale(0.25f);
 
     apm_->set_stream_analog_level(out_analog_level);
-    EXPECT_EQ(apm_->kNoError,
+    EXPECT_EQ(AudioProcessing::kNoError,
               apm_->ProcessStream(
                   frame_.data.data(),
-                  StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                  StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                  StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                  StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                   frame_.data.data()));
     out_analog_level = apm_->recommended_stream_analog_level();
     // Check that AGC respected the manually adjusted volume.
@@ -1301,24 +1252,24 @@
   for (size_t sample_rate_hz : kSampleRatesHz) {
     SCOPED_TRACE(::testing::Message() << "sample_rate_hz=" << sample_rate_hz);
     Init(sample_rate_hz, sample_rate_hz, sample_rate_hz, 2, 2, 2, false);
-    SetFrameTo(&frame_, 1000, 2000);
+    frame_.FillStereoData(1000, 2000);
     Int16FrameData frame_copy;
     frame_copy.CopyFrom(frame_);
     for (int j = 0; j < 1000; j++) {
-      EXPECT_EQ(apm_->kNoError,
+      EXPECT_EQ(AudioProcessing::kNoError,
                 apm_->ProcessStream(
                     frame_.data.data(),
-                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                     frame_.data.data()));
-      EXPECT_TRUE(FrameDataAreEqual(frame_, frame_copy));
-      EXPECT_EQ(apm_->kNoError,
+      EXPECT_TRUE(frame_.IsEqual(frame_copy));
+      EXPECT_EQ(AudioProcessing::kNoError,
                 apm_->ProcessReverseStream(
                     frame_.data.data(),
-                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                     frame_.data.data()));
-      EXPECT_TRUE(FrameDataAreEqual(frame_, frame_copy));
+      EXPECT_TRUE(frame_.IsEqual(frame_copy));
     }
   }
 }
@@ -1368,29 +1319,29 @@
     ASSERT_EQ(0, feof(near_file_));
     while (ReadFrame(far_file_, &revframe_) && ReadFrame(near_file_, &frame_)) {
       CopyLeftToRightChannel(revframe_.data.data(),
-                             revframe_.samples_per_channel);
+                             revframe_.samples_per_channel());
 
       ASSERT_EQ(
-          kNoErr,
+          AudioProcessing::kNoError,
           apm_->ProcessReverseStream(
               revframe_.data.data(),
-              StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
-              StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
+              StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels()),
+              StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels()),
               revframe_.data.data()));
 
-      CopyLeftToRightChannel(frame_.data.data(), frame_.samples_per_channel);
+      CopyLeftToRightChannel(frame_.data.data(), frame_.samples_per_channel());
 
-      ASSERT_EQ(kNoErr, apm_->set_stream_delay_ms(0));
+      ASSERT_EQ(AudioProcessing::kNoError, apm_->set_stream_delay_ms(0));
       apm_->set_stream_analog_level(analog_level);
-      ASSERT_EQ(kNoErr,
+      ASSERT_EQ(AudioProcessing::kNoError,
                 apm_->ProcessStream(
                     frame_.data.data(),
-                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                     frame_.data.data()));
       analog_level = apm_->recommended_stream_analog_level();
 
-      VerifyChannelsAreEqual(frame_.data.data(), frame_.samples_per_channel);
+      VerifyChannelsAreEqual(frame_.data.data(), frame_.samples_per_channel());
     }
     rewind(far_file_);
     rewind(near_file_);
@@ -1400,41 +1351,41 @@
 TEST_F(ApmTest, SplittingFilter) {
   // Verify the filter is not active through undistorted audio when:
   // 1. No components are enabled...
-  SetFrameTo(&frame_, 1000);
+  frame_.FillData(1000);
   Int16FrameData frame_copy;
   frame_copy.CopyFrom(frame_);
-  EXPECT_EQ(apm_->kNoError,
+  EXPECT_EQ(AudioProcessing::kNoError,
             apm_->ProcessStream(
                 frame_.data.data(),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                 frame_.data.data()));
-  EXPECT_EQ(apm_->kNoError,
+  EXPECT_EQ(AudioProcessing::kNoError,
             apm_->ProcessStream(
                 frame_.data.data(),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                 frame_.data.data()));
-  EXPECT_TRUE(FrameDataAreEqual(frame_, frame_copy));
+  EXPECT_TRUE(frame_.IsEqual(frame_copy));
 
   // 2. Only the level estimator is enabled...
   auto apm_config = apm_->GetConfig();
-  SetFrameTo(&frame_, 1000);
+  frame_.FillData(1000);
   frame_copy.CopyFrom(frame_);
   apm_->ApplyConfig(apm_config);
-  EXPECT_EQ(apm_->kNoError,
+  EXPECT_EQ(AudioProcessing::kNoError,
             apm_->ProcessStream(
                 frame_.data.data(),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                 frame_.data.data()));
-  EXPECT_EQ(apm_->kNoError,
+  EXPECT_EQ(AudioProcessing::kNoError,
             apm_->ProcessStream(
                 frame_.data.data(),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                 frame_.data.data()));
-  EXPECT_TRUE(FrameDataAreEqual(frame_, frame_copy));
+  EXPECT_TRUE(frame_.IsEqual(frame_copy));
   apm_->ApplyConfig(apm_config);
 
   // Check the test is valid. We should have distortion from the filter
@@ -1442,19 +1393,17 @@
   apm_config.echo_canceller.enabled = true;
   apm_config.echo_canceller.mobile_mode = false;
   apm_->ApplyConfig(apm_config);
-  frame_.samples_per_channel = 320;
-  frame_.num_channels = 2;
-  frame_.sample_rate_hz = 32000;
-  SetFrameTo(&frame_, 1000);
+  frame_.SetProperties(/* samples_per_channel=*/320, /* num_channels=*/2);
+  frame_.FillData(1000);
   frame_copy.CopyFrom(frame_);
-  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
-  EXPECT_EQ(apm_->kNoError,
+  EXPECT_EQ(AudioProcessing::kNoError, apm_->set_stream_delay_ms(0));
+  EXPECT_EQ(AudioProcessing::kNoError,
             apm_->ProcessStream(
                 frame_.data.data(),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                 frame_.data.data()));
-  EXPECT_FALSE(FrameDataAreEqual(frame_, frame_copy));
+  EXPECT_FALSE(frame_.IsEqual(frame_copy));
 }
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
@@ -1497,7 +1446,7 @@
       const audioproc::ReverseStream msg = event_msg.reverse_stream();
 
       if (msg.channel_size() > 0) {
-        ASSERT_EQ(revframe_.num_channels,
+        ASSERT_EQ(revframe_.num_channels(),
                   static_cast<size_t>(msg.channel_size()));
         for (int i = 0; i < msg.channel_size(); ++i) {
           memcpy(revfloat_cb_->channels()[i], msg.channel(i).data(),
@@ -1515,7 +1464,7 @@
     } else if (event_msg.type() == audioproc::Event::STREAM) {
       const audioproc::Stream msg = event_msg.stream();
       // ProcessStream could have changed this for the output frame.
-      frame_.num_channels = apm_->num_input_channels();
+      frame_.set_num_channels(apm_->num_input_channels());
 
       apm_->set_stream_analog_level(msg.applied_input_volume());
       EXPECT_NOERR(apm_->set_stream_delay_ms(msg.delay()));
@@ -1526,7 +1475,7 @@
       }
 
       if (msg.input_channel_size() > 0) {
-        ASSERT_EQ(frame_.num_channels,
+        ASSERT_EQ(frame_.num_channels(),
                   static_cast<size_t>(msg.input_channel_size()));
         for (int i = 0; i < msg.input_channel_size(); ++i) {
           memcpy(float_cb_->channels()[i], msg.input_channel(i).data(),
@@ -1644,18 +1593,19 @@
   auto aec_dump = AecDumpFactory::Create(filename, -1, worker_queue.Get());
   EXPECT_TRUE(aec_dump);
   apm_->AttachAecDump(std::move(aec_dump));
-  EXPECT_EQ(apm_->kNoError,
+  EXPECT_EQ(AudioProcessing::kNoError,
             apm_->ProcessStream(
                 frame_.data.data(),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                 frame_.data.data()));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->ProcessReverseStream(
-                revframe_.data.data(),
-                StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
-                StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
-                revframe_.data.data()));
+  EXPECT_EQ(
+      AudioProcessing::kNoError,
+      apm_->ProcessReverseStream(
+          revframe_.data.data(),
+          StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels()),
+          StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels()),
+          revframe_.data.data()));
   apm_->DetachAecDump();
 
   // Verify the file has been written.
@@ -1687,17 +1637,18 @@
   auto aec_dump = AecDumpFactory::Create(std::move(f), -1, worker_queue.Get());
   EXPECT_TRUE(aec_dump);
   apm_->AttachAecDump(std::move(aec_dump));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->ProcessReverseStream(
-                revframe_.data.data(),
-                StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
-                StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
-                revframe_.data.data()));
-  EXPECT_EQ(apm_->kNoError,
+  EXPECT_EQ(
+      AudioProcessing::kNoError,
+      apm_->ProcessReverseStream(
+          revframe_.data.data(),
+          StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels()),
+          StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels()),
+          revframe_.data.data()));
+  EXPECT_EQ(AudioProcessing::kNoError,
             apm_->ProcessStream(
                 frame_.data.data(),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                 frame_.data.data()));
   apm_->DetachAecDump();
 
@@ -1779,26 +1730,26 @@
 
     while (ReadFrame(far_file_, &revframe_) && ReadFrame(near_file_, &frame_)) {
       EXPECT_EQ(
-          apm_->kNoError,
+          AudioProcessing::kNoError,
           apm_->ProcessReverseStream(
               revframe_.data.data(),
-              StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
-              StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels),
+              StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels()),
+              StreamConfig(revframe_.sample_rate_hz, revframe_.num_channels()),
               revframe_.data.data()));
 
-      EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+      EXPECT_EQ(AudioProcessing::kNoError, apm_->set_stream_delay_ms(0));
       apm_->set_stream_analog_level(analog_level);
 
-      EXPECT_EQ(apm_->kNoError,
+      EXPECT_EQ(AudioProcessing::kNoError,
                 apm_->ProcessStream(
                     frame_.data.data(),
-                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
-                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels),
+                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
+                    StreamConfig(frame_.sample_rate_hz, frame_.num_channels()),
                     frame_.data.data()));
 
       // Ensure the frame was downmixed properly.
       EXPECT_EQ(static_cast<size_t>(test->num_output_channels()),
-                frame_.num_channels);
+                frame_.num_channels());
 
       max_output_average += MaxAudioFrame(frame_);
 
@@ -1806,13 +1757,12 @@
       analog_level_average += analog_level;
       AudioProcessingStats stats = apm_->GetStatistics();
 
-      size_t frame_size = frame_.samples_per_channel * frame_.num_channels;
       size_t write_count =
-          fwrite(frame_.data.data(), sizeof(int16_t), frame_size, out_file_);
-      ASSERT_EQ(frame_size, write_count);
+          fwrite(frame_.data.data(), sizeof(int16_t), frame_.size(), out_file_);
+      ASSERT_EQ(frame_.size(), write_count);
 
       // Reset in case of downmixing.
-      frame_.num_channels = static_cast<size_t>(test->num_input_channels());
+      frame_.set_num_channels(static_cast<size_t>(test->num_input_channels()));
       frame_count++;
 
 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
@@ -2537,13 +2487,14 @@
           .Build(CreateEnvironment());
 
   Int16FrameData audio;
-  audio.num_channels = 1;
-  SetFrameSampleRate(&audio, AudioProcessing::NativeRate::kSampleRate16kHz);
+  audio.SetProperties(AudioProcessing::GetFrameSize(
+                          AudioProcessing::NativeRate::kSampleRate16kHz),
+                      /* num_channels=*/1);
 
   EXPECT_CALL(*mock_post_processor_ptr, Process(::testing::_)).Times(1);
   apm->ProcessStream(audio.data.data(),
-                     StreamConfig(audio.sample_rate_hz, audio.num_channels),
-                     StreamConfig(audio.sample_rate_hz, audio.num_channels),
+                     StreamConfig(audio.sample_rate_hz, audio.num_channels()),
+                     StreamConfig(audio.sample_rate_hz, audio.num_channels()),
                      audio.data.data());
 }
 
@@ -2559,13 +2510,15 @@
           .Build(CreateEnvironment());
 
   Int16FrameData audio;
-  audio.num_channels = 1;
-  SetFrameSampleRate(&audio, AudioProcessing::NativeRate::kSampleRate16kHz);
+  audio.SetProperties(AudioProcessing::GetFrameSize(
+                          AudioProcessing::NativeRate::kSampleRate16kHz),
+                      /* num_channels=*/1);
 
   EXPECT_CALL(*mock_pre_processor_ptr, Process(::testing::_)).Times(1);
   apm->ProcessReverseStream(
-      audio.data.data(), StreamConfig(audio.sample_rate_hz, audio.num_channels),
-      StreamConfig(audio.sample_rate_hz, audio.num_channels),
+      audio.data.data(),
+      StreamConfig(audio.sample_rate_hz, audio.num_channels()),
+      StreamConfig(audio.sample_rate_hz, audio.num_channels()),
       audio.data.data());
 }
 
@@ -2581,13 +2534,14 @@
           .Build(CreateEnvironment());
 
   Int16FrameData audio;
-  audio.num_channels = 1;
-  SetFrameSampleRate(&audio, AudioProcessing::NativeRate::kSampleRate16kHz);
+  audio.SetProperties(AudioProcessing::GetFrameSize(
+                          AudioProcessing::NativeRate::kSampleRate16kHz),
+                      /* num_channels=*/1);
 
   EXPECT_CALL(*mock_capture_analyzer_ptr, Analyze(::testing::_)).Times(1);
   apm->ProcessStream(audio.data.data(),
-                     StreamConfig(audio.sample_rate_hz, audio.num_channels),
-                     StreamConfig(audio.sample_rate_hz, audio.num_channels),
+                     StreamConfig(audio.sample_rate_hz, audio.num_channels()),
+                     StreamConfig(audio.sample_rate_hz, audio.num_channels()),
                      audio.data.data());
 }
 
@@ -2606,14 +2560,16 @@
   // RuntimeSettings forwarded during 'Process*Stream' calls.
   // Therefore we have to make one such call.
   Int16FrameData audio;
-  audio.num_channels = 1;
-  SetFrameSampleRate(&audio, AudioProcessing::NativeRate::kSampleRate16kHz);
+  audio.SetProperties(AudioProcessing::GetFrameSize(
+                          AudioProcessing::NativeRate::kSampleRate16kHz),
+                      /* num_channels=*/1);
 
   EXPECT_CALL(*mock_pre_processor_ptr, SetRuntimeSetting(::testing::_))
       .Times(1);
   apm->ProcessReverseStream(
-      audio.data.data(), StreamConfig(audio.sample_rate_hz, audio.num_channels),
-      StreamConfig(audio.sample_rate_hz, audio.num_channels),
+      audio.data.data(),
+      StreamConfig(audio.sample_rate_hz, audio.num_channels()),
+      StreamConfig(audio.sample_rate_hz, audio.num_channels()),
       audio.data.data());
 }
 
@@ -2646,19 +2602,21 @@
           .Build(CreateEnvironment());
 
   Int16FrameData audio;
-  audio.num_channels = 1;
-  SetFrameSampleRate(&audio, AudioProcessing::NativeRate::kSampleRate16kHz);
+  audio.SetProperties(AudioProcessing::GetFrameSize(
+                          AudioProcessing::NativeRate::kSampleRate16kHz),
+                      /* num_channels=*/1);
   apm->ProcessStream(audio.data.data(),
-                     StreamConfig(audio.sample_rate_hz, audio.num_channels),
-                     StreamConfig(audio.sample_rate_hz, audio.num_channels),
+                     StreamConfig(audio.sample_rate_hz, audio.num_channels()),
+                     StreamConfig(audio.sample_rate_hz, audio.num_channels()),
                      audio.data.data());
   apm->ProcessReverseStream(
-      audio.data.data(), StreamConfig(audio.sample_rate_hz, audio.num_channels),
-      StreamConfig(audio.sample_rate_hz, audio.num_channels),
+      audio.data.data(),
+      StreamConfig(audio.sample_rate_hz, audio.num_channels()),
+      StreamConfig(audio.sample_rate_hz, audio.num_channels()),
       audio.data.data());
   apm->ProcessStream(audio.data.data(),
-                     StreamConfig(audio.sample_rate_hz, audio.num_channels),
-                     StreamConfig(audio.sample_rate_hz, audio.num_channels),
+                     StreamConfig(audio.sample_rate_hz, audio.num_channels()),
+                     StreamConfig(audio.sample_rate_hz, audio.num_channels()),
                      audio.data.data());
 }
 
@@ -2686,8 +2644,9 @@
   EXPECT_CALL(*mock_echo_detector, GetMetrics()).Times(1);
 
   Int16FrameData frame;
-  frame.num_channels = 1;
-  SetFrameSampleRate(&frame, 16000);
+  frame.SetProperties(AudioProcessing::GetFrameSize(
+                          AudioProcessing::NativeRate::kSampleRate16kHz),
+                      /* num_channels=*/1);
 
   apm->ProcessReverseStream(frame.data.data(), StreamConfig(16000, 1),
                             StreamConfig(16000, 1), frame.data.data());
@@ -2715,7 +2674,9 @@
       });
   EXPECT_CALL(*mock_echo_detector, GetMetrics()).Times(2);
 
-  SetFrameSampleRate(&frame, 48000);
+  frame.SetProperties(AudioProcessing::GetFrameSize(
+                          AudioProcessing::NativeRate::kSampleRate48kHz),
+                      frame.num_channels());
   apm->ProcessStream(frame.data.data(), StreamConfig(48000, 1),
                      StreamConfig(48000, 1), frame.data.data());
   apm->ProcessReverseStream(frame.data.data(), StreamConfig(48000, 1),
@@ -2769,8 +2730,9 @@
 
   // Set up an audioframe.
   Int16FrameData frame;
-  frame.num_channels = 1;
-  SetFrameSampleRate(&frame, AudioProcessing::NativeRate::kSampleRate32kHz);
+  frame.SetProperties(AudioProcessing::GetFrameSize(
+                          AudioProcessing::NativeRate::kSampleRate32kHz),
+                      /* num_channels=*/1);
 
   // Fill the audio frame with a sawtooth pattern.
   int16_t* ptr = frame.data.data();
@@ -2782,15 +2744,15 @@
   for (int i = 0; i < 200; i++) {
     EXPECT_EQ(apm->ProcessReverseStream(
                   frame.data.data(),
-                  StreamConfig(frame.sample_rate_hz, frame.num_channels),
-                  StreamConfig(frame.sample_rate_hz, frame.num_channels),
+                  StreamConfig(frame.sample_rate_hz, frame.num_channels()),
+                  StreamConfig(frame.sample_rate_hz, frame.num_channels()),
                   frame.data.data()),
               0);
     EXPECT_EQ(apm->set_stream_delay_ms(0), 0);
     EXPECT_EQ(apm->ProcessStream(
                   frame.data.data(),
-                  StreamConfig(frame.sample_rate_hz, frame.num_channels),
-                  StreamConfig(frame.sample_rate_hz, frame.num_channels),
+                  StreamConfig(frame.sample_rate_hz, frame.num_channels()),
+                  StreamConfig(frame.sample_rate_hz, frame.num_channels()),
                   frame.data.data()),
               0);
   }
@@ -2817,8 +2779,9 @@
 
   // Set up an audioframe.
   Int16FrameData frame;
-  frame.num_channels = 1;
-  SetFrameSampleRate(&frame, AudioProcessing::NativeRate::kSampleRate32kHz);
+  frame.SetProperties(AudioProcessing::GetFrameSize(
+                          AudioProcessing::NativeRate::kSampleRate32kHz),
+                      /* num_channels=*/1);
 
   // Fill the audio frame with a sawtooth pattern.
   int16_t* ptr = frame.data.data();
@@ -2830,15 +2793,15 @@
   for (int i = 0; i < 200; i++) {
     EXPECT_EQ(apm->ProcessReverseStream(
                   frame.data.data(),
-                  StreamConfig(frame.sample_rate_hz, frame.num_channels),
-                  StreamConfig(frame.sample_rate_hz, frame.num_channels),
+                  StreamConfig(frame.sample_rate_hz, frame.num_channels()),
+                  StreamConfig(frame.sample_rate_hz, frame.num_channels()),
                   frame.data.data()),
               0);
     EXPECT_EQ(apm->set_stream_delay_ms(0), 0);
     EXPECT_EQ(apm->ProcessStream(
                   frame.data.data(),
-                  StreamConfig(frame.sample_rate_hz, frame.num_channels),
-                  StreamConfig(frame.sample_rate_hz, frame.num_channels),
+                  StreamConfig(frame.sample_rate_hz, frame.num_channels()),
+                  StreamConfig(frame.sample_rate_hz, frame.num_channels()),
                   frame.data.data()),
               0);
   }
@@ -2863,8 +2826,9 @@
 
   // Set up an audioframe.
   Int16FrameData frame;
-  frame.num_channels = 1;
-  SetFrameSampleRate(&frame, AudioProcessing::NativeRate::kSampleRate32kHz);
+  frame.SetProperties(AudioProcessing::GetFrameSize(
+                          AudioProcessing::NativeRate::kSampleRate32kHz),
+                      /* num_channels=*/1);
 
   // Fill the audio frame with a sawtooth pattern.
   int16_t* ptr = frame.data.data();
@@ -2877,12 +2841,12 @@
   apm->Initialize(processing_config);
 
   // No metric should be reported.
-  EXPECT_EQ(
-      apm->ProcessStream(frame.data.data(),
-                         StreamConfig(frame.sample_rate_hz, frame.num_channels),
-                         StreamConfig(frame.sample_rate_hz, frame.num_channels),
-                         frame.data.data()),
-      0);
+  EXPECT_EQ(apm->ProcessStream(
+                frame.data.data(),
+                StreamConfig(frame.sample_rate_hz, frame.num_channels()),
+                StreamConfig(frame.sample_rate_hz, frame.num_channels()),
+                frame.data.data()),
+            0);
   EXPECT_FALSE(apm->GetStatistics().voice_detected.has_value());
 }
 
@@ -2890,14 +2854,15 @@
   scoped_refptr<AudioProcessing> apm =
       BuiltinAudioProcessingBuilder().Build(CreateEnvironment());
   Int16FrameData frame;
-  frame.num_channels = 1;
-  SetFrameSampleRate(&frame, AudioProcessing::NativeRate::kSampleRate32kHz);
-  ASSERT_EQ(
-      apm->ProcessStream(frame.data.data(),
-                         StreamConfig(frame.sample_rate_hz, frame.num_channels),
-                         StreamConfig(frame.sample_rate_hz, frame.num_channels),
-                         frame.data.data()),
-      0);
+  frame.SetProperties(AudioProcessing::GetFrameSize(
+                          AudioProcessing::NativeRate::kSampleRate32kHz),
+                      /* num_channels=*/1);
+  ASSERT_EQ(apm->ProcessStream(
+                frame.data.data(),
+                StreamConfig(frame.sample_rate_hz, frame.num_channels()),
+                StreamConfig(frame.sample_rate_hz, frame.num_channels()),
+                frame.data.data()),
+            0);
   // Echo detector is disabled by default, no stats reported.
   AudioProcessingStats stats = apm->GetStatistics();
   EXPECT_FALSE(stats.residual_echo_likelihood.has_value());
@@ -2911,15 +2876,16 @@
           .SetEchoDetector(CreateEchoDetector())
           .Build(CreateEnvironment());
   Int16FrameData frame;
-  frame.num_channels = 1;
-  SetFrameSampleRate(&frame, AudioProcessing::NativeRate::kSampleRate32kHz);
+  frame.SetProperties(AudioProcessing::GetFrameSize(
+                          AudioProcessing::NativeRate::kSampleRate32kHz),
+                      /* num_channels=*/1);
   // Echo detector enabled: Report stats.
-  ASSERT_EQ(
-      apm->ProcessStream(frame.data.data(),
-                         StreamConfig(frame.sample_rate_hz, frame.num_channels),
-                         StreamConfig(frame.sample_rate_hz, frame.num_channels),
-                         frame.data.data()),
-      0);
+  ASSERT_EQ(apm->ProcessStream(
+                frame.data.data(),
+                StreamConfig(frame.sample_rate_hz, frame.num_channels()),
+                StreamConfig(frame.sample_rate_hz, frame.num_channels()),
+                frame.data.data()),
+            0);
   AudioProcessingStats stats = apm->GetStatistics();
   EXPECT_TRUE(stats.residual_echo_likelihood.has_value());
   EXPECT_TRUE(stats.residual_echo_likelihood_recent_max.has_value());
diff --git a/modules/audio_processing/test/test_utils.cc b/modules/audio_processing/test/test_utils.cc
index c6b02b2..fcbd5e4 100644
--- a/modules/audio_processing/test/test_utils.cc
+++ b/modules/audio_processing/test/test_utils.cc
@@ -10,6 +10,7 @@
 
 #include "modules/audio_processing/test/test_utils.h"
 
+#include <algorithm>
 #include <string>
 #include <utility>
 
@@ -19,6 +20,52 @@
 
 namespace webrtc {
 
+void Int16FrameData::CopyFrom(const Int16FrameData& src) {
+  sample_rate_hz = src.sample_rate_hz;
+  view_ = InterleavedView<int16_t>(data.data(), src.samples_per_channel(),
+                                   src.num_channels());
+  RTC_CHECK_LE(view_.size(), kMaxDataSizeSamples);
+  CopySamples(view_, src.view());
+}
+
+bool Int16FrameData::IsEqual(const Int16FrameData& frame) const {
+  return samples_per_channel() == frame.samples_per_channel() &&
+         num_channels() == frame.num_channels() &&
+         memcmp(data.data(), frame.data.data(),
+                samples_per_channel() * num_channels() * sizeof(int16_t)) == 0;
+}
+
+void Int16FrameData::Scale(float f) {
+  std::for_each(data.begin(), data.end(),
+                [f](int16_t& sample) { sample = FloatS16ToS16(sample * f); });
+}
+
+void Int16FrameData::SetProperties(size_t samples_per_channel,
+                                   size_t num_channels) {
+  sample_rate_hz = samples_per_channel * 100;
+  view_ =
+      InterleavedView<int16_t>(data.data(), samples_per_channel, num_channels);
+  RTC_CHECK_LE(view_.size(), kMaxDataSizeSamples);
+}
+
+void Int16FrameData::set_num_channels(size_t num_channels) {
+  view_ = InterleavedView<int16_t>(data.data(), samples_per_channel(),
+                                   num_channels);
+  RTC_CHECK_LE(view_.size(), kMaxDataSizeSamples);
+}
+
+void Int16FrameData::FillData(int16_t value) {
+  std::fill(data.begin(), data.begin() + size(), value);
+}
+
+void Int16FrameData::FillStereoData(int16_t left, int16_t right) {
+  RTC_DCHECK_EQ(num_channels(), 2u);
+  for (size_t i = 0; i < samples_per_channel() * 2u; i += 2u) {
+    data[i] = left;
+    data[i + 1] = right;
+  }
+}
+
 ChannelBufferWavReader::ChannelBufferWavReader(std::unique_ptr<WavReader> file)
     : file_(std::move(file)) {}
 
@@ -86,10 +133,4 @@
   return file;
 }
 
-void SetFrameSampleRate(Int16FrameData* frame, int sample_rate_hz) {
-  frame->sample_rate_hz = sample_rate_hz;
-  frame->samples_per_channel =
-      AudioProcessing::kChunkSizeMs * sample_rate_hz / 1000;
-}
-
 }  // namespace webrtc
diff --git a/modules/audio_processing/test/test_utils.h b/modules/audio_processing/test/test_utils.h
index cc36f9a..90cfb5c 100644
--- a/modules/audio_processing/test/test_utils.h
+++ b/modules/audio_processing/test/test_utils.h
@@ -20,41 +20,51 @@
 #include <vector>
 
 #include "absl/strings/string_view.h"
+#include "api/audio/audio_frame.h"
 #include "api/audio/audio_processing.h"
+#include "api/audio/audio_view.h"
 #include "common_audio/channel_buffer.h"
 #include "common_audio/wav_file.h"
 
 namespace webrtc {
 
 static const AudioProcessing::Error kNoErr = AudioProcessing::kNoError;
-#define EXPECT_NOERR(expr) EXPECT_EQ(kNoErr, (expr))
+#define EXPECT_NOERR(expr) EXPECT_EQ(AudioProcessing::kNoError, (expr))
 
 // Encapsulates samples and metadata for an integer frame.
 struct Int16FrameData {
   // Max data size that matches the data size of the AudioFrame class, providing
   // storage for 8 channels of 96 kHz data.
-  static const int kMaxDataSizeSamples = 7680;
+  static const int kMaxDataSizeSamples = AudioFrame::kMaxDataSizeSamples;
 
-  Int16FrameData() {
-    sample_rate_hz = 0;
-    num_channels = 0;
-    samples_per_channel = 0;
-    data.fill(0);
-  }
+  Int16FrameData() = default;
 
-  void CopyFrom(const Int16FrameData& src) {
-    samples_per_channel = src.samples_per_channel;
-    sample_rate_hz = src.sample_rate_hz;
-    num_channels = src.num_channels;
+  void CopyFrom(const Int16FrameData& src);
+  bool IsEqual(const Int16FrameData& frame) const;
+  void Scale(float f);
 
-    const size_t length = samples_per_channel * num_channels;
-    RTC_CHECK_LE(length, kMaxDataSizeSamples);
-    memcpy(data.data(), src.data.data(), sizeof(int16_t) * length);
-  }
-  std::array<int16_t, kMaxDataSizeSamples> data;
-  int32_t sample_rate_hz;
-  size_t num_channels;
-  size_t samples_per_channel;
+  // Sets `samples_per_channel`, `num_channels` and, implicitly, the sample
+  // rate. The sample rate is set to 100x that of samples per channel. I.e. if
+  // samples_per_channel is 320, the sample rate will be set to 32000.
+  void SetProperties(size_t samples_per_channel, size_t num_channels);
+
+  size_t size() const { return view_.size(); }
+  size_t samples_per_channel() const { return view_.samples_per_channel(); }
+  size_t num_channels() const { return view_.num_channels(); }
+  void set_num_channels(size_t num_channels);
+
+  InterleavedView<int16_t> view() { return view_; }
+  InterleavedView<const int16_t> view() const { return view_; }
+
+  void FillData(int16_t value);
+  void FillStereoData(int16_t left, int16_t right);
+
+  // public struct members.
+  std::array<int16_t, kMaxDataSizeSamples> data = {};
+  int32_t sample_rate_hz = 0;
+
+ private:
+  InterleavedView<int16_t> view_;
 };
 
 // Reads ChannelBuffers from a provided WavReader.
@@ -114,16 +124,13 @@
 // Exits on failure; do not use in unit tests.
 FILE* OpenFile(absl::string_view filename, absl::string_view mode);
 
-void SetFrameSampleRate(Int16FrameData* frame, int sample_rate_hz);
-
 template <typename T>
 void SetContainerFormat(int sample_rate_hz,
                         size_t num_channels,
                         Int16FrameData* frame,
                         std::unique_ptr<ChannelBuffer<T> >* cb) {
-  SetFrameSampleRate(frame, sample_rate_hz);
-  frame->num_channels = num_channels;
-  cb->reset(new ChannelBuffer<T>(frame->samples_per_channel, num_channels));
+  frame->SetProperties(sample_rate_hz / 100, num_channels);
+  cb->reset(new ChannelBuffer<T>(frame->samples_per_channel(), num_channels));
 }
 
 template <typename T>