Remove the requirement to call set_sample_rate_hz and friends.

Instead, have ProcessStream transparently handle changes to the stream
audio parameters (sample rate and number of channels). This removes two
locks per 10 ms ProcessStream call taken by VoiceEngine (four in total
with the audio level indicator).
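
A minimal sketch of the new contract (illustrative caller code; the
AudioProcessing and AudioFrame names are real, the scenario is assumed):

    // Feed 10 ms frames; APM reinitializes itself when the frame's
    // sample rate or channel count changes. No set_* calls needed.
    AudioProcessing* apm = AudioProcessing::Create(0);
    AudioFrame frame;
    frame.num_channels_ = 1;
    frame.sample_rate_hz_ = 16000;
    frame.samples_per_channel_ = 160;   // 10 ms at 16 kHz.
    apm->ProcessStream(&frame);         // Processes at 16 kHz.
    frame.sample_rate_hz_ = 32000;
    frame.samples_per_channel_ = 320;   // 10 ms at 32 kHz.
    apm->ProcessStream(&frame);         // Triggers an internal re-init.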

Also, prepare for future improvements by having the splitting filter
take a length parameter, which will allow it to work at different
sample rates. Remove the now-redundant splitting_filter wrapper.

TESTED=voe_cmd_test with audio processing enabled and switching between
codecs; unit tests.

R=aluebs@webrtc.org, bjornv@webrtc.org, turaj@webrtc.org, xians@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/3949004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@5346 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/webrtc/common_audio/signal_processing/include/signal_processing_library.h b/webrtc/common_audio/signal_processing/include/signal_processing_library.h
index c567beb..852bbf9 100644
--- a/webrtc/common_audio/signal_processing/include/signal_processing_library.h
+++ b/webrtc/common_audio/signal_processing/include/signal_processing_library.h
@@ -996,12 +996,14 @@
  * END OF RESAMPLING FUNCTIONS
  ************************************************************/
 void WebRtcSpl_AnalysisQMF(const int16_t* in_data,
+                           int in_data_length,
                            int16_t* low_band,
                            int16_t* high_band,
                            int32_t* filter_state1,
                            int32_t* filter_state2);
 void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
                             const int16_t* high_band,
+                            int band_length,
                             int16_t* out_data,
                             int32_t* filter_state1,
                             int32_t* filter_state2);
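
For reference, a hedged sketch of calling the updated functions (band
sizes shown for a 10 ms frame at 32 kHz; six-word filter states are
assumed from the all-pass filter implementation below):

    int16_t in[320] = {0};              // 10 ms of input at 32 kHz.
    int16_t low[160], high[160], out[320];
    int32_t a_state1[6] = {0}, a_state2[6] = {0};  // Analysis states.
    int32_t s_state1[6] = {0}, s_state2[6] = {0};  // Synthesis states.
    // Split 320 samples into two 160-sample bands, then recombine.
    WebRtcSpl_AnalysisQMF(in, 320, low, high, a_state1, a_state2);
    WebRtcSpl_SynthesisQMF(low, high, 160, out, s_state1, s_state2);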
diff --git a/webrtc/common_audio/signal_processing/splitting_filter.c b/webrtc/common_audio/signal_processing/splitting_filter.c
index cf6ec9d..dbda042 100644
--- a/webrtc/common_audio/signal_processing/splitting_filter.c
+++ b/webrtc/common_audio/signal_processing/splitting_filter.c
@@ -15,10 +15,12 @@
 
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
-// Number of samples in a low/high-band frame.
+#include <assert.h>
+
+// Maximum number of samples in a low/high-band frame.
 enum
 {
-    kBandFrameLength = 160
+    kMaxBandFrameLength = 240  // 10 ms at 48 kHz.
 };
 
 // QMF filter coefficients in Q16.
@@ -116,34 +118,37 @@
     filter_state[5] = out_data[data_length - 1]; // y[N-1], becomes y[-1] next time
 }
 
-void WebRtcSpl_AnalysisQMF(const int16_t* in_data, int16_t* low_band,
-                           int16_t* high_band, int32_t* filter_state1,
-                           int32_t* filter_state2)
+void WebRtcSpl_AnalysisQMF(const int16_t* in_data, int in_data_length,
+                           int16_t* low_band, int16_t* high_band,
+                           int32_t* filter_state1, int32_t* filter_state2)
 {
     int16_t i;
     int16_t k;
     int32_t tmp;
-    int32_t half_in1[kBandFrameLength];
-    int32_t half_in2[kBandFrameLength];
-    int32_t filter1[kBandFrameLength];
-    int32_t filter2[kBandFrameLength];
+    int32_t half_in1[kMaxBandFrameLength];
+    int32_t half_in2[kMaxBandFrameLength];
+    int32_t filter1[kMaxBandFrameLength];
+    int32_t filter2[kMaxBandFrameLength];
+    const int band_length = in_data_length / 2;
+    assert(in_data_length % 2 == 0);
+    assert(band_length <= kMaxBandFrameLength);
 
     // Split even and odd samples. Also shift them to Q10.
-    for (i = 0, k = 0; i < kBandFrameLength; i++, k += 2)
+    for (i = 0, k = 0; i < band_length; i++, k += 2)
     {
         half_in2[i] = WEBRTC_SPL_LSHIFT_W32((int32_t)in_data[k], 10);
         half_in1[i] = WEBRTC_SPL_LSHIFT_W32((int32_t)in_data[k + 1], 10);
     }
 
     // All pass filter even and odd samples, independently.
-    WebRtcSpl_AllPassQMF(half_in1, kBandFrameLength, filter1, WebRtcSpl_kAllPassFilter1,
-                         filter_state1);
-    WebRtcSpl_AllPassQMF(half_in2, kBandFrameLength, filter2, WebRtcSpl_kAllPassFilter2,
-                         filter_state2);
+    WebRtcSpl_AllPassQMF(half_in1, band_length, filter1,
+                         WebRtcSpl_kAllPassFilter1, filter_state1);
+    WebRtcSpl_AllPassQMF(half_in2, band_length, filter2,
+                         WebRtcSpl_kAllPassFilter2, filter_state2);
 
     // Take the sum and difference of the filtered odd and even branches to
     // get the upper and lower bands.
-    for (i = 0; i < kBandFrameLength; i++)
+    for (i = 0; i < band_length; i++)
     {
         tmp = filter1[i] + filter2[i] + 1024;
         tmp = WEBRTC_SPL_RSHIFT_W32(tmp, 11);
@@ -156,20 +161,21 @@
 }
 
 void WebRtcSpl_SynthesisQMF(const int16_t* low_band, const int16_t* high_band,
-                            int16_t* out_data, int32_t* filter_state1,
-                            int32_t* filter_state2)
+                            int band_length, int16_t* out_data,
+                            int32_t* filter_state1, int32_t* filter_state2)
 {
     int32_t tmp;
-    int32_t half_in1[kBandFrameLength];
-    int32_t half_in2[kBandFrameLength];
-    int32_t filter1[kBandFrameLength];
-    int32_t filter2[kBandFrameLength];
+    int32_t half_in1[kMaxBandFrameLength];
+    int32_t half_in2[kMaxBandFrameLength];
+    int32_t filter1[kMaxBandFrameLength];
+    int32_t filter2[kMaxBandFrameLength];
     int16_t i;
     int16_t k;
+    assert(band_length <= kMaxBandFrameLength);
 
     // Obtain the sum and difference channels out of upper and lower-band channels.
     // Also shift to Q10 domain.
-    for (i = 0; i < kBandFrameLength; i++)
+    for (i = 0; i < band_length; i++)
     {
         tmp = (int32_t)low_band[i] + (int32_t)high_band[i];
         half_in1[i] = WEBRTC_SPL_LSHIFT_W32(tmp, 10);
@@ -178,15 +184,15 @@
     }
 
     // all-pass filter the sum and difference channels
-    WebRtcSpl_AllPassQMF(half_in1, kBandFrameLength, filter1, WebRtcSpl_kAllPassFilter2,
-                         filter_state1);
-    WebRtcSpl_AllPassQMF(half_in2, kBandFrameLength, filter2, WebRtcSpl_kAllPassFilter1,
-                         filter_state2);
+    WebRtcSpl_AllPassQMF(half_in1, band_length, filter1,
+                         WebRtcSpl_kAllPassFilter2, filter_state1);
+    WebRtcSpl_AllPassQMF(half_in2, band_length, filter2,
+                         WebRtcSpl_kAllPassFilter1, filter_state2);
 
     // The filtered signals are even and odd samples of the output. Combine
     // them. The signals are in Q10; shift them back to Q0 and take care of
     // saturation.
-    for (i = 0, k = 0; i < kBandFrameLength; i++)
+    for (i = 0, k = 0; i < band_length; i++)
     {
         tmp = WEBRTC_SPL_RSHIFT_W32(filter2[i] + 512, 10);
         out_data[k++] = WebRtcSpl_SatW32ToW16(tmp);
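
A quick size check under the new scheme (arithmetic implied by the
asserts above): a 10 ms frame at 32 kHz gives in_data_length = 320 and
band_length = 160; at 48 kHz it gives 480 and 240, the latter exactly
kMaxBandFrameLength.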
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c b/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c
index f3f1650..2f2a4bc 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c
@@ -552,8 +552,8 @@
   }
 
   if (instISAC->encoderSamplingRateKHz == kIsacSuperWideband) {
-    WebRtcSpl_AnalysisQMF(speech_in_ptr, speechInLB, speechInUB,
-                          instISAC->analysisFBState1,
+    WebRtcSpl_AnalysisQMF(speech_in_ptr, SWBFRAMESAMPLES_10ms, speechInLB,
+                          speechInUB, instISAC->analysisFBState1,
                           instISAC->analysisFBState2);
 
     /* Convert from fixed to floating point. */
@@ -1314,7 +1314,7 @@
     speechIdx = 0;
     while (speechIdx < numSamplesLB) {
       WebRtcSpl_SynthesisQMF(&outFrameLB[speechIdx], &outFrameUB[speechIdx],
-                             &decoded[(speechIdx << 1)],
+                             FRAMESAMPLES_10ms, &decoded[(speechIdx << 1)],
                              instISAC->synthesisFBState1,
                              instISAC->synthesisFBState2);
 
diff --git a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
index c5cf137..a8fbe07 100644
--- a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
+++ b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
@@ -165,10 +165,6 @@
     if(SetOutputFrequency(kDefaultFrequency) == -1)
         return false;
 
-    // Assume mono.
-    if (!SetNumLimiterChannels(1))
-        return false;
-
     if(_limiter->gain_control()->set_mode(GainControl::kFixedDigital) !=
         _limiter->kNoError)
         return false;
@@ -326,9 +322,6 @@
             std::max(MaxNumChannels(additionalFramesList),
                      MaxNumChannels(rampOutList)));
 
-        if (!SetNumLimiterChannels(num_mixed_channels))
-            retval = -1;
-
         mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency,
                                 AudioFrame::kNormalSpeech,
                                 AudioFrame::kVadPassive, num_mixed_channels);
@@ -434,13 +427,6 @@
     const Frequency frequency)
 {
     CriticalSectionScoped cs(_crit.get());
-    const int error = _limiter->set_sample_rate_hz(frequency);
-    if(error != _limiter->kNoError)
-    {
-        WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
-                     "Error from AudioProcessing: %d", error);
-        return -1;
-    }
 
     _outputFrequency = frequency;
     _sampleSize = (_outputFrequency*kProcessPeriodicityInMs) / 1000;
@@ -455,24 +441,6 @@
     return _outputFrequency;
 }
 
-bool AudioConferenceMixerImpl::SetNumLimiterChannels(int numChannels)
-{
-    if(_limiter->num_input_channels() != numChannels)
-    {
-        const int error = _limiter->set_num_channels(numChannels,
-                                                     numChannels);
-        if(error != _limiter->kNoError)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
-                         "Error from AudioProcessing: %d", error);
-            assert(false);
-            return false;
-        }
-    }
-
-    return true;
-}
-
 int32_t AudioConferenceMixerImpl::RegisterMixerStatusCallback(
     AudioMixerStatusReceiver& mixerStatusCallback,
     const uint32_t amountOf10MsBetweenCallbacks)
diff --git a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
index 737acbb..a0123f9 100644
--- a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
+++ b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
@@ -89,10 +89,6 @@
     int32_t SetOutputFrequency(const Frequency frequency);
     Frequency OutputFrequency() const;
 
-    // Must be called whenever an audio frame indicates the number of channels
-    // has changed.
-    bool SetNumLimiterChannels(int numChannels);
-
     // Fills mixList with the AudioFrame pointers that should be used when
     // mixing. Fills mixParticipantList with ParticipantStatistics for the
     // participants whose AudioFrames are inside mixList.
diff --git a/webrtc/modules/audio_processing/audio_processing.gypi b/webrtc/modules/audio_processing/audio_processing.gypi
index 336b4ee..6512f03 100644
--- a/webrtc/modules/audio_processing/audio_processing.gypi
+++ b/webrtc/modules/audio_processing/audio_processing.gypi
@@ -67,8 +67,6 @@
         'level_estimator_impl.h',
         'noise_suppression_impl.cc',
         'noise_suppression_impl.h',
-        'splitting_filter.cc',
-        'splitting_filter.h',
         'processing_component.cc',
         'processing_component.h',
         'utility/delay_estimator.c',
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc
index 4d36ff7..6a89b9c 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.cc
+++ b/webrtc/modules/audio_processing/audio_processing_impl.cc
@@ -12,6 +12,7 @@
 
 #include <assert.h>
 
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/audio_processing/audio_buffer.h"
 #include "webrtc/modules/audio_processing/echo_cancellation_impl_wrapper.h"
 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h"
@@ -20,9 +21,9 @@
 #include "webrtc/modules/audio_processing/level_estimator_impl.h"
 #include "webrtc/modules/audio_processing/noise_suppression_impl.h"
 #include "webrtc/modules/audio_processing/processing_component.h"
-#include "webrtc/modules/audio_processing/splitting_filter.h"
 #include "webrtc/modules/audio_processing/voice_detection_impl.h"
 #include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/system_wrappers/interface/compile_assert.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/file_wrapper.h"
 #include "webrtc/system_wrappers/interface/logging.h"
@@ -36,9 +37,23 @@
 #endif
 #endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 
+static const int kChunkSizeMs = 10;
+
+#define RETURN_ON_ERR(expr)  \
+  do {                       \
+    int err = expr;          \
+    if (err != kNoError) {   \
+      return err;            \
+    }                        \
+  } while (0)
+
 namespace webrtc {
+
+// Throughout webrtc, it's assumed that success is represented by zero.
+COMPILE_ASSERT(AudioProcessing::kNoError == 0, no_error_must_be_zero);
+
 AudioProcessing* AudioProcessing::Create(int id) {
-  AudioProcessingImpl* apm = new AudioProcessingImpl(id);
+  AudioProcessingImpl* apm = new AudioProcessingImpl();
   if (apm->Initialize() != kNoError) {
     delete apm;
     apm = NULL;
@@ -50,9 +65,8 @@
 int32_t AudioProcessing::TimeUntilNextProcess() { return -1; }
 int32_t AudioProcessing::Process() { return -1; }
 
-AudioProcessingImpl::AudioProcessingImpl(int id)
-    : id_(id),
-      echo_cancellation_(NULL),
+AudioProcessingImpl::AudioProcessingImpl()
+    : echo_cancellation_(NULL),
       echo_control_mobile_(NULL),
       gain_control_(NULL),
       high_pass_filter_(NULL),
@@ -68,7 +82,7 @@
 #endif
       sample_rate_hz_(kSampleRate16kHz),
       split_sample_rate_hz_(kSampleRate16kHz),
-      samples_per_channel_(sample_rate_hz_ / 100),
+      samples_per_channel_(kChunkSizeMs * sample_rate_hz_ / 1000),
       stream_delay_ms_(0),
       delay_offset_ms_(0),
       was_stream_delay_set_(false),
@@ -157,8 +171,6 @@
   capture_audio_ = new AudioBuffer(num_input_channels_,
                                    samples_per_channel_);
 
-  was_stream_delay_set_ = false;
-
   // Initialize all components.
   std::list<ProcessingComponent*>::iterator it;
   for (it = component_list_.begin(); it != component_list_.end(); ++it) {
@@ -272,6 +284,49 @@
   return num_output_channels_;
 }
 
+int AudioProcessingImpl::MaybeInitializeLocked(int sample_rate_hz,
+    int num_input_channels, int num_output_channels, int num_reverse_channels) {
+  if (sample_rate_hz == sample_rate_hz_ &&
+      num_input_channels == num_input_channels_ &&
+      num_output_channels == num_output_channels_ &&
+      num_reverse_channels == num_reverse_channels_) {
+    return kNoError;
+  }
+
+  if (sample_rate_hz != kSampleRate8kHz &&
+      sample_rate_hz != kSampleRate16kHz &&
+      sample_rate_hz != kSampleRate32kHz) {
+    return kBadSampleRateError;
+  }
+  if (num_output_channels > num_input_channels) {
+    return kBadNumberChannelsError;
+  }
+  // Only mono and stereo supported currently.
+  if (num_input_channels > 2 || num_input_channels < 1 ||
+      num_output_channels > 2 || num_output_channels < 1 ||
+      num_reverse_channels > 2 || num_reverse_channels < 1) {
+    return kBadNumberChannelsError;
+  }
+  if (echo_control_mobile_->is_enabled() && sample_rate_hz > kSampleRate16kHz) {
+    LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
+    return kUnsupportedComponentError;
+  }
+
+  sample_rate_hz_ = sample_rate_hz;
+  samples_per_channel_ = kChunkSizeMs * sample_rate_hz / 1000;
+  num_input_channels_ = num_input_channels;
+  num_output_channels_ = num_output_channels;
+  num_reverse_channels_ = num_reverse_channels;
+
+  if (sample_rate_hz_ == kSampleRate32kHz) {
+    split_sample_rate_hz_ = kSampleRate16kHz;
+  } else {
+    split_sample_rate_hz_ = sample_rate_hz_;
+  }
+
+  return InitializeLocked();
+}
+
 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
   CriticalSectionScoped crit_scoped(crit_);
   int err = kNoError;
@@ -279,15 +334,10 @@
   if (frame == NULL) {
     return kNullPointerError;
   }
-
-  if (frame->sample_rate_hz_ != sample_rate_hz_) {
-    return kBadSampleRateError;
-  }
-
-  if (frame->num_channels_ != num_input_channels_) {
-    return kBadNumberChannelsError;
-  }
-
+  // TODO(ajm): We now always set the output channels equal to the input
+  // channels here. Remove the ability to downmix entirely.
+  RETURN_ON_ERR(MaybeInitializeLocked(frame->sample_rate_hz_,
+      frame->num_channels_, frame->num_channels_, num_reverse_channels_));
   if (frame->samples_per_channel_ != samples_per_channel_) {
     return kBadDataLengthError;
   }
@@ -318,11 +368,12 @@
   if (analysis_needed(data_processed)) {
     for (int i = 0; i < num_output_channels_; i++) {
       // Split into a low and high band.
-      SplittingFilterAnalysis(capture_audio_->data(i),
-                              capture_audio_->low_pass_split_data(i),
-                              capture_audio_->high_pass_split_data(i),
-                              capture_audio_->analysis_filter_state1(i),
-                              capture_audio_->analysis_filter_state2(i));
+      WebRtcSpl_AnalysisQMF(capture_audio_->data(i),
+                            capture_audio_->samples_per_channel(),
+                            capture_audio_->low_pass_split_data(i),
+                            capture_audio_->high_pass_split_data(i),
+                            capture_audio_->analysis_filter_state1(i),
+                            capture_audio_->analysis_filter_state2(i));
     }
   }
 
@@ -369,11 +420,12 @@
   if (synthesis_needed(data_processed)) {
     for (int i = 0; i < num_output_channels_; i++) {
       // Recombine low and high bands.
-      SplittingFilterSynthesis(capture_audio_->low_pass_split_data(i),
-                               capture_audio_->high_pass_split_data(i),
-                               capture_audio_->data(i),
-                               capture_audio_->synthesis_filter_state1(i),
-                               capture_audio_->synthesis_filter_state2(i));
+      WebRtcSpl_SynthesisQMF(capture_audio_->low_pass_split_data(i),
+                             capture_audio_->high_pass_split_data(i),
+                             capture_audio_->samples_per_split_channel(),
+                             capture_audio_->data(i),
+                             capture_audio_->synthesis_filter_state1(i),
+                             capture_audio_->synthesis_filter_state2(i));
     }
   }
 
@@ -403,25 +455,21 @@
   return kNoError;
 }
 
+// TODO(ajm): Have AnalyzeReverseStream accept sample rates not matching the
+// primary stream and convert ourselves rather than having the user manage it.
+// We can be smarter and use the splitting filter when appropriate. Similarly,
+// perform downmixing here.
 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
   CriticalSectionScoped crit_scoped(crit_);
   int err = kNoError;
-
   if (frame == NULL) {
     return kNullPointerError;
   }
-
   if (frame->sample_rate_hz_ != sample_rate_hz_) {
     return kBadSampleRateError;
   }
-
-  if (frame->num_channels_ != num_reverse_channels_) {
-    return kBadNumberChannelsError;
-  }
-
-  if (frame->samples_per_channel_ != samples_per_channel_) {
-    return kBadDataLengthError;
-  }
+  RETURN_ON_ERR(MaybeInitializeLocked(sample_rate_hz_, num_input_channels_,
+      num_output_channels_, frame->num_channels_));
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_file_->Open()) {
@@ -440,15 +488,15 @@
 
   render_audio_->DeinterleaveFrom(frame);
 
-  // TODO(ajm): turn the splitting filter into a component?
   if (sample_rate_hz_ == kSampleRate32kHz) {
     for (int i = 0; i < num_reverse_channels_; i++) {
       // Split into low and high band.
-      SplittingFilterAnalysis(render_audio_->data(i),
-                              render_audio_->low_pass_split_data(i),
-                              render_audio_->high_pass_split_data(i),
-                              render_audio_->analysis_filter_state1(i),
-                              render_audio_->analysis_filter_state2(i));
+      WebRtcSpl_AnalysisQMF(render_audio_->data(i),
+                            render_audio_->samples_per_channel(),
+                            render_audio_->low_pass_split_data(i),
+                            render_audio_->high_pass_split_data(i),
+                            render_audio_->analysis_filter_state1(i),
+                            render_audio_->analysis_filter_state2(i));
     }
   }
 
@@ -614,9 +662,6 @@
 }
 
 int32_t AudioProcessingImpl::ChangeUniqueId(const int32_t id) {
-  CriticalSectionScoped crit_scoped(crit_);
-  id_ = id;
-
   return kNoError;
 }
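
The COMPILE_ASSERT above exists because callers treat the return value
as a boolean; a minimal sketch of that pattern (mirroring the
voice_engine call sites later in this change; apm and frame are
assumed):

    int err = apm->ProcessStream(&frame);
    if (err) {  // Correct only because kNoError == 0.
      LOG(LS_ERROR) << "ProcessStream() error: " << err;
    }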
 
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.h b/webrtc/modules/audio_processing/audio_processing_impl.h
index e48a2c1..caf93dc 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.h
+++ b/webrtc/modules/audio_processing/audio_processing_impl.h
@@ -47,7 +47,7 @@
     kSampleRate32kHz = 32000
   };
 
-  explicit AudioProcessingImpl(int id);
+  AudioProcessingImpl();
   virtual ~AudioProcessingImpl();
 
   CriticalSectionWrapper* crit() const;
@@ -57,7 +57,6 @@
 
   // AudioProcessing methods.
   virtual int Initialize() OVERRIDE;
-  virtual int InitializeLocked();
   virtual void SetExtraOptions(const Config& config) OVERRIDE;
   virtual int EnableExperimentalNs(bool enable) OVERRIDE;
   virtual bool experimental_ns_enabled() const OVERRIDE {
@@ -92,14 +91,17 @@
   // Module methods.
   virtual int32_t ChangeUniqueId(const int32_t id) OVERRIDE;
 
+ protected:
+  virtual int InitializeLocked();
+
  private:
+  int MaybeInitializeLocked(int sample_rate_hz, int num_input_channels,
+                            int num_output_channels, int num_reverse_channels);
   bool is_data_processed() const;
   bool interleave_needed(bool is_data_processed) const;
   bool synthesis_needed(bool is_data_processed) const;
   bool analysis_needed(bool is_data_processed) const;
 
-  int id_;
-
   EchoCancellationImplWrapper* echo_cancellation_;
   EchoControlMobileImpl* echo_control_mobile_;
   GainControlImpl* gain_control_;
@@ -118,8 +120,8 @@
   int WriteMessageToDebugFile();
   int WriteInitMessage();
   scoped_ptr<FileWrapper> debug_file_;
-  scoped_ptr<audioproc::Event> event_msg_; // Protobuf message.
-  std::string event_str_; // Memory for protobuf serialization.
+  scoped_ptr<audioproc::Event> event_msg_;  // Protobuf message.
+  std::string event_str_;  // Memory for protobuf serialization.
 #endif
 
   int sample_rate_hz_;
diff --git a/webrtc/modules/audio_processing/audio_processing_impl_unittest.cc b/webrtc/modules/audio_processing/audio_processing_impl_unittest.cc
new file mode 100644
index 0000000..75b9230
--- /dev/null
+++ b/webrtc/modules/audio_processing/audio_processing_impl_unittest.cc
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing/audio_processing_impl.h"
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_processing/test/test_utils.h"
+#include "webrtc/modules/interface/module_common_types.h"
+
+using ::testing::Invoke;
+using ::testing::Return;
+
+namespace webrtc {
+
+class MockInitialize : public AudioProcessingImpl {
+ public:
+  MOCK_METHOD0(InitializeLocked, int());
+
+  int RealInitializeLocked() { return AudioProcessingImpl::InitializeLocked(); }
+};
+
+TEST(AudioProcessingImplTest, AudioParameterChangeTriggersInit) {
+  MockInitialize mock;
+  ON_CALL(mock, InitializeLocked())
+      .WillByDefault(Invoke(&mock, &MockInitialize::RealInitializeLocked));
+
+  EXPECT_CALL(mock, InitializeLocked()).Times(1);
+  mock.Initialize();
+
+  AudioFrame frame;
+  // Call with the default parameters; there should be no init.
+  frame.num_channels_ = 1;
+  SetFrameSampleRate(&frame, 16000);
+  EXPECT_CALL(mock, InitializeLocked())
+      .Times(0);
+  EXPECT_EQ(kNoErr, mock.ProcessStream(&frame));
+  EXPECT_EQ(kNoErr, mock.AnalyzeReverseStream(&frame));
+
+  // New sample rate (only impacts ProcessStream).
+  SetFrameSampleRate(&frame, 32000);
+  EXPECT_CALL(mock, InitializeLocked())
+      .Times(1);
+  EXPECT_EQ(kNoErr, mock.ProcessStream(&frame));
+
+  // New number of channels.
+  frame.num_channels_ = 2;
+  EXPECT_CALL(mock, InitializeLocked())
+      .Times(2);
+  EXPECT_EQ(kNoErr, mock.ProcessStream(&frame));
+  // ProcessStream sets num_channels_ == num_output_channels.
+  frame.num_channels_ = 2;
+  EXPECT_EQ(kNoErr, mock.AnalyzeReverseStream(&frame));
+
+  // A new sample rate passed to AnalyzeReverseStream should be an error and
+  // not cause an init.
+  SetFrameSampleRate(&frame, 16000);
+  EXPECT_CALL(mock, InitializeLocked())
+      .Times(0);
+  EXPECT_EQ(mock.kBadSampleRateError, mock.AnalyzeReverseStream(&frame));
+}
+
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_processing/echo_cancellation_impl.cc b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
index cd12363..8fa86c3 100644
--- a/webrtc/modules/audio_processing/echo_cancellation_impl.cc
+++ b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
@@ -336,8 +336,6 @@
     return err;
   }
 
-  was_stream_drift_set_ = false;
-
   return apm_->kNoError;
 }
 
diff --git a/webrtc/modules/audio_processing/gain_control_impl.cc b/webrtc/modules/audio_processing/gain_control_impl.cc
index 3554703..d5e76b3 100644
--- a/webrtc/modules/audio_processing/gain_control_impl.cc
+++ b/webrtc/modules/audio_processing/gain_control_impl.cc
@@ -91,6 +91,7 @@
   int err = apm_->kNoError;
 
   if (mode_ == kAdaptiveAnalog) {
+    capture_levels_.assign(num_handles(), analog_capture_level_);
     for (int i = 0; i < num_handles(); i++) {
       Handle* my_handle = static_cast<Handle*>(handle(i));
       err = WebRtcAgc_AddMic(
@@ -114,7 +115,6 @@
           audio->low_pass_split_data(i),
           audio->high_pass_split_data(i),
           static_cast<int16_t>(audio->samples_per_split_channel()),
-          //capture_levels_[i],
           analog_capture_level_,
           &capture_level_out);
 
@@ -190,13 +190,6 @@
   if (level < minimum_capture_level_ || level > maximum_capture_level_) {
     return apm_->kBadParameterError;
   }
-
-  if (mode_ == kAdaptiveAnalog) {
-    if (level != analog_capture_level_) {
-      // The analog level has been changed; update our internal levels.
-      capture_levels_.assign(num_handles(), level);
-    }
-  }
   analog_capture_level_ = level;
 
   return apm_->kNoError;
@@ -309,11 +302,6 @@
     return err;
   }
 
-  analog_capture_level_ =
-      (maximum_capture_level_ - minimum_capture_level_) >> 1;
-  capture_levels_.assign(num_handles(), analog_capture_level_);
-  was_analog_level_set_ = false;
-
   return apm_->kNoError;
 }
 
diff --git a/webrtc/modules/audio_processing/include/audio_processing.h b/webrtc/modules/audio_processing/include/audio_processing.h
index b5c856d..6ed23f4 100644
--- a/webrtc/modules/audio_processing/include/audio_processing.h
+++ b/webrtc/modules/audio_processing/include/audio_processing.h
@@ -89,11 +89,6 @@
 //
 // Usage example, omitting error checking:
 // AudioProcessing* apm = AudioProcessing::Create(0);
-// apm->set_sample_rate_hz(32000); // Super-wideband processing.
-//
-// // Mono capture and stereo render.
-// apm->set_num_channels(1, 1);
-// apm->set_num_reverse_channels(2);
 //
 // apm->high_pass_filter()->Enable(true);
 //
@@ -145,11 +140,9 @@
   // Initializes internal states, while retaining all user settings. This
   // should be called before beginning to process a new audio stream. However,
   // it is not necessary to call before processing the first stream after
-  // creation.
-  //
-  // set_sample_rate_hz(), set_num_channels() and set_num_reverse_channels()
-  // will trigger a full initialization if the settings are changed from their
-  // existing values. Otherwise they are no-ops.
+  // creation. It is also not necessary to call when the audio parameters
+  // (sample rate and number of channels) change; passing updated parameters
+  // directly to |ProcessStream()| and |AnalyzeReverseStream()| is permissible.
   virtual int Initialize() = 0;
 
   // Pass down additional options which don't have explicit setters. This
@@ -159,11 +152,15 @@
   virtual int EnableExperimentalNs(bool enable) = 0;
   virtual bool experimental_ns_enabled() const = 0;
 
+  // DEPRECATED: It is now possible to modify the sample rate directly in a call
+  // to |ProcessStream|.
   // Sets the sample |rate| in Hz for both the primary and reverse audio
   // streams. 8000, 16000 or 32000 Hz are permitted.
   virtual int set_sample_rate_hz(int rate) = 0;
   virtual int sample_rate_hz() const = 0;
 
+  // DEPRECATED: It is now possible to modify the number of channels directly in
+  // a call to |ProcessStream|.
   // Sets the number of channels for the primary audio stream. Input frames must
   // contain a number of channels given by |input_channels|, while output frames
   // will be returned with number of channels given by |output_channels|.
@@ -171,6 +168,8 @@
   virtual int num_input_channels() const = 0;
   virtual int num_output_channels() const = 0;
 
+  // DEPRECATED: It is now possible to modify the number of channels directly in
+  // a call to |AnalyzeReverseStream|.
   // Sets the number of channels for the reverse audio stream. Input frames must
   // contain a number of channels given by |channels|.
   virtual int set_num_reverse_channels(int channels) = 0;
@@ -184,8 +183,8 @@
   // with the stream_ tag which is needed should be called after processing.
   //
   // The |sample_rate_hz_|, |num_channels_|, and |samples_per_channel_|
-  // members of |frame| must be valid, and correspond to settings supplied
-  // to APM.
+  // members of |frame| must be valid. If they differ from the previous call
+  // to this method, an initialization will be triggered.
   virtual int ProcessStream(AudioFrame* frame) = 0;
 
   // Analyzes a 10 ms |frame| of the reverse direction audio stream. The frame
@@ -199,7 +198,8 @@
   // chances are you don't need to use it.
   //
   // The |sample_rate_hz_|, |num_channels_|, and |samples_per_channel_|
-  // members of |frame| must be valid.
+  // members of |frame| must be valid. |sample_rate_hz_| must correspond to
+  // |sample_rate_hz()|.
   //
   // TODO(ajm): add const to input; requires an implementation fix.
   virtual int AnalyzeReverseStream(AudioFrame* frame) = 0;
diff --git a/webrtc/modules/audio_processing/splitting_filter.cc b/webrtc/modules/audio_processing/splitting_filter.cc
deleted file mode 100644
index 372c8dc..0000000
--- a/webrtc/modules/audio_processing/splitting_filter.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
-#include "webrtc/modules/audio_processing/splitting_filter.h"
-
-namespace webrtc {
-
-void SplittingFilterAnalysis(const int16_t* in_data,
-                             int16_t* low_band,
-                             int16_t* high_band,
-                             int32_t* filter_state1,
-                             int32_t* filter_state2)
-{
-    WebRtcSpl_AnalysisQMF(in_data, low_band, high_band, filter_state1, filter_state2);
-}
-
-void SplittingFilterSynthesis(const int16_t* low_band,
-                              const int16_t* high_band,
-                              int16_t* out_data,
-                              int32_t* filt_state1,
-                              int32_t* filt_state2)
-{
-    WebRtcSpl_SynthesisQMF(low_band, high_band, out_data, filt_state1, filt_state2);
-}
-}  // namespace webrtc
diff --git a/webrtc/modules/audio_processing/splitting_filter.h b/webrtc/modules/audio_processing/splitting_filter.h
deleted file mode 100644
index b6c8512..0000000
--- a/webrtc/modules/audio_processing/splitting_filter.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_
-
-#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-/*
- * SplittingFilterbank_analysisQMF(...)
- *
- * Splits a super-wb signal into two subbands: 0-8 kHz and 8-16 kHz.
- *
- * Input:
- *    - in_data  : super-wb audio signal
- *
- * Input & Output:
- *    - filt_state1: Filter state for first all-pass filter
- *    - filt_state2: Filter state for second all-pass filter
- *
- * Output:
- *    - low_band : The signal from the 0-4 kHz band
- *    - high_band  : The signal from the 4-8 kHz band
- */
-void SplittingFilterAnalysis(const int16_t* in_data,
-                             int16_t* low_band,
-                             int16_t* high_band,
-                             int32_t* filt_state1,
-                             int32_t* filt_state2);
-
-/*
- * SplittingFilterbank_synthesisQMF(...)
- *
- * Combines the two subbands (0-8 and 8-16 kHz) into a super-wb signal.
- *
- * Input:
- *    - low_band : The signal with the 0-8 kHz band
- *    - high_band  : The signal with the 8-16 kHz band
- *
- * Input & Output:
- *    - filt_state1: Filter state for first all-pass filter
- *    - filt_state2: Filter state for second all-pass filter
- *
- * Output:
- *    - out_data : super-wb speech signal
- */
-void SplittingFilterSynthesis(const int16_t* low_band,
-                              const int16_t* high_band,
-                              int16_t* out_data,
-                              int32_t* filt_state1,
-                              int32_t* filt_state2);
-}  // namespace webrtc
-
-#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_
diff --git a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
index 2d434c1..2650deb 100644
--- a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
+++ b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
@@ -15,6 +15,7 @@
 
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/audio_processing/test/test_utils.h"
 #include "webrtc/modules/interface/module_common_types.h"
 #include "webrtc/system_wrappers/interface/event_wrapper.h"
 #include "webrtc/system_wrappers/interface/scoped_ptr.h"
@@ -230,11 +231,10 @@
   void EnableAllComponents();
   bool ReadFrame(FILE* file, AudioFrame* frame);
   void ProcessWithDefaultStreamParameters(AudioFrame* frame);
-  template <typename F>
-  void ChangeTriggersInit(F f, AudioProcessing* ap, int initial_value,
-                          int changed_value);
   void ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
                                     int delay_min, int delay_max);
+  void TestChangingChannels(int num_channels,
+                            AudioProcessing::Error expected_return);
 
   const std::string output_path_;
   const std::string ref_path_;
@@ -330,17 +330,8 @@
 void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,
                    int num_input_channels, int num_output_channels,
                    bool open_output_file) {
-  ASSERT_EQ(apm_->kNoError, apm_->Initialize());
-
-  // Handles error checking of the parameters as well. No need to repeat it.
-  ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(sample_rate_hz));
-  ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(num_input_channels,
-                                                   num_output_channels));
-  ASSERT_EQ(apm_->kNoError,
-            apm_->set_num_reverse_channels(num_reverse_channels));
-
   // We always use 10 ms frames.
-  const int samples_per_channel = sample_rate_hz / 100;
+  const int samples_per_channel = kChunkSizeMs * sample_rate_hz / 1000;
   frame_->samples_per_channel_ = samples_per_channel;
   frame_->num_channels_ = num_input_channels;
   frame_->sample_rate_hz_ = sample_rate_hz;
@@ -348,6 +339,12 @@
   revframe_->num_channels_ = num_reverse_channels;
   revframe_->sample_rate_hz_ = sample_rate_hz;
 
+  // Make one process call to ensure the audio parameters are set. It might
+  // result in a stream error, which we can safely ignore.
+  int err = apm_->ProcessStream(frame_);
+  ASSERT_TRUE(err == kNoErr || err == apm_->kStreamParameterNotSetError);
+  ASSERT_EQ(apm_->kNoError, apm_->Initialize());
+
   if (far_file_) {
     ASSERT_EQ(0, fclose(far_file_));
   }
@@ -378,7 +375,6 @@
 
 void ApmTest::EnableAllComponents() {
 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
-  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
   EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
 
   EXPECT_EQ(apm_->kNoError,
@@ -442,50 +438,6 @@
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame));
 }
 
-template <typename F>
-void ApmTest::ChangeTriggersInit(F f, AudioProcessing* ap, int initial_value,
-                                 int changed_value) {
-  EnableAllComponents();
-  Init(16000, 2, 2, 2, false);
-  SetFrameTo(frame_, 1000);
-  AudioFrame frame_copy;
-  frame_copy.CopyFrom(*frame_);
-  ProcessWithDefaultStreamParameters(frame_);
-  // Verify the processing has actually changed the frame.
-  EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
-
-  // Test that a change in value triggers an init.
-  f(apm_.get(), changed_value);
-  f(apm_.get(), initial_value);
-  ProcessWithDefaultStreamParameters(&frame_copy);
-  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
-
-  apm_->Initialize();
-  SetFrameTo(frame_, 1000);
-  AudioFrame initial_frame;
-  initial_frame.CopyFrom(*frame_);
-  ProcessWithDefaultStreamParameters(frame_);
-  ProcessWithDefaultStreamParameters(frame_);
-  // Verify the processing has actually changed the frame.
-  EXPECT_FALSE(FrameDataAreEqual(*frame_, initial_frame));
-
-  frame_copy.CopyFrom(initial_frame);
-  apm_->Initialize();
-  ProcessWithDefaultStreamParameters(&frame_copy);
-  // Verify an init here would result in different output.
-  apm_->Initialize();
-  ProcessWithDefaultStreamParameters(&frame_copy);
-  EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
-
-  frame_copy.CopyFrom(initial_frame);
-  apm_->Initialize();
-  ProcessWithDefaultStreamParameters(&frame_copy);
-  // Test that the same value does not trigger an init.
-  f(apm_.get(), initial_value);
-  ProcessWithDefaultStreamParameters(&frame_copy);
-  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
-}
-
 void ApmTest::ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
                                            int delay_min, int delay_max) {
   // The |revframe_| and |frame_| should include the proper frame information,
@@ -579,7 +531,6 @@
             apm_->ProcessStream(frame_));
 
   // -- Missing AGC level --
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
   EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
 
@@ -602,8 +553,8 @@
             apm_->echo_cancellation()->enable_drift_compensation(false));
 
   // -- Missing delay --
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
 
   // Resets after successful ProcessStream().
@@ -622,7 +573,6 @@
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
 
   // -- Missing drift --
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
   EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
 
   // Resets after successful ProcessStream().
@@ -639,14 +589,12 @@
   EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
 
   // -- No stream parameters --
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
   EXPECT_EQ(apm_->kNoError,
             apm_->AnalyzeReverseStream(revframe_));
   EXPECT_EQ(apm_->kStreamParameterNotSetError,
             apm_->ProcessStream(frame_));
 
   // -- All there --
-  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
   apm_->echo_cancellation()->set_stream_drift_samples(0);
   EXPECT_EQ(apm_->kNoError,
@@ -678,65 +626,38 @@
   EXPECT_EQ(50, apm_->stream_delay_ms());
 }
 
+void ApmTest::TestChangingChannels(int num_channels,
+                                   AudioProcessing::Error expected_return) {
+  frame_->num_channels_ = num_channels;
+  EXPECT_EQ(expected_return, apm_->ProcessStream(frame_));
+  EXPECT_EQ(expected_return, apm_->AnalyzeReverseStream(frame_));
+}
+
 TEST_F(ApmTest, Channels) {
-  // Testing number of invalid channels
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(0, 1));
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 0));
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(3, 1));
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 3));
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(0));
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(3));
-  // Testing number of valid channels
+  // Testing number of invalid channels.
+  TestChangingChannels(0, apm_->kBadNumberChannelsError);
+  TestChangingChannels(3, apm_->kBadNumberChannelsError);
+  // Testing number of valid channels.
   for (int i = 1; i < 3; i++) {
-    for (int j = 1; j < 3; j++) {
-      if (j > i) {
-        EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(i, j));
-      } else {
-        EXPECT_EQ(apm_->kNoError, apm_->set_num_channels(i, j));
-        EXPECT_EQ(j, apm_->num_output_channels());
-      }
-    }
+    TestChangingChannels(i, kNoErr);
     EXPECT_EQ(i, apm_->num_input_channels());
-    EXPECT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(i));
     EXPECT_EQ(i, apm_->num_reverse_channels());
   }
 }
 
 TEST_F(ApmTest, SampleRates) {
   // Testing invalid sample rates
-  EXPECT_EQ(apm_->kBadParameterError, apm_->set_sample_rate_hz(10000));
+  SetFrameSampleRate(frame_, 10000);
+  EXPECT_EQ(apm_->kBadSampleRateError, apm_->ProcessStream(frame_));
   // Testing valid sample rates
   int fs[] = {8000, 16000, 32000};
   for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
-    EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(fs[i]));
+    SetFrameSampleRate(frame_, fs[i]);
+    EXPECT_EQ(kNoErr, apm_->ProcessStream(frame_));
     EXPECT_EQ(fs[i], apm_->sample_rate_hz());
   }
 }
 
-void SetSampleRate(AudioProcessing* ap, int value) {
-  EXPECT_EQ(ap->kNoError, ap->set_sample_rate_hz(value));
-}
-
-void SetNumReverseChannels(AudioProcessing* ap, int value) {
-  EXPECT_EQ(ap->kNoError, ap->set_num_reverse_channels(value));
-}
-
-void SetNumOutputChannels(AudioProcessing* ap, int value) {
-  EXPECT_EQ(ap->kNoError, ap->set_num_channels(2, value));
-}
-
-TEST_F(ApmTest, SampleRateChangeTriggersInit) {
-  ChangeTriggersInit(SetSampleRate, apm_.get(), 16000, 8000);
-}
-
-TEST_F(ApmTest, ReverseChannelChangeTriggersInit) {
-  ChangeTriggersInit(SetNumReverseChannels, apm_.get(), 2, 1);
-}
-
-TEST_F(ApmTest, ChannelChangeTriggersInit) {
-  ChangeTriggersInit(SetNumOutputChannels, apm_.get(), 2, 1);
-}
-
 TEST_F(ApmTest, EchoCancellation) {
   EXPECT_EQ(apm_->kNoError,
             apm_->echo_cancellation()->enable_drift_compensation(true));
@@ -838,7 +759,7 @@
     Init(kProcessSampleRates[i], 2, 2, 2, false);
     // Sampling frequency dependent variables.
     const int num_ms_per_block = std::max(4,
-                                           640 / frame_->samples_per_channel_);
+                                          640 / frame_->samples_per_channel_);
     const int delay_min_ms = -kLookaheadBlocks * num_ms_per_block;
     const int delay_max_ms = (kMaxDelayBlocks - 1) * num_ms_per_block;
 
@@ -876,13 +797,16 @@
 
 TEST_F(ApmTest, EchoControlMobile) {
   // AECM won't use super-wideband.
-  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
+  SetFrameSampleRate(frame_, 32000);
+  EXPECT_EQ(kNoErr, apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kBadSampleRateError,
             apm_->echo_control_mobile()->Enable(true));
-  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
+  SetFrameSampleRate(frame_, 16000);
+  EXPECT_EQ(kNoErr, apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kNoError,
             apm_->echo_control_mobile()->Enable(true));
-  EXPECT_EQ(apm_->kUnsupportedComponentError, apm_->set_sample_rate_hz(32000));
+  SetFrameSampleRate(frame_, 32000);
+  EXPECT_EQ(apm_->kUnsupportedComponentError, apm_->ProcessStream(frame_));
 
   // Turn AECM on (and AEC off)
   Init(16000, 2, 2, 2, false);
@@ -1088,7 +1012,6 @@
 
   // Run this test in wideband; in super-wb, the splitting filter distorts the
   // audio enough to cause deviation from the expectation for small values.
-  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
   frame_->samples_per_channel_ = 160;
   frame_->num_channels_ = 2;
   frame_->sample_rate_hz_ = 16000;
@@ -1214,19 +1137,6 @@
   // TODO(bjornv): Add tests for streamed voice; stream_has_voice()
 }
 
-TEST_F(ApmTest, VerifyDownMixing) {
-  for (size_t i = 0; i < kSampleRatesSize; i++) {
-    Init(kSampleRates[i], 2, 2, 1, false);
-    SetFrameTo(frame_, 1000, 2000);
-    AudioFrame mono_frame;
-    mono_frame.samples_per_channel_ = frame_->samples_per_channel_;
-    mono_frame.num_channels_ = 1;
-    SetFrameTo(&mono_frame, 1500);
-    EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
-    EXPECT_TRUE(FrameDataAreEqual(*frame_, mono_frame));
-  }
-}
-
 TEST_F(ApmTest, AllProcessingDisabledByDefault) {
   EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
   EXPECT_FALSE(apm_->echo_control_mobile()->is_enabled());
@@ -1322,7 +1232,6 @@
   EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
 
   // 5. Not using super-wb.
-  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
   frame_->samples_per_channel_ = 160;
   frame_->num_channels_ = 2;
   frame_->sample_rate_hz_ = 16000;
@@ -1343,7 +1252,6 @@
 
   // Check the test is valid. We should have distortion from the filter
   // when AEC is enabled (which won't affect the audio).
-  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
   frame_->samples_per_channel_ = 320;
   frame_->num_channels_ = 2;
   frame_->sample_rate_hz_ = 32000;
@@ -1366,8 +1274,8 @@
   EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
 
   EXPECT_EQ(apm_->kNoError, apm_->StartDebugRecording(filename.c_str()));
-  EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
   EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
 
   // Verify the file has been written.
@@ -1436,15 +1344,12 @@
     // Write the desired tests to the protobuf reference file.
     for (size_t i = 0; i < kChannelsSize; i++) {
       for (size_t j = 0; j < kChannelsSize; j++) {
-        // We can't have more output than input channels.
-        for (size_t k = 0; k <= j; k++) {
-          for (size_t l = 0; l < kProcessSampleRatesSize; l++) {
-            webrtc::audioproc::Test* test = ref_data.add_test();
-            test->set_num_reverse_channels(kChannels[i]);
-            test->set_num_input_channels(kChannels[j]);
-            test->set_num_output_channels(kChannels[k]);
-            test->set_sample_rate(kProcessSampleRates[l]);
-          }
+        for (size_t l = 0; l < kProcessSampleRatesSize; l++) {
+          webrtc::audioproc::Test* test = ref_data.add_test();
+          test->set_num_reverse_channels(kChannels[i]);
+          test->set_num_input_channels(kChannels[j]);
+          test->set_num_output_channels(kChannels[j]);
+          test->set_sample_rate(kProcessSampleRates[l]);
         }
       }
     }
@@ -1456,6 +1361,11 @@
     printf("Running test %d of %d...\n", i + 1, ref_data.test_size());
 
     webrtc::audioproc::Test* test = ref_data.mutable_test(i);
+    // TODO(ajm): We no longer allow different input and output channels. Skip
+    // these tests for now, but they should be removed from the set.
+    if (test->num_input_channels() != test->num_output_channels())
+      continue;
+
     Init(test->sample_rate(), test->num_reverse_channels(),
          test->num_input_channels(), test->num_output_channels(), true);
 
diff --git a/webrtc/modules/audio_processing/test/process_test.cc b/webrtc/modules/audio_processing/test/process_test.cc
index 6bf6a7d..9425905 100644
--- a/webrtc/modules/audio_processing/test/process_test.cc
+++ b/webrtc/modules/audio_processing/test/process_test.cc
@@ -232,9 +232,6 @@
       ASSERT_EQ(1, sscanf(argv[i], "%d", &sample_rate_hz));
       samples_per_channel = sample_rate_hz / 100;
 
-      ASSERT_EQ(apm->kNoError,
-                apm->set_sample_rate_hz(sample_rate_hz));
-
     } else if (strcmp(argv[i], "-ch") == 0) {
       i++;
       ASSERT_LT(i + 1, argc) << "Specify number of channels after -ch";
@@ -242,18 +239,11 @@
       i++;
       ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_output_channels));
 
-      ASSERT_EQ(apm->kNoError,
-                apm->set_num_channels(num_capture_input_channels,
-                                      num_capture_output_channels));
-
     } else if (strcmp(argv[i], "-rch") == 0) {
       i++;
       ASSERT_LT(i, argc) << "Specify number of channels after -rch";
       ASSERT_EQ(1, sscanf(argv[i], "%d", &num_render_channels));
 
-      ASSERT_EQ(apm->kNoError,
-                apm->set_num_reverse_channels(num_render_channels));
-
     } else if (strcmp(argv[i], "-aec") == 0) {
       ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
       ASSERT_EQ(apm->kNoError,
@@ -637,9 +627,6 @@
         const Init msg = event_msg.init();
 
         ASSERT_TRUE(msg.has_sample_rate());
-        ASSERT_EQ(apm->kNoError,
-            apm->set_sample_rate_hz(msg.sample_rate()));
-
         ASSERT_TRUE(msg.has_device_sample_rate());
         ASSERT_EQ(apm->kNoError,
                   apm->echo_cancellation()->set_device_sample_rate_hz(
@@ -647,13 +634,7 @@
 
         ASSERT_TRUE(msg.has_num_input_channels());
         ASSERT_TRUE(msg.has_num_output_channels());
-        ASSERT_EQ(apm->kNoError,
-            apm->set_num_channels(msg.num_input_channels(),
-                                  msg.num_output_channels()));
-
         ASSERT_TRUE(msg.has_num_reverse_channels());
-        ASSERT_EQ(apm->kNoError,
-            apm->set_num_reverse_channels(msg.num_reverse_channels()));
 
         samples_per_channel = msg.sample_rate() / 100;
         far_frame.sample_rate_hz_ = msg.sample_rate();
@@ -834,9 +815,6 @@
                   event_file));
 
         ASSERT_EQ(apm->kNoError,
-            apm->set_sample_rate_hz(sample_rate_hz));
-
-        ASSERT_EQ(apm->kNoError,
                   apm->echo_cancellation()->set_device_sample_rate_hz(
                       device_sample_rate_hz));
 
diff --git a/webrtc/modules/audio_processing/test/test_utils.h b/webrtc/modules/audio_processing/test/test_utils.h
new file mode 100644
index 0000000..452d843
--- /dev/null
+++ b/webrtc/modules/audio_processing/test/test_utils.h
@@ -0,0 +1,21 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/interface/module_common_types.h"
+
+static const int kChunkSizeMs = 10;
+static const webrtc::AudioProcessing::Error kNoErr =
+    webrtc::AudioProcessing::kNoError;
+
+static void SetFrameSampleRate(webrtc::AudioFrame* frame, int sample_rate_hz) {
+  frame->sample_rate_hz_ = sample_rate_hz;
+  frame->samples_per_channel_ = kChunkSizeMs * sample_rate_hz / 1000;
+}
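
Typical use of the helper (a sketch mirroring the unit tests above):

    webrtc::AudioFrame frame;
    frame.num_channels_ = 1;
    SetFrameSampleRate(&frame, 32000);  // Sets the rate and the 10 ms
                                        // sample count (320).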
diff --git a/webrtc/modules/modules.gyp b/webrtc/modules/modules.gyp
index ef354ab..db3281f 100644
--- a/webrtc/modules/modules.gyp
+++ b/webrtc/modules/modules.gyp
@@ -152,8 +152,10 @@
             'audio_coding/neteq4/mock/mock_payload_splitter.h',
             'audio_processing/aec/system_delay_unittest.cc',
             'audio_processing/aec/echo_cancellation_unittest.cc',
+            'audio_processing/audio_processing_impl_unittest.cc',
             'audio_processing/echo_cancellation_impl_unittest.cc',
             'audio_processing/test/audio_processing_unittest.cc',
+            'audio_processing/test/test_utils.h',
             'audio_processing/utility/delay_estimator_unittest.cc',
             'audio_processing/utility/ring_buffer_unittest.cc',
             'bitrate_controller/bitrate_controller_unittest.cc',
@@ -167,9 +169,9 @@
             'desktop_capture/screen_capturer_mock_objects.h',
             'desktop_capture/screen_capturer_unittest.cc',
             'desktop_capture/window_capturer_unittest.cc',
-            "desktop_capture/win/cursor_unittest.cc",
-            "desktop_capture/win/cursor_unittest_resources.h",
-            "desktop_capture/win/cursor_unittest_resources.rc",
+            'desktop_capture/win/cursor_unittest.cc',
+            'desktop_capture/win/cursor_unittest_resources.h',
+            'desktop_capture/win/cursor_unittest_resources.rc',
             'media_file/source/media_file_unittest.cc',
             'module_common_types_unittest.cc',
             'pacing/paced_sender_unittest.cc',
diff --git a/webrtc/voice_engine/channel.cc b/webrtc/voice_engine/channel.cc
index 2724f52..3f45fea 100644
--- a/webrtc/voice_engine/channel.cc
+++ b/webrtc/voice_engine/channel.cc
@@ -695,10 +695,12 @@
     // Store speech type for dead-or-alive detection
     _outputSpeechType = audioFrame.speech_type_;
 
-    // Perform far-end AudioProcessing module processing on the received signal
-    if (_rxApmIsEnabled)
-    {
-        ApmProcessRx(audioFrame);
+    if (_rxApmIsEnabled) {
+      int err = rx_audioproc_->ProcessStream(&audioFrame);
+      if (err) {
+        LOG(LS_ERROR) << "ProcessStream() error: " << err;
+        assert(false);
+      }
     }
 
     float output_gain = 1.0f;
@@ -4446,29 +4448,13 @@
 
     InsertInbandDtmfTone();
 
-    if (_includeAudioLevelIndication)
-    {
-        if (rtp_audioproc_->set_sample_rate_hz(_audioFrame.sample_rate_hz_) !=
-            AudioProcessing::kNoError)
-        {
-            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
-                         VoEId(_instanceId, _channelId),
-                         "Error setting AudioProcessing sample rate");
-            return -1;
-        }
-
-        if (rtp_audioproc_->set_num_channels(_audioFrame.num_channels_,
-                                             _audioFrame.num_channels_) !=
-            AudioProcessing::kNoError)
-        {
-            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
-                         VoEId(_instanceId, _channelId),
-                         "Error setting AudioProcessing channels");
-            return -1;
-        }
-
-        // Performs level analysis only; does not affect the signal.
-        rtp_audioproc_->ProcessStream(&_audioFrame);
+    if (_includeAudioLevelIndication) {
+      // Performs level analysis only; does not affect the signal.
+      int err = rtp_audioproc_->ProcessStream(&_audioFrame);
+      if (err) {
+        LOG(LS_ERROR) << "ProcessStream() error: " << err;
+        assert(false);
+      }
     }
 
     return 0;
@@ -5210,25 +5196,6 @@
     }
 }
 
-int Channel::ApmProcessRx(AudioFrame& frame) {
-  // Register the (possibly new) frame parameters.
-  if (rx_audioproc_->set_sample_rate_hz(frame.sample_rate_hz_) != 0) {
-    assert(false);
-    LOG_FERR1(LS_ERROR, set_sample_rate_hz, frame.sample_rate_hz_);
-  }
-  if (rx_audioproc_->set_num_channels(frame.num_channels_,
-                                      frame.num_channels_) != 0) {
-    assert(false);
-    LOG_FERR2(LS_ERROR, set_num_channels, frame.num_channels_,
-              frame.num_channels_);
-  }
-  if (rx_audioproc_->ProcessStream(&frame) != 0) {
-    assert(false);
-    LOG_FERR0(LS_ERROR, ProcessStream);
-  }
-  return 0;
-}
-
 int Channel::SetSecondarySendCodec(const CodecInst& codec,
                                    int red_payload_type) {
   // Sanity check for payload type.
diff --git a/webrtc/voice_engine/channel.h b/webrtc/voice_engine/channel.h
index 48d50c2..3dfeedc 100644
--- a/webrtc/voice_engine/channel.h
+++ b/webrtc/voice_engine/channel.h
@@ -443,7 +443,6 @@
     void UpdatePacketDelay(uint32_t timestamp,
                            uint16_t sequenceNumber);
     void RegisterReceiveCodecsToRTPModule();
-    int ApmProcessRx(AudioFrame& audioFrame);
 
     int SetRedPayloadType(int red_payload_type);
 
diff --git a/webrtc/voice_engine/transmit_mixer.cc b/webrtc/voice_engine/transmit_mixer.cc
index 743d8e2..b9618c8 100644
--- a/webrtc/voice_engine/transmit_mixer.cc
+++ b/webrtc/voice_engine/transmit_mixer.cc
@@ -1317,18 +1317,6 @@
 
 void TransmitMixer::ProcessAudio(int delay_ms, int clock_drift,
                                  int current_mic_level) {
-  if (audioproc_->set_num_channels(_audioFrame.num_channels_,
-                                   _audioFrame.num_channels_) != 0) {
-    assert(false);
-    LOG_FERR2(LS_ERROR, set_num_channels, _audioFrame.num_channels_,
-              _audioFrame.num_channels_);
-  }
-
-  if (audioproc_->set_sample_rate_hz(_audioFrame.sample_rate_hz_) != 0) {
-    assert(false);
-    LOG_FERR1(LS_ERROR, set_sample_rate_hz, _audioFrame.sample_rate_hz_);
-  }
-
   if (audioproc_->set_stream_delay_ms(delay_ms) != 0) {
     // A redundant warning is reported in AudioDevice, which we've throttled
     // to avoid flooding the logs. Relegate this one to LS_VERBOSE to avoid
@@ -1338,8 +1326,8 @@
 
   GainControl* agc = audioproc_->gain_control();
   if (agc->set_stream_analog_level(current_mic_level) != 0) {
-    assert(false);
     LOG_FERR1(LS_ERROR, set_stream_analog_level, current_mic_level);
+    assert(false);
   }
 
   EchoCancellation* aec = audioproc_->echo_cancellation();
@@ -1349,8 +1337,8 @@
 
   int err = audioproc_->ProcessStream(&_audioFrame);
   if (err != 0) {
-    assert(false);
     LOG(LS_ERROR) << "ProcessStream() error: " << err;
+    assert(false);
   }
 
   CriticalSectionScoped cs(&_critSect);