Reparent NonlinearBeamformer under a new Beamformer interface.

R=aluebs@webrtc.org, andrew@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/41269004

Cr-Original-Commit-Position: refs/heads/master@{#8862}
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: dfa36058c945cf2ef9932a566987f648c24fa632
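
The reparenting lets AudioProcessingImpl hold its beamformer through the new
Beamformer<float> interface, so any implementation can be injected via the
test-only two-argument Create() overload, which passes ownership to the APM.
A minimal sketch of that injection follows; the helper function and geometry
values are illustrative, and beamforming is assumed to be already enabled in
the supplied Config:

    #include <vector>

    #include "webrtc/base/scoped_ptr.h"
    #include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h"
    #include "webrtc/modules/audio_processing/include/audio_processing.h"

    void CreateApmWithInjectedBeamformer(const webrtc::Config& config) {
      // Two-element linear array; the first microphone is the reference at the
      // origin. The coordinates here are arbitrary.
      std::vector<webrtc::Point> geometry;
      geometry.push_back(webrtc::Point(0.f, 0.f, 0.f));
      geometry.push_back(webrtc::Point(0.05f, 0.f, 0.f));

      // Create() forwards to the two-argument AudioProcessingImpl constructor,
      // which takes ownership of the injected beamformer.
      rtc::scoped_ptr<webrtc::AudioProcessing> apm(
          webrtc::AudioProcessing::Create(
              config, new webrtc::NonlinearBeamformer(geometry)));
      // ... use apm->ProcessStream() as usual; the beamformer is deleted
      // together with |apm|.
    }
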
diff --git a/common_audio/lapped_transform.h b/common_audio/lapped_transform.h
index 3ed9528..9f6b302 100644
--- a/common_audio/lapped_transform.h
+++ b/common_audio/lapped_transform.h
@@ -13,7 +13,6 @@
 
 #include <complex>
 
-#include "webrtc/base/checks.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/common_audio/blocker.h"
 #include "webrtc/common_audio/real_fourier.h"
diff --git a/modules/audio_processing/BUILD.gn b/modules/audio_processing/BUILD.gn
index cc11603..016c684 100644
--- a/modules/audio_processing/BUILD.gn
+++ b/modules/audio_processing/BUILD.gn
@@ -72,6 +72,7 @@
     "audio_buffer.h",
     "audio_processing_impl.cc",
     "audio_processing_impl.h",
+    "beamformer/beamformer.h",
     "beamformer/complex_matrix.h",
     "beamformer/covariance_matrix_generator.cc",
     "beamformer/covariance_matrix_generator.h",
diff --git a/modules/audio_processing/audio_processing.gypi b/modules/audio_processing/audio_processing.gypi
index 0b19fd9..3ceeed8 100644
--- a/modules/audio_processing/audio_processing.gypi
+++ b/modules/audio_processing/audio_processing.gypi
@@ -82,6 +82,7 @@
         'audio_buffer.h',
         'audio_processing_impl.cc',
         'audio_processing_impl.h',
+        'beamformer/beamformer.h',
         'beamformer/complex_matrix.h',
         'beamformer/covariance_matrix_generator.cc',
         'beamformer/covariance_matrix_generator.h',
diff --git a/modules/audio_processing/audio_processing_impl.cc b/modules/audio_processing/audio_processing_impl.cc
index e989708..eeb9a79 100644
--- a/modules/audio_processing/audio_processing_impl.cc
+++ b/modules/audio_processing/audio_processing_impl.cc
@@ -14,11 +14,11 @@
 
 #include "webrtc/base/platform_file.h"
 #include "webrtc/common_audio/include/audio_util.h"
+#include "webrtc/common_audio/channel_buffer.h"
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/audio_processing/agc/agc_manager_direct.h"
 #include "webrtc/modules/audio_processing/audio_buffer.h"
 #include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h"
-#include "webrtc/common_audio/channel_buffer.h"
 #include "webrtc/modules/audio_processing/common.h"
 #include "webrtc/modules/audio_processing/echo_cancellation_impl.h"
 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h"
@@ -134,7 +134,7 @@
 }
 
 AudioProcessing* AudioProcessing::Create(const Config& config,
-                                         NonlinearBeamformer* beamformer) {
+                                         Beamformer<float>* beamformer) {
   AudioProcessingImpl* apm = new AudioProcessingImpl(config, beamformer);
   if (apm->Initialize() != kNoError) {
     delete apm;
@@ -148,7 +148,7 @@
     : AudioProcessingImpl(config, nullptr) {}
 
 AudioProcessingImpl::AudioProcessingImpl(const Config& config,
-                                         NonlinearBeamformer* beamformer)
+                                         Beamformer<float>* beamformer)
     : echo_cancellation_(NULL),
       echo_control_mobile_(NULL),
       gain_control_(NULL),
@@ -600,7 +600,7 @@
   }
 
   if (beamformer_enabled_) {
-    beamformer_->ProcessChunk(ca->split_data_f(), ca->split_data_f());
+    beamformer_->ProcessChunk(*ca->split_data_f(), ca->split_data_f());
     ca->set_num_channels(1);
   }
 
diff --git a/modules/audio_processing/audio_processing_impl.h b/modules/audio_processing/audio_processing_impl.h
index b5114cf..765cde7 100644
--- a/modules/audio_processing/audio_processing_impl.h
+++ b/modules/audio_processing/audio_processing_impl.h
@@ -11,19 +11,21 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AUDIO_PROCESSING_IMPL_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_AUDIO_PROCESSING_IMPL_H_
 
-#include "webrtc/modules/audio_processing/include/audio_processing.h"
-
 #include <list>
 #include <string>
 
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/base/thread_annotations.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
 
 namespace webrtc {
 
 class AgcManagerDirect;
 class AudioBuffer;
-class NonlinearBeamformer;
+
+template<typename T>
+class Beamformer;
+
 class CriticalSectionWrapper;
 class EchoCancellationImpl;
 class EchoControlMobileImpl;
@@ -86,8 +88,9 @@
 class AudioProcessingImpl : public AudioProcessing {
  public:
   explicit AudioProcessingImpl(const Config& config);
-  // Only for testing.
-  AudioProcessingImpl(const Config& config, NonlinearBeamformer* beamformer);
+
+  // AudioProcessingImpl takes ownership of beamformer.
+  AudioProcessingImpl(const Config& config, Beamformer<float>* beamformer);
   virtual ~AudioProcessingImpl();
 
   // AudioProcessing methods.
@@ -218,7 +221,7 @@
   bool transient_suppressor_enabled_;
   rtc::scoped_ptr<TransientSuppressor> transient_suppressor_;
   const bool beamformer_enabled_;
-  rtc::scoped_ptr<NonlinearBeamformer> beamformer_;
+  rtc::scoped_ptr<Beamformer<float>> beamformer_;
   const std::vector<Point> array_geometry_;
 
   const bool supports_48kHz_;
diff --git a/modules/audio_processing/beamformer/beamformer.h b/modules/audio_processing/beamformer/beamformer.h
new file mode 100644
index 0000000..04cb659
--- /dev/null
+++ b/modules/audio_processing/beamformer/beamformer.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_BEAMFORMER_BEAMFORMER_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_BEAMFORMER_BEAMFORMER_H_
+
+#include "webrtc/common_audio/channel_buffer.h"
+
+namespace webrtc {
+
+template<typename T>
+class Beamformer {
+ public:
+  virtual ~Beamformer() {}
+
+  // Process one time-domain chunk of audio. The audio is expected to be split
+  // into frequency bands inside the ChannelBuffer. The number of frames and
+  // channels must correspond to the constructor parameters. The same
+  // ChannelBuffer can be passed in as |input| and |output|.
+  virtual void ProcessChunk(const ChannelBuffer<T>& input,
+                            ChannelBuffer<T>* output) = 0;
+
+  // Sample rate corresponds to the lower band.
+  // Needs to be called before the Beamformer can be used.
+  virtual void Initialize(int chunk_size_ms, int sample_rate_hz) = 0;
+
+  // Returns true if the current data contains the target signal.
+  // Which signals are considered "targets" is implementation dependent.
+  virtual bool is_target_present() = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_BEAMFORMER_BEAMFORMER_H_
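
The new header above is the entire interface. As a sketch of what an
implementation must provide, here is a deliberately trivial Beamformer<float>
that forwards the first input channel in every band; the class name and
behavior are hypothetical and not part of this patch:

    #include "webrtc/common_audio/channel_buffer.h"
    #include "webrtc/modules/audio_processing/beamformer/beamformer.h"

    namespace webrtc {

    class PassthroughBeamformer : public Beamformer<float> {
     public:
      void Initialize(int chunk_size_ms, int sample_rate_hz) override {
        // A real implementation would derive per-chunk sizes here; the sample
        // rate refers to the lower band, as documented above.
      }

      // Copies the first input channel to the single output channel in every
      // band. Safe when |input| and |output| alias, as the interface allows.
      void ProcessChunk(const ChannelBuffer<float>& input,
                        ChannelBuffer<float>* output) override {
        for (int i = 0; i < input.num_bands(); ++i) {
          for (int j = 0; j < input.num_frames_per_band(); ++j) {
            output->channels(i)[0][j] = input.channels(i)[0][j];
          }
        }
      }

      // Always reports a target; a real implementation would base this on the
      // most recently processed block.
      bool is_target_present() override { return true; }
    };

    }  // namespace webrtc
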
diff --git a/modules/audio_processing/beamformer/complex_matrix.h b/modules/audio_processing/beamformer/complex_matrix.h
index 391050b..f5be2b2 100644
--- a/modules/audio_processing/beamformer/complex_matrix.h
+++ b/modules/audio_processing/beamformer/complex_matrix.h
@@ -15,7 +15,6 @@
 
 #include "webrtc/base/checks.h"
 #include "webrtc/base/scoped_ptr.h"
-#include "webrtc/common_audio/channel_buffer.h"
 #include "webrtc/modules/audio_processing/beamformer/matrix.h"
 
 namespace webrtc {
diff --git a/modules/audio_processing/beamformer/matrix.h b/modules/audio_processing/beamformer/matrix.h
index 9e485ef..990f6a4 100644
--- a/modules/audio_processing/beamformer/matrix.h
+++ b/modules/audio_processing/beamformer/matrix.h
@@ -12,13 +12,13 @@
 #define WEBRTC_MODULES_AUDIO_PROCESSING_BEAMFORMER_MATRIX_H_
 
 #include <algorithm>
+#include <cstring>
 #include <string>
 #include <vector>
 
 #include "webrtc/base/checks.h"
 #include "webrtc/base/constructormagic.h"
 #include "webrtc/base/scoped_ptr.h"
-#include "webrtc/common_audio/channel_buffer.h"
 
 namespace {
 
diff --git a/modules/audio_processing/beamformer/mock_nonlinear_beamformer.cc b/modules/audio_processing/beamformer/mock_nonlinear_beamformer.cc
index 4a1936e..aecb0ec 100644
--- a/modules/audio_processing/beamformer/mock_nonlinear_beamformer.cc
+++ b/modules/audio_processing/beamformer/mock_nonlinear_beamformer.cc
@@ -19,6 +19,4 @@
     : NonlinearBeamformer(array_geometry) {
 }
 
-MockNonlinearBeamformer::~MockNonlinearBeamformer() {}
-
 }  // namespace webrtc
diff --git a/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h b/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h
index 56e647b..eb05ecd 100644
--- a/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h
+++ b/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h
@@ -21,10 +21,9 @@
 class MockNonlinearBeamformer : public NonlinearBeamformer {
  public:
   explicit MockNonlinearBeamformer(const std::vector<Point>& array_geometry);
-  ~MockNonlinearBeamformer() override;
 
   MOCK_METHOD2(Initialize, void(int chunk_size_ms, int sample_rate_hz));
-  MOCK_METHOD2(ProcessChunk, void(const ChannelBuffer<float>* input,
+  MOCK_METHOD2(ProcessChunk, void(const ChannelBuffer<float>& input,
                                   ChannelBuffer<float>* output));
   MOCK_METHOD0(is_target_present, bool());
 };
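
With ProcessChunk now taking its input by const reference, the mock can be
injected through the test-only Create() overload roughly as sketched below.
The helper and geometry are illustrative, and beamforming is assumed to be
enabled in the supplied Config:

    #include <vector>

    #include "testing/gmock/include/gmock/gmock.h"
    #include "webrtc/base/scoped_ptr.h"
    #include "webrtc/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h"
    #include "webrtc/modules/audio_processing/include/audio_processing.h"

    using ::testing::_;

    void ExpectBeamformerProcessesChunks(const webrtc::Config& config) {
      std::vector<webrtc::Point> geometry;
      geometry.push_back(webrtc::Point(0.f, 0.f, 0.f));
      geometry.push_back(webrtc::Point(0.05f, 0.f, 0.f));
      webrtc::MockNonlinearBeamformer* beamformer =
          new webrtc::MockNonlinearBeamformer(geometry);

      // The APM takes ownership of |beamformer|; only the raw pointer is kept
      // for setting expectations.
      rtc::scoped_ptr<webrtc::AudioProcessing> apm(
          webrtc::AudioProcessing::Create(config, beamformer));

      // The expectation matches the new const-reference signature.
      EXPECT_CALL(*beamformer, ProcessChunk(_, _));
      // ... feed 10 ms frames through apm->ProcessStream(); gMock verifies the
      // expectation when the APM (and with it the mock) is destroyed.
    }
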
diff --git a/modules/audio_processing/beamformer/nonlinear_beamformer.cc b/modules/audio_processing/beamformer/nonlinear_beamformer.cc
index 9630b7d..8fd6c68 100644
--- a/modules/audio_processing/beamformer/nonlinear_beamformer.cc
+++ b/modules/audio_processing/beamformer/nonlinear_beamformer.cc
@@ -293,32 +293,32 @@
   }
 }
 
-void NonlinearBeamformer::ProcessChunk(const ChannelBuffer<float>* input,
+void NonlinearBeamformer::ProcessChunk(const ChannelBuffer<float>& input,
                               ChannelBuffer<float>* output) {
-  DCHECK_EQ(input->num_channels(), num_input_channels_);
-  DCHECK_EQ(input->num_frames_per_band(), chunk_length_);
+  DCHECK_EQ(input.num_channels(), num_input_channels_);
+  DCHECK_EQ(input.num_frames_per_band(), chunk_length_);
 
   float old_high_pass_mask = high_pass_postfilter_mask_;
-  lapped_transform_->ProcessChunk(input->channels(0), output->channels(0));
+  lapped_transform_->ProcessChunk(input.channels(0), output->channels(0));
   // Ramp up/down for smoothing. 1 mask per 10ms results in audible
   // discontinuities.
   const float ramp_increment =
       (high_pass_postfilter_mask_ - old_high_pass_mask) /
-      input->num_frames_per_band();
+      input.num_frames_per_band();
   // Apply delay and sum and post-filter in the time domain. WARNING: only works
   // because delay-and-sum is not frequency dependent.
-  for (int i = 1; i < input->num_bands(); ++i) {
+  for (int i = 1; i < input.num_bands(); ++i) {
     float smoothed_mask = old_high_pass_mask;
-    for (int j = 0; j < input->num_frames_per_band(); ++j) {
+    for (int j = 0; j < input.num_frames_per_band(); ++j) {
       smoothed_mask += ramp_increment;
 
       // Applying the delay and sum (at zero degrees, this is equivalent to
       // averaging).
       float sum = 0.f;
-      for (int k = 0; k < input->num_channels(); ++k) {
-        sum += input->channels(i)[k][j];
+      for (int k = 0; k < input.num_channels(); ++k) {
+        sum += input.channels(i)[k][j];
       }
-      output->channels(i)[0][j] = sum / input->num_channels() * smoothed_mask;
+      output->channels(i)[0][j] = sum / input.num_channels() * smoothed_mask;
     }
   }
 }
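
For the bands above the lowest one, the loop above reduces to a per-sample
delay-and-sum average scaled by a linearly ramped post-filter mask. A distilled
restatement, with a helper name that is illustrative and not part of the patch:

    #include "webrtc/common_audio/channel_buffer.h"

    // Computes one post-filtered sample for |band| >= 1 and frame |frame|,
    // matching the in-place loop above: the channel average is scaled by a
    // mask that ramps linearly across the chunk from the previous chunk's
    // value to the current one.
    float PostFilteredSample(const webrtc::ChannelBuffer<float>& in, int band,
                             int frame, float old_mask, float new_mask) {
      const float ramp_increment =
          (new_mask - old_mask) / in.num_frames_per_band();
      const float smoothed_mask = old_mask + (frame + 1) * ramp_increment;
      float sum = 0.f;
      for (int k = 0; k < in.num_channels(); ++k) {
        sum += in.channels(band)[k][frame];
      }
      return sum / in.num_channels() * smoothed_mask;
    }
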
diff --git a/modules/audio_processing/beamformer/nonlinear_beamformer.h b/modules/audio_processing/beamformer/nonlinear_beamformer.h
index 91e47cd..bebfad8 100644
--- a/modules/audio_processing/beamformer/nonlinear_beamformer.h
+++ b/modules/audio_processing/beamformer/nonlinear_beamformer.h
@@ -14,8 +14,10 @@
 #include <vector>
 
 #include "webrtc/common_audio/lapped_transform.h"
-#include "webrtc/modules/audio_processing/beamformer/complex_matrix.h"
+#include "webrtc/common_audio/channel_buffer.h"
 #include "webrtc/modules/audio_processing/beamformer/array_util.h"
+#include "webrtc/modules/audio_processing/beamformer/beamformer.h"
+#include "webrtc/modules/audio_processing/beamformer/complex_matrix.h"
 
 namespace webrtc {
 
@@ -27,7 +29,9 @@
 // Beamforming Postprocessor" by Bastiaan Kleijn.
 //
 // TODO: Target angle assumed to be 0. Parameterize target angle.
-class NonlinearBeamformer : public LappedTransform::Callback {
+class NonlinearBeamformer
+  : public Beamformer<float>,
+    public LappedTransform::Callback {
  public:
   // At the moment it only accepts uniform linear microphone arrays. Using the
   // first microphone as a reference position [0, 0, 0] is a natural choice.
@@ -35,19 +39,20 @@
 
   // Sample rate corresponds to the lower band.
   // Needs to be called before the NonlinearBeamformer can be used.
-  virtual void Initialize(int chunk_size_ms, int sample_rate_hz);
+  void Initialize(int chunk_size_ms, int sample_rate_hz) override;
 
   // Process one time-domain chunk of audio. The audio is expected to be split
   // into frequency bands inside the ChannelBuffer. The number of frames and
   // channels must correspond to the constructor parameters. The same
   // ChannelBuffer can be passed in as |input| and |output|.
-  virtual void ProcessChunk(const ChannelBuffer<float>* input,
-                            ChannelBuffer<float>* output);
+  void ProcessChunk(const ChannelBuffer<float>& input,
+                    ChannelBuffer<float>* output) override;
+
   // After processing each block |is_target_present_| is set to true if the
   // target signal is present and to false otherwise. This method can be
   // called to determine whether the data is target signal or interference
   // and to process it accordingly.
-  virtual bool is_target_present() { return is_target_present_; }
+  bool is_target_present() override { return is_target_present_; }
 
  protected:
   // Process one frequency-domain block of audio. This is where the fun
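
Taken together, the interface methods are driven as in the updated test below.
A minimal sketch, assuming |audio| is already split into bands and that the
usual 10 ms chunk length is used:

    #include "webrtc/common_audio/channel_buffer.h"
    #include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h"

    // Initializes |bf| and processes a single chunk in place. |audio| must
    // match the channel count the beamformer was constructed with.
    void InitializeAndBeamformChunk(webrtc::NonlinearBeamformer* bf,
                                    webrtc::ChannelBuffer<float>* audio,
                                    int sample_rate_hz) {
      bf->Initialize(10, sample_rate_hz);  // 10 ms chunks; lower-band rate.
      bf->ProcessChunk(*audio, audio);     // In-place processing is allowed.
      if (bf->is_target_present()) {
        // The most recent chunk was classified as containing the target signal.
      }
    }
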
diff --git a/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc b/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
index 9d85ec5..48d7c2b 100644
--- a/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
+++ b/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
@@ -72,7 +72,7 @@
       break;
     }
 
-    bf.ProcessChunk(&captured_audio_cb, &captured_audio_cb);
+    bf.ProcessChunk(captured_audio_cb, &captured_audio_cb);
     webrtc::PcmWriteFromFloat(
         write_file, kChunkSize, 1, captured_audio_cb.channels());
   }
diff --git a/modules/audio_processing/include/audio_processing.h b/modules/audio_processing/include/audio_processing.h
index 7c230d3..72553ff 100644
--- a/modules/audio_processing/include/audio_processing.h
+++ b/modules/audio_processing/include/audio_processing.h
@@ -25,7 +25,10 @@
 namespace webrtc {
 
 class AudioFrame;
-class NonlinearBeamformer;
+
+template<typename T>
+class Beamformer;
+
 class EchoCancellation;
 class EchoControlMobile;
 class GainControl;
@@ -202,7 +205,7 @@
   static AudioProcessing* Create(const Config& config);
   // Only for testing.
   static AudioProcessing* Create(const Config& config,
-                                 NonlinearBeamformer* beamformer);
+                                 Beamformer<float>* beamformer);
   virtual ~AudioProcessing() {}
 
   // Initializes internal states, while retaining all user settings. This