Fix constness of AudioBuffer accessors.

Don't return non-const pointers from const accessors; update the affected
call sites accordingly. Provide non-const overloads where mutable access
is still required.

Inspired by kwiberg:
https://webrtc-codereview.appspot.com/12379005/

R=bjornv@webrtc.org, kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/15379004

git-svn-id: http://webrtc.googlecode.com/svn/trunk/webrtc@6030 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/common_audio/vad/include/webrtc_vad.h b/common_audio/vad/include/webrtc_vad.h
index 1a6e10a..0538273 100644
--- a/common_audio/vad/include/webrtc_vad.h
+++ b/common_audio/vad/include/webrtc_vad.h
@@ -69,7 +69,7 @@
 // returns              : 1 - (Active Voice),
 //                        0 - (Non-active Voice),
 //                       -1 - (Error)
-int WebRtcVad_Process(VadInst* handle, int fs, int16_t* audio_frame,
+int WebRtcVad_Process(VadInst* handle, int fs, const int16_t* audio_frame,
                       int frame_length);
 
 // Checks for valid combinations of |rate| and |frame_length|. We support 10,
diff --git a/common_audio/vad/vad_core.c b/common_audio/vad/vad_core.c
index 80c31f4..98da6ea 100644
--- a/common_audio/vad/vad_core.c
+++ b/common_audio/vad/vad_core.c
@@ -603,7 +603,7 @@
 // Calculate VAD decision by first extracting feature values and then calculate
 // probability for both speech and background noise.
 
-int WebRtcVad_CalcVad48khz(VadInstT* inst, int16_t* speech_frame,
+int WebRtcVad_CalcVad48khz(VadInstT* inst, const int16_t* speech_frame,
                            int frame_length) {
   int vad;
   int i;
@@ -628,7 +628,7 @@
   return vad;
 }
 
-int WebRtcVad_CalcVad32khz(VadInstT* inst, int16_t* speech_frame,
+int WebRtcVad_CalcVad32khz(VadInstT* inst, const int16_t* speech_frame,
                            int frame_length)
 {
     int len, vad;
@@ -650,7 +650,7 @@
     return vad;
 }
 
-int WebRtcVad_CalcVad16khz(VadInstT* inst, int16_t* speech_frame,
+int WebRtcVad_CalcVad16khz(VadInstT* inst, const int16_t* speech_frame,
                            int frame_length)
 {
     int len, vad;
@@ -666,7 +666,7 @@
     return vad;
 }
 
-int WebRtcVad_CalcVad8khz(VadInstT* inst, int16_t* speech_frame,
+int WebRtcVad_CalcVad8khz(VadInstT* inst, const int16_t* speech_frame,
                           int frame_length)
 {
     int16_t feature_vector[kNumChannels], total_power;
diff --git a/common_audio/vad/vad_core.h b/common_audio/vad/vad_core.h
index d6c1da2..202963d 100644
--- a/common_audio/vad/vad_core.h
+++ b/common_audio/vad/vad_core.h
@@ -85,9 +85,9 @@
 
 /****************************************************************************
  * WebRtcVad_CalcVad48khz(...)
- * WebRtcVad_CalcVad32khz(...) 
- * WebRtcVad_CalcVad16khz(...) 
- * WebRtcVad_CalcVad8khz(...) 
+ * WebRtcVad_CalcVad32khz(...)
+ * WebRtcVad_CalcVad16khz(...)
+ * WebRtcVad_CalcVad8khz(...)
  *
  * Calculate probability for active speech and make VAD decision.
  *
@@ -103,13 +103,13 @@
  *                        0 - No active speech
  *                        1-6 - Active speech
  */
-int WebRtcVad_CalcVad48khz(VadInstT* inst, int16_t* speech_frame,
+int WebRtcVad_CalcVad48khz(VadInstT* inst, const int16_t* speech_frame,
                            int frame_length);
-int WebRtcVad_CalcVad32khz(VadInstT* inst, int16_t* speech_frame,
+int WebRtcVad_CalcVad32khz(VadInstT* inst, const int16_t* speech_frame,
                            int frame_length);
-int WebRtcVad_CalcVad16khz(VadInstT* inst, int16_t* speech_frame,
+int WebRtcVad_CalcVad16khz(VadInstT* inst, const int16_t* speech_frame,
                            int frame_length);
-int WebRtcVad_CalcVad8khz(VadInstT* inst, int16_t* speech_frame,
+int WebRtcVad_CalcVad8khz(VadInstT* inst, const int16_t* speech_frame,
                           int frame_length);
 
 #endif  // WEBRTC_COMMON_AUDIO_VAD_VAD_CORE_H_
diff --git a/common_audio/vad/vad_sp.c b/common_audio/vad/vad_sp.c
index 41deb3d..e981ad2 100644
--- a/common_audio/vad/vad_sp.c
+++ b/common_audio/vad/vad_sp.c
@@ -24,7 +24,7 @@
 
 // TODO(bjornv): Move this function to vad_filterbank.c.
 // Downsampling filter based on splitting filter and allpass functions.
-void WebRtcVad_Downsampling(int16_t* signal_in,
+void WebRtcVad_Downsampling(const int16_t* signal_in,
                             int16_t* signal_out,
                             int32_t* filter_state,
                             int in_length) {
diff --git a/common_audio/vad/vad_sp.h b/common_audio/vad/vad_sp.h
index f84876a..b5e6259 100644
--- a/common_audio/vad/vad_sp.h
+++ b/common_audio/vad/vad_sp.h
@@ -30,7 +30,7 @@
 //
 // Output:
 //      - signal_out    : Downsampled signal (of length |in_length| / 2).
-void WebRtcVad_Downsampling(int16_t* signal_in,
+void WebRtcVad_Downsampling(const int16_t* signal_in,
                             int16_t* signal_out,
                             int32_t* filter_state,
                             int in_length);
diff --git a/common_audio/vad/webrtc_vad.c b/common_audio/vad/webrtc_vad.c
index 3b31ef5..8a9b931 100644
--- a/common_audio/vad/webrtc_vad.c
+++ b/common_audio/vad/webrtc_vad.c
@@ -68,7 +68,7 @@
   return WebRtcVad_set_mode_core(self, mode);
 }
 
-int WebRtcVad_Process(VadInst* handle, int fs, int16_t* audio_frame,
+int WebRtcVad_Process(VadInst* handle, int fs, const int16_t* audio_frame,
                       int frame_length) {
   int vad = -1;
   VadInstT* self = (VadInstT*) handle;
diff --git a/modules/audio_processing/audio_buffer.cc b/modules/audio_processing/audio_buffer.cc
index c53d4df..9160f69 100644
--- a/modules/audio_processing/audio_buffer.cc
+++ b/modules/audio_processing/audio_buffer.cc
@@ -228,7 +228,7 @@
   is_muted_ = false;
 }
 
-int16_t* AudioBuffer::data(int channel) const {
+const int16_t* AudioBuffer::data(int channel) const {
   assert(channel >= 0 && channel < num_proc_channels_);
   if (data_ != NULL) {
     return data_;
@@ -237,7 +237,12 @@
   return channels_->channel(channel);
 }
 
-int16_t* AudioBuffer::low_pass_split_data(int channel) const {
+int16_t* AudioBuffer::data(int channel) {
+  const AudioBuffer* t = this;
+  return const_cast<int16_t*>(t->data(channel));
+}
+
+const int16_t* AudioBuffer::low_pass_split_data(int channel) const {
   assert(channel >= 0 && channel < num_proc_channels_);
   if (split_channels_.get() == NULL) {
     return data(channel);
@@ -246,7 +251,12 @@
   return split_channels_->low_channel(channel);
 }
 
-int16_t* AudioBuffer::high_pass_split_data(int channel) const {
+int16_t* AudioBuffer::low_pass_split_data(int channel) {
+  const AudioBuffer* t = this;
+  return const_cast<int16_t*>(t->low_pass_split_data(channel));
+}
+
+const int16_t* AudioBuffer::high_pass_split_data(int channel) const {
   assert(channel >= 0 && channel < num_proc_channels_);
   if (split_channels_.get() == NULL) {
     return NULL;
@@ -255,19 +265,24 @@
   return split_channels_->high_channel(channel);
 }
 
-int16_t* AudioBuffer::mixed_data(int channel) const {
+int16_t* AudioBuffer::high_pass_split_data(int channel) {
+  const AudioBuffer* t = this;
+  return const_cast<int16_t*>(t->high_pass_split_data(channel));
+}
+
+const int16_t* AudioBuffer::mixed_data(int channel) const {
   assert(channel >= 0 && channel < num_mixed_channels_);
 
   return mixed_channels_->channel(channel);
 }
 
-int16_t* AudioBuffer::mixed_low_pass_data(int channel) const {
+const int16_t* AudioBuffer::mixed_low_pass_data(int channel) const {
   assert(channel >= 0 && channel < num_mixed_low_pass_channels_);
 
   return mixed_low_pass_channels_->channel(channel);
 }
 
-int16_t* AudioBuffer::low_pass_reference(int channel) const {
+const int16_t* AudioBuffer::low_pass_reference(int channel) const {
   assert(channel >= 0 && channel < num_proc_channels_);
   if (!reference_copied_) {
     return NULL;
@@ -280,7 +295,7 @@
   return keyboard_data_;
 }
 
-SplitFilterStates* AudioBuffer::filter_states(int channel) const {
+SplitFilterStates* AudioBuffer::filter_states(int channel) {
   assert(channel >= 0 && channel < num_proc_channels_);
   return &filter_states_[channel];
 }
diff --git a/modules/audio_processing/audio_buffer.h b/modules/audio_processing/audio_buffer.h
index eaf53eb..79f4689 100644
--- a/modules/audio_processing/audio_buffer.h
+++ b/modules/audio_processing/audio_buffer.h
@@ -55,15 +55,18 @@
   int samples_per_split_channel() const;
   int samples_per_keyboard_channel() const;
 
-  int16_t* data(int channel) const;
-  int16_t* low_pass_split_data(int channel) const;
-  int16_t* high_pass_split_data(int channel) const;
-  int16_t* mixed_data(int channel) const;
-  int16_t* mixed_low_pass_data(int channel) const;
-  int16_t* low_pass_reference(int channel) const;
+  int16_t* data(int channel);
+  const int16_t* data(int channel) const;
+  int16_t* low_pass_split_data(int channel);
+  const int16_t* low_pass_split_data(int channel) const;
+  int16_t* high_pass_split_data(int channel);
+  const int16_t* high_pass_split_data(int channel) const;
+  const int16_t* mixed_data(int channel) const;
+  const int16_t* mixed_low_pass_data(int channel) const;
+  const int16_t* low_pass_reference(int channel) const;
   const float* keyboard_data() const;
 
-  SplitFilterStates* filter_states(int channel) const;
+  SplitFilterStates* filter_states(int channel);
 
   void set_activity(AudioFrame::VADActivity activity);
   AudioFrame::VADActivity activity() const;
diff --git a/modules/audio_processing/echo_control_mobile_impl.cc b/modules/audio_processing/echo_control_mobile_impl.cc
index 1dce403..a03adc5 100644
--- a/modules/audio_processing/echo_control_mobile_impl.cc
+++ b/modules/audio_processing/echo_control_mobile_impl.cc
@@ -128,7 +128,7 @@
   for (int i = 0; i < audio->num_channels(); i++) {
     // TODO(ajm): improve how this works, possibly inside AECM.
     //            This is kind of hacked up.
-    int16_t* noisy = audio->low_pass_reference(i);
+    const int16_t* noisy = audio->low_pass_reference(i);
     int16_t* clean = audio->low_pass_split_data(i);
     if (noisy == NULL) {
       noisy = clean;
diff --git a/modules/audio_processing/gain_control_impl.cc b/modules/audio_processing/gain_control_impl.cc
index e859044..a67b67e 100644
--- a/modules/audio_processing/gain_control_impl.cc
+++ b/modules/audio_processing/gain_control_impl.cc
@@ -59,7 +59,7 @@
 
   assert(audio->samples_per_split_channel() <= 160);
 
-  int16_t* mixed_data = audio->low_pass_split_data(0);
+  const int16_t* mixed_data = audio->low_pass_split_data(0);
   if (audio->num_channels() > 1) {
     audio->CopyAndMixLowPass(1);
     mixed_data = audio->mixed_low_pass_data(0);
diff --git a/modules/audio_processing/level_estimator_impl.cc b/modules/audio_processing/level_estimator_impl.cc
index c5985ce..a91e963 100644
--- a/modules/audio_processing/level_estimator_impl.cc
+++ b/modules/audio_processing/level_estimator_impl.cc
@@ -20,7 +20,15 @@
 namespace webrtc {
 namespace {
 
-const double kMaxSquaredLevel = 32768.0 * 32768.0;
+const float kMaxSquaredLevel = 32768.0 * 32768.0;
+
+float SumSquare(const int16_t* data, int length) {
+  float sum_square = 0.f;
+  for (int i = 0; i < length; ++i) {
+    sum_square += data[i] * data[i];
+  }
+  return sum_square;
+}
 
 class Level {
  public:
@@ -36,7 +44,7 @@
     sample_count_ = 0;
   }
 
-  void Process(int16_t* data, int length) {
+  void Process(const int16_t* data, int length) {
     assert(data != NULL);
     assert(length > 0);
     sum_square_ += SumSquare(data, length);
@@ -55,7 +63,7 @@
     }
 
     // Normalize by the max level.
-    double rms = sum_square_ / (sample_count_ * kMaxSquaredLevel);
+    float rms = sum_square_ / (sample_count_ * kMaxSquaredLevel);
     // 20log_10(x^0.5) = 10log_10(x)
     rms = 10 * log10(rms);
     if (rms > 0)
@@ -69,18 +77,10 @@
   }
 
  private:
-  static double SumSquare(int16_t* data, int length) {
-    double sum_square = 0.0;
-    for (int i = 0; i < length; ++i) {
-      double data_d = static_cast<double>(data[i]);
-      sum_square += data_d * data_d;
-    }
-    return sum_square;
-  }
-
-  double sum_square_;
+  float sum_square_;
   int sample_count_;
 };
+
 }  // namespace
 
 LevelEstimatorImpl::LevelEstimatorImpl(const AudioProcessing* apm,
@@ -102,7 +102,7 @@
     return apm_->kNoError;
   }
 
-  int16_t* mixed_data = audio->data(0);
+  const int16_t* mixed_data = audio->data(0);
   if (audio->num_channels() > 1) {
     audio->CopyAndMix(1);
     mixed_data = audio->mixed_data(0);
diff --git a/modules/audio_processing/voice_detection_impl.cc b/modules/audio_processing/voice_detection_impl.cc
index 1d3d124..c6e497f 100644
--- a/modules/audio_processing/voice_detection_impl.cc
+++ b/modules/audio_processing/voice_detection_impl.cc
@@ -61,7 +61,7 @@
   }
   assert(audio->samples_per_split_channel() <= 160);
 
-  int16_t* mixed_data = audio->low_pass_split_data(0);
+  const int16_t* mixed_data = audio->low_pass_split_data(0);
   if (audio->num_channels() > 1) {
     audio->CopyAndMixLowPass(1);
     mixed_data = audio->mixed_low_pass_data(0);