Remove voe::TransmitMixer

TransmitMixer's functionality is moved into the AudioTransportProxy
owned by AudioState. This removes the need for an AudioTransport
implementation in VoEBaseImpl, which means the proxy is no longer a
proxy; hence, AudioTransportProxy is renamed to AudioTransportImpl.

In the short term, AudioState needs to know which AudioDeviceModule is
used, so it is added to AudioState::Config. AudioTransportImpl needs
to know which AudioSendStreams are currently sending, so AudioState
maintains a map of them, which it reduces to a simple vector for
AudioTransportImpl.

To encode and transmit audio,
AudioSendStream::OnAudioData(std::unique_ptr<AudioFrame> audio_frame)
is introduced and used in both the Chromium and standalone use cases.
This removes the need for two different variants of
voe::Channel::ProcessAndEncodeAudio(), so there is now only one,
taking an AudioFrame as argument. Callers need to allocate their own
AudioFrames, which is wasteful but not a regression, since this was
already happening in the voe::Channel functions.
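
A sketch of the resulting capture-side call, assuming OnAudioData() is
reachable on the AudioSendStream interface as stated above; the actual
caller is AudioTransportImpl::RecordedDataIsAvailable(), the header
locations are as of this revision, and the frame setup is abbreviated:

    #include <utility>

    #include "call/audio_send_stream.h"               // webrtc::AudioSendStream
    #include "modules/include/module_common_types.h"  // webrtc::AudioFrame
    #include "rtc_base/ptr_util.h"                    // rtc::MakeUnique

    void DeliverRecordedAudio(webrtc::AudioSendStream* send_stream,
                              size_t samples_per_channel,
                              size_t num_channels, int sample_rate_hz) {
      // Each sending stream gets its own AudioFrame; this allocation is
      // the same one voe::Channel::ProcessAndEncodeAudio() already did.
      auto audio_frame = rtc::MakeUnique<webrtc::AudioFrame>();
      audio_frame->sample_rate_hz_ = sample_rate_hz;
      audio_frame->num_channels_ = num_channels;
      audio_frame->samples_per_channel_ = samples_per_channel;
      // ... copy the recorded/mixed samples into the frame ...
      send_stream->OnAudioData(std::move(audio_frame));
    }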

Most of the changed logic resides in
AudioTransportImpl::RecordedDataIsAvailable(), where two oddities were
found:

  1. The clock drift parameter was ineffective, since
     apm->echo_cancellation()->enable_drift_compensation(false) is
     called during initialization (see the sketch after this list).

  2. The output parameter 'new_mic_volume' was never set; instead, the
     new level was returned as a result, so the ADM never updated the
     analog mic gain
     (https://cs.chromium.org/chromium/src/third_party/webrtc/voice_engine/voe_base_impl.cc?q=voe_base_impl.cc&dr&l=100).
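
To illustrate item 1, a sketch against the AudioProcessing interface
at this revision (the wrapper functions are illustrative, not the
actual call sites):

    #include "modules/audio_processing/include/audio_processing.h"

    // Done once during initialization: drift compensation is switched
    // off for the echo canceller.
    void ConfigureApm(webrtc::AudioProcessing* apm) {
      apm->echo_cancellation()->enable_drift_compensation(false);
    }

    // Per captured frame: forwarding the reported clock drift has no
    // effect while drift compensation is disabled, so the parameter is
    // effectively dead.
    void OnCapturedFrame(webrtc::AudioProcessing* apm,
                         webrtc::AudioFrame* audio_frame, int clock_drift) {
      apm->echo_cancellation()->set_stream_drift_samples(clock_drift);
      apm->ProcessStream(audio_frame);
    }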

Besides this, tests are updated, and some dead code found in the
process is removed.

Bug: webrtc:4690, webrtc:8591
Change-Id: I789d5296bf5efb7299a5ee05a4f3ce6abf9124b2
Reviewed-on: https://webrtc-review.googlesource.com/26681
Commit-Queue: Fredrik Solenberg <solenberg@webrtc.org>
Reviewed-by: Oskar Sundbom <ossu@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#21301}
diff --git a/audio/audio_state.cc b/audio/audio_state.cc
index 5a30c53..a83b681 100644
--- a/audio/audio_state.cc
+++ b/audio/audio_state.cc
@@ -10,13 +10,16 @@
 
 #include "audio/audio_state.h"
 
+#include <algorithm>
+#include <utility>
+#include <vector>
+
 #include "modules/audio_device/include/audio_device.h"
 #include "rtc_base/atomicops.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/ptr_util.h"
 #include "rtc_base/thread.h"
-#include "voice_engine/transmit_mixer.h"
 
 namespace webrtc {
 namespace internal {
@@ -24,15 +27,16 @@
 AudioState::AudioState(const AudioState::Config& config)
     : config_(config),
       voe_base_(config.voice_engine),
-      audio_transport_proxy_(voe_base_->audio_transport(),
-                             config_.audio_processing.get(),
-                             config_.audio_mixer) {
+      audio_transport_(config_.audio_mixer,
+                       config_.audio_processing.get(),
+                       config_.audio_device_module.get()) {
   process_thread_checker_.DetachFromThread();
   RTC_DCHECK(config_.audio_mixer);
 }
 
 AudioState::~AudioState() {
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(sending_streams_.empty());
 }
 
 VoiceEngine* AudioState::voice_engine() {
@@ -47,11 +51,23 @@
 
 bool AudioState::typing_noise_detected() const {
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  // TODO(solenberg): Remove const_cast once AudioState owns transmit mixer
-  //                  functionality.
-  voe::TransmitMixer* transmit_mixer =
-      const_cast<AudioState*>(this)->voe_base_->transmit_mixer();
-  return transmit_mixer->typing_noise_detected();
+  return audio_transport_.typing_noise_detected();
+}
+
+void AudioState::AddSendingStream(webrtc::AudioSendStream* stream,
+                                  int sample_rate_hz, size_t num_channels) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  auto& properties = sending_streams_[stream];
+  properties.sample_rate_hz = sample_rate_hz;
+  properties.num_channels = num_channels;
+  UpdateAudioTransportWithSendingStreams();
+}
+
+void AudioState::RemoveSendingStream(webrtc::AudioSendStream* stream) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  auto count = sending_streams_.erase(stream);
+  RTC_DCHECK_EQ(1, count);
+  UpdateAudioTransportWithSendingStreams();
 }
 
 void AudioState::SetPlayout(bool enabled) {
@@ -61,33 +77,47 @@
   if (enabled == currently_enabled) {
     return;
   }
-  VoEBase* const voe = VoEBase::GetInterface(voice_engine());
-  RTC_DCHECK(voe);
   if (enabled) {
     null_audio_poller_.reset();
   }
   // Will stop/start playout of the underlying device, if necessary, and
   // remember the setting for when it receives subsequent calls of
   // StartPlayout.
-  voe->SetPlayout(enabled);
+  voe_base_->SetPlayout(enabled);
   if (!enabled) {
     null_audio_poller_ =
-        rtc::MakeUnique<NullAudioPoller>(&audio_transport_proxy_);
+        rtc::MakeUnique<NullAudioPoller>(&audio_transport_);
   }
-  voe->Release();
 }
 
 void AudioState::SetRecording(bool enabled) {
   RTC_LOG(INFO) << "SetRecording(" << enabled << ")";
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   // TODO(henrika): keep track of state as in SetPlayout().
-  VoEBase* const voe = VoEBase::GetInterface(voice_engine());
-  RTC_DCHECK(voe);
   // Will stop/start recording of the underlying device, if necessary, and
   // remember the setting for when it receives subsequent calls of
   // StartPlayout.
-  voe->SetRecording(enabled);
-  voe->Release();
+  voe_base_->SetRecording(enabled);
+}
+
+AudioState::Stats AudioState::GetAudioInputStats() const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  const voe::AudioLevel& audio_level = audio_transport_.audio_level();
+  Stats result;
+  result.audio_level = audio_level.LevelFullRange();
+  RTC_DCHECK_LE(0, result.audio_level);
+  RTC_DCHECK_GE(32767, result.audio_level);
+  result.quantized_audio_level = audio_level.Level();
+  RTC_DCHECK_LE(0, result.quantized_audio_level);
+  RTC_DCHECK_GE(9, result.quantized_audio_level);
+  result.total_energy = audio_level.TotalEnergy();
+  result.total_duration = audio_level.TotalDuration();
+  return result;
+}
+
+void AudioState::SetStereoChannelSwapping(bool enable) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  audio_transport_.SetStereoChannelSwapping(enable);
 }
 
 // Reference count; implementation copied from rtc::RefCountedObject.
@@ -103,6 +133,20 @@
   }
   return rtc::RefCountReleaseStatus::kOtherRefsRemained;
 }
+
+void AudioState::UpdateAudioTransportWithSendingStreams() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  std::vector<AudioSendStream*> sending_streams;
+  int max_sample_rate_hz = 8000;
+  size_t max_num_channels = 1;
+  for (const auto& kv : sending_streams_) {
+    sending_streams.push_back(kv.first);
+    max_sample_rate_hz = std::max(max_sample_rate_hz, kv.second.sample_rate_hz);
+    max_num_channels = std::max(max_num_channels, kv.second.num_channels);
+  }
+  audio_transport_.UpdateSendingStreams(std::move(sending_streams),
+                                        max_sample_rate_hz, max_num_channels);
+}
 }  // namespace internal
 
 rtc::scoped_refptr<AudioState> AudioState::Create(