A simple copy of the old audio mixer to a new directory.

I have added build files and renamed the mixer so that it doesn't conflict with the old one. The header includes now point to this copy of the mixer. I have also fixed some of the more obvious cases of style guide non-conformance and run 'PRESUBMIT' on the old mixer.

This is a first step in the creation of a new mixing module that will replace AudioConferenceMixer and OutputMixer.

NOTRY=True

Review-Url: https://codereview.webrtc.org/2104363003
Cr-Commit-Position: refs/heads/master@{#13378}
diff --git a/webrtc/modules/audio_mixer/BUILD.gn b/webrtc/modules/audio_mixer/BUILD.gn
new file mode 100644
index 0000000..c281f38
--- /dev/null
+++ b/webrtc/modules/audio_mixer/BUILD.gn
@@ -0,0 +1,72 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+config("audio_conference_mixer_config") {
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
+  include_dirs = [
+    "include",
+    "../../modules/include",
+  ]
+}
+
+source_set("audio_mixer") {
+  sources = [
+    "audio_mixer.cc",
+    "audio_mixer.h",
+  ]
+  deps = [
+    ":audio_conference_mixer",
+    "../../voice_engine:voice_engine",
+  ]
+
+  if (is_win) {
+    defines = [ "WEBRTC_DRIFT_COMPENSATION_SUPPORTED" ]
+
+    cflags = [
+      # TODO(kjellander): Bug 261: fix this warning.
+      "/wd4373",  # virtual function override.
+    ]
+  }
+
+  configs += [ "../..:common_config" ]
+  public_configs = [ "../..:common_inherited_config" ]
+
+  if (is_clang) {
+    # Suppress warnings from Chrome's Clang plugins.
+    # See http://code.google.com/p/webrtc/issues/detail?id=163 for details.
+    configs -= [ "//build/config/clang:find_bad_constructs" ]
+  }
+}
+
+source_set("audio_conference_mixer") {
+  sources = [
+    "include/audio_mixer_defines.h",
+    "include/new_audio_conference_mixer.h",
+    "source/new_audio_conference_mixer_impl.cc",
+    "source/new_audio_conference_mixer_impl.h",
+  ]
+
+  configs += [ "../..:common_config" ]
+
+  public_configs = [
+    "../..:common_inherited_config",
+    ":audio_conference_mixer_config",
+  ]
+
+  if (is_clang) {
+    # Suppress warnings from Chrome's Clang plugins.
+    # See http://code.google.com/p/webrtc/issues/detail?id=163 for details.
+    configs -= [ "//build/config/clang:find_bad_constructs" ]
+  }
+
+  deps = [
+    "../../modules/audio_processing",
+    "../../modules/utility",
+    "../../system_wrappers",
+  ]
+}
diff --git a/webrtc/modules/audio_mixer/DEPS b/webrtc/modules/audio_mixer/DEPS
new file mode 100644
index 0000000..2290dc6
--- /dev/null
+++ b/webrtc/modules/audio_mixer/DEPS
@@ -0,0 +1,15 @@
+include_rules = [
+  "+webrtc/base",
+  "+webrtc/call",
+  "+webrtc/common_audio",
+  "+webrtc/modules/audio_coding",
+  "+webrtc/modules/audio_conference_mixer",
+  "+webrtc/modules/audio_device",
+  "+webrtc/modules/audio_processing",
+  "+webrtc/modules/media_file",
+  "+webrtc/modules/pacing",
+  "+webrtc/modules/rtp_rtcp",
+  "+webrtc/modules/utility",
+  "+webrtc/system_wrappers",
+  "+webrtc/voice_engine",
+]
diff --git a/webrtc/modules/audio_mixer/audio_mixer.cc b/webrtc/modules/audio_mixer/audio_mixer.cc
new file mode 100644
index 0000000..9048c39
--- /dev/null
+++ b/webrtc/modules/audio_mixer/audio_mixer.cc
@@ -0,0 +1,451 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_mixer/audio_mixer.h"
+
+#include "webrtc/base/format_macros.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/utility/include/audio_frame_operations.h"
+#include "webrtc/system_wrappers/include/file_wrapper.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/voice_engine/include/voe_external_media.h"
+#include "webrtc/voice_engine/statistics.h"
+#include "webrtc/voice_engine/utility.h"
+
+namespace webrtc {
+namespace voe {
+
+void AudioMixer::NewMixedAudio(int32_t id,
+                               const AudioFrame& generalAudioFrame,
+                               const AudioFrame** uniqueAudioFrames,
+                               uint32_t size) {
+  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::NewMixedAudio(id=%d, size=%u)", id, size);
+
+  _audioFrame.CopyFrom(generalAudioFrame);
+  _audioFrame.id_ = id;
+}
+
+void AudioMixer::PlayNotification(int32_t id, uint32_t durationMs) {
+  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::PlayNotification(id=%d, durationMs=%d)", id,
+               durationMs);
+  // Not implemented yet.
+}
+
+void AudioMixer::RecordNotification(int32_t id, uint32_t durationMs) {
+  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::RecordNotification(id=%d, durationMs=%d)", id,
+               durationMs);
+
+  // Not implemented yet.
+}
+
+void AudioMixer::PlayFileEnded(int32_t id) {
+  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::PlayFileEnded(id=%d)", id);
+
+  // not needed
+}
+
+void AudioMixer::RecordFileEnded(int32_t id) {
+  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::RecordFileEnded(id=%d)", id);
+  assert(id == _instanceId);
+
+  rtc::CritScope cs(&_fileCritSect);
+  _outputFileRecording = false;
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::RecordFileEnded() =>"
+               "output file recorder module is shutdown");
+}
+
+int32_t AudioMixer::Create(AudioMixer*& mixer, uint32_t instanceId) {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
+               "AudioMixer::Create(instanceId=%d)", instanceId);
+  mixer = new AudioMixer(instanceId);
+  if (mixer == NULL) {
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
+                 "AudioMixer::Create() unable to allocate memory for"
+                 "mixer");
+    return -1;
+  }
+  return 0;
+}
+
+AudioMixer::AudioMixer(uint32_t instanceId)
+    : _mixerModule(*NewAudioConferenceMixer::Create(instanceId)),
+      _audioLevel(),
+      _instanceId(instanceId),
+      _externalMediaCallbackPtr(NULL),
+      _externalMedia(false),
+      _panLeft(1.0f),
+      _panRight(1.0f),
+      _mixingFrequencyHz(8000),
+      _outputFileRecorderPtr(NULL),
+      _outputFileRecording(false) {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::AudioMixer() - ctor");
+
+  if (_mixerModule.RegisterMixedStreamCallback(this) == -1) {
+    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
+                 "AudioMixer::AudioMixer() failed to register mixer"
+                 "callbacks");
+  }
+}
+
+void AudioMixer::Destroy(AudioMixer*& mixer) {
+  if (mixer) {
+    delete mixer;
+    mixer = NULL;
+  }
+}
+
+AudioMixer::~AudioMixer() {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::~AudioMixer() - dtor");
+  if (_externalMedia) {
+    DeRegisterExternalMediaProcessing();
+  }
+  {
+    rtc::CritScope cs(&_fileCritSect);
+    if (_outputFileRecorderPtr) {
+      _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+      _outputFileRecorderPtr->StopRecording();
+      FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+      _outputFileRecorderPtr = NULL;
+    }
+  }
+  _mixerModule.UnRegisterMixedStreamCallback();
+  delete &_mixerModule;
+}
+
+int32_t AudioMixer::SetEngineInformation(voe::Statistics& engineStatistics) {
+  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::SetEngineInformation()");
+  _engineStatisticsPtr = &engineStatistics;
+  return 0;
+}
+
+int32_t AudioMixer::SetAudioProcessingModule(
+    AudioProcessing* audioProcessingModule) {
+  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::SetAudioProcessingModule("
+               "audioProcessingModule=0x%x)",
+               audioProcessingModule);
+  _audioProcessingModulePtr = audioProcessingModule;
+  return 0;
+}
+
+int AudioMixer::RegisterExternalMediaProcessing(
+    VoEMediaProcess& proccess_object) {
+  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::RegisterExternalMediaProcessing()");
+
+  rtc::CritScope cs(&_callbackCritSect);
+  _externalMediaCallbackPtr = &proccess_object;
+  _externalMedia = true;
+
+  return 0;
+}
+
+int AudioMixer::DeRegisterExternalMediaProcessing() {
+  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::DeRegisterExternalMediaProcessing()");
+
+  rtc::CritScope cs(&_callbackCritSect);
+  _externalMedia = false;
+  _externalMediaCallbackPtr = NULL;
+
+  return 0;
+}
+
+int32_t AudioMixer::SetMixabilityStatus(MixerAudioSource& participant,
+                                        bool mixable) {
+  return _mixerModule.SetMixabilityStatus(&participant, mixable);
+}
+
+int32_t AudioMixer::SetAnonymousMixabilityStatus(MixerAudioSource& participant,
+                                                 bool mixable) {
+  return _mixerModule.SetAnonymousMixabilityStatus(&participant, mixable);
+}
+
+int32_t AudioMixer::MixActiveChannels() {
+  _mixerModule.Process();
+  return 0;
+}
+
+int AudioMixer::GetSpeechOutputLevel(uint32_t& level) {
+  int8_t currentLevel = _audioLevel.Level();
+  level = static_cast<uint32_t>(currentLevel);
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "GetSpeechOutputLevel() => level=%u", level);
+  return 0;
+}
+
+int AudioMixer::GetSpeechOutputLevelFullRange(uint32_t& level) {
+  int16_t currentLevel = _audioLevel.LevelFullRange();
+  level = static_cast<uint32_t>(currentLevel);
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "GetSpeechOutputLevelFullRange() => level=%u", level);
+  return 0;
+}
+
+int AudioMixer::SetOutputVolumePan(float left, float right) {
+  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::SetOutputVolumePan()");
+  _panLeft = left;
+  _panRight = right;
+  return 0;
+}
+
+int AudioMixer::GetOutputVolumePan(float& left, float& right) {
+  left = _panLeft;
+  right = _panRight;
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "GetOutputVolumePan() => left=%2.1f, right=%2.1f", left, right);
+  return 0;
+}
+
+int AudioMixer::StartRecordingPlayout(const char* fileName,
+                                      const CodecInst* codecInst) {
+  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::StartRecordingPlayout(fileName=%s)", fileName);
+
+  if (_outputFileRecording) {
+    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                 "StartRecordingPlayout() is already recording");
+    return 0;
+  }
+
+  FileFormats format;
+  const uint32_t notificationTime(0);
+  CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000};
+
+  if ((codecInst != NULL) &&
+      ((codecInst->channels < 1) || (codecInst->channels > 2))) {
+    _engineStatisticsPtr->SetLastError(
+        VE_BAD_ARGUMENT, kTraceError,
+        "StartRecordingPlayout() invalid compression");
+    return -1;
+  }
+  if (codecInst == NULL) {
+    format = kFileFormatPcm16kHzFile;
+    codecInst = &dummyCodec;
+  } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
+             (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
+             (STR_CASE_CMP(codecInst->plname, "PCMA") == 0)) {
+    format = kFileFormatWavFile;
+  } else {
+    format = kFileFormatCompressedFile;
+  }
+
+  rtc::CritScope cs(&_fileCritSect);
+
+  // Destroy the old instance
+  if (_outputFileRecorderPtr) {
+    _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+    FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+    _outputFileRecorderPtr = NULL;
+  }
+
+  _outputFileRecorderPtr =
+      FileRecorder::CreateFileRecorder(_instanceId, (const FileFormats)format);
+  if (_outputFileRecorderPtr == NULL) {
+    _engineStatisticsPtr->SetLastError(
+        VE_INVALID_ARGUMENT, kTraceError,
+        "StartRecordingPlayout() fileRecorder format isnot correct");
+    return -1;
+  }
+
+  if (_outputFileRecorderPtr->StartRecordingAudioFile(
+          fileName, (const CodecInst&)*codecInst, notificationTime) != 0) {
+    _engineStatisticsPtr->SetLastError(
+        VE_BAD_FILE, kTraceError,
+        "StartRecordingAudioFile() failed to start file recording");
+    _outputFileRecorderPtr->StopRecording();
+    FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+    _outputFileRecorderPtr = NULL;
+    return -1;
+  }
+  _outputFileRecorderPtr->RegisterModuleFileCallback(this);
+  _outputFileRecording = true;
+
+  return 0;
+}
+
+int AudioMixer::StartRecordingPlayout(OutStream* stream,
+                                      const CodecInst* codecInst) {
+  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::StartRecordingPlayout()");
+
+  if (_outputFileRecording) {
+    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                 "StartRecordingPlayout() is already recording");
+    return 0;
+  }
+
+  FileFormats format;
+  const uint32_t notificationTime(0);
+  CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000};
+
+  if (codecInst != NULL && codecInst->channels != 1) {
+    _engineStatisticsPtr->SetLastError(
+        VE_BAD_ARGUMENT, kTraceError,
+        "StartRecordingPlayout() invalid compression");
+    return -1;
+  }
+  if (codecInst == NULL) {
+    format = kFileFormatPcm16kHzFile;
+    codecInst = &dummyCodec;
+  } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
+             (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
+             (STR_CASE_CMP(codecInst->plname, "PCMA") == 0)) {
+    format = kFileFormatWavFile;
+  } else {
+    format = kFileFormatCompressedFile;
+  }
+
+  rtc::CritScope cs(&_fileCritSect);
+
+  // Destroy the old instance
+  if (_outputFileRecorderPtr) {
+    _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+    FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+    _outputFileRecorderPtr = NULL;
+  }
+
+  _outputFileRecorderPtr =
+      FileRecorder::CreateFileRecorder(_instanceId, (const FileFormats)format);
+  if (_outputFileRecorderPtr == NULL) {
+    _engineStatisticsPtr->SetLastError(
+        VE_INVALID_ARGUMENT, kTraceError,
+        "StartRecordingPlayout() fileRecorder format isnot correct");
+    return -1;
+  }
+
+  if (_outputFileRecorderPtr->StartRecordingAudioFile(*stream, *codecInst,
+                                                      notificationTime) != 0) {
+    _engineStatisticsPtr->SetLastError(
+        VE_BAD_FILE, kTraceError,
+        "StartRecordingAudioFile() failed to start file recording");
+    _outputFileRecorderPtr->StopRecording();
+    FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+    _outputFileRecorderPtr = NULL;
+    return -1;
+  }
+
+  _outputFileRecorderPtr->RegisterModuleFileCallback(this);
+  _outputFileRecording = true;
+
+  return 0;
+}
+
+int AudioMixer::StopRecordingPlayout() {
+  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "AudioMixer::StopRecordingPlayout()");
+
+  if (!_outputFileRecording) {
+    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
+                 "StopRecordingPlayout() file isnot recording");
+    return -1;
+  }
+
+  rtc::CritScope cs(&_fileCritSect);
+
+  if (_outputFileRecorderPtr->StopRecording() != 0) {
+    _engineStatisticsPtr->SetLastError(
+        VE_STOP_RECORDING_FAILED, kTraceError,
+        "StopRecording(), could not stop recording");
+    return -1;
+  }
+  _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+  FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+  _outputFileRecorderPtr = NULL;
+  _outputFileRecording = false;
+
+  return 0;
+}
+
+int AudioMixer::GetMixedAudio(int sample_rate_hz,
+                              size_t num_channels,
+                              AudioFrame* frame) {
+  WEBRTC_TRACE(
+      kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+      "AudioMixer::GetMixedAudio(sample_rate_hz=%d, num_channels=%" PRIuS ")",
+      sample_rate_hz, num_channels);
+
+  // --- Record playout if enabled
+  {
+    rtc::CritScope cs(&_fileCritSect);
+    if (_outputFileRecording && _outputFileRecorderPtr)
+      _outputFileRecorderPtr->RecordAudioToFile(_audioFrame);
+  }
+
+  frame->num_channels_ = num_channels;
+  frame->sample_rate_hz_ = sample_rate_hz;
+  // TODO(andrew): Ideally the downmixing would occur much earlier, in
+  // AudioCodingModule.
+  RemixAndResample(_audioFrame, &resampler_, frame);
+  return 0;
+}
+
+int32_t AudioMixer::DoOperationsOnCombinedSignal(bool feed_data_to_apm) {
+  if (_audioFrame.sample_rate_hz_ != _mixingFrequencyHz) {
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "AudioMixer::DoOperationsOnCombinedSignal() => "
+                 "mixing frequency = %d",
+                 _audioFrame.sample_rate_hz_);
+    _mixingFrequencyHz = _audioFrame.sample_rate_hz_;
+  }
+
+  // Scale left and/or right channel(s) if balance is active
+  if (_panLeft != 1.0 || _panRight != 1.0) {
+    if (_audioFrame.num_channels_ == 1) {
+      AudioFrameOperations::MonoToStereo(&_audioFrame);
+    } else {
+      // Pure stereo mode (we are receiving a stereo signal).
+    }
+
+    assert(_audioFrame.num_channels_ == 2);
+    AudioFrameOperations::Scale(_panLeft, _panRight, _audioFrame);
+  }
+
+  // --- Far-end Voice Quality Enhancement (AudioProcessing Module)
+  if (feed_data_to_apm) {
+    if (_audioProcessingModulePtr->ProcessReverseStream(&_audioFrame) != 0) {
+      WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                   "AudioProcessingModule::ProcessReverseStream() => error");
+      RTC_DCHECK(false);
+    }
+  }
+
+  // --- External media processing
+  {
+    rtc::CritScope cs(&_callbackCritSect);
+    if (_externalMedia) {
+      const bool is_stereo = (_audioFrame.num_channels_ == 2);
+      if (_externalMediaCallbackPtr) {
+        _externalMediaCallbackPtr->Process(
+            -1, kPlaybackAllChannelsMixed,
+            reinterpret_cast<int16_t*>(_audioFrame.data_),
+            _audioFrame.samples_per_channel_, _audioFrame.sample_rate_hz_,
+            is_stereo);
+      }
+    }
+  }
+
+  // --- Measure audio level (0-9) for the combined signal
+  _audioLevel.ComputeLevel(_audioFrame);
+
+  return 0;
+}
+}  // namespace voe
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_mixer/audio_mixer.gypi b/webrtc/modules/audio_mixer/audio_mixer.gypi
new file mode 100644
index 0000000..055d7bd
--- /dev/null
+++ b/webrtc/modules/audio_mixer/audio_mixer.gypi
@@ -0,0 +1,40 @@
+# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+  'targets': [
+    {
+      'target_name': 'new_audio_conference_mixer',
+      'type': 'static_library',
+      'dependencies': [
+        'audio_processing',
+        'webrtc_utility',
+        '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
+      ],
+      'sources': [
+        'include/new_audio_conference_mixer.h',
+        'include/audio_mixer_defines.h',
+        'source/new_audio_conference_mixer_impl.cc',
+        'source/new_audio_conference_mixer_impl.h',
+      ],
+    },
+    {
+      'target_name': 'audio_mixer',
+      'type': 'static_library',
+      'dependencies': [
+        'new_audio_conference_mixer',
+        'webrtc_utility',
+        '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
+      ],
+      'sources': [
+        'audio_mixer.h',
+        'audio_mixer.cc',
+      ],
+    },
+  ], # targets
+}
diff --git a/webrtc/modules/audio_mixer/audio_mixer.h b/webrtc/modules/audio_mixer/audio_mixer.h
new file mode 100644
index 0000000..ddcebe5
--- /dev/null
+++ b/webrtc/modules/audio_mixer/audio_mixer.h
@@ -0,0 +1,127 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_MIXER_AUDIO_MIXER_H_
+#define WEBRTC_MODULES_AUDIO_MIXER_AUDIO_MIXER_H_
+
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/common_audio/resampler/include/push_resampler.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_mixer/include/new_audio_conference_mixer.h"
+#include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h"
+#include "webrtc/modules/utility/include/file_recorder.h"
+#include "webrtc/voice_engine/level_indicator.h"
+#include "webrtc/voice_engine/voice_engine_defines.h"
+
+namespace webrtc {
+
+class AudioProcessing;
+class FileWrapper;
+class VoEMediaProcess;
+
+namespace voe {
+class Statistics;
+
+// Note: this class is in the process of being rewritten and merged
+// with AudioConferenceMixer. Expect inheritance chains to be changed,
+// member functions removed or renamed.
+class AudioMixer : public OldAudioMixerOutputReceiver, public FileCallback {
+ public:
+  static int32_t Create(AudioMixer*& mixer, uint32_t instanceId);  // NOLINT
+
+  static void Destroy(AudioMixer*& mixer);  // NOLINT
+
+  int32_t SetEngineInformation(Statistics& engineStatistics);  // NOLINT
+
+  int32_t SetAudioProcessingModule(AudioProcessing* audioProcessingModule);
+
+  // VoEExternalMedia
+  int RegisterExternalMediaProcessing(VoEMediaProcess&  // NOLINT
+                                      process_object);
+
+  int DeRegisterExternalMediaProcessing();
+
+  int32_t MixActiveChannels();
+
+  int32_t DoOperationsOnCombinedSignal(bool feed_data_to_apm);
+
+  int32_t SetMixabilityStatus(MixerAudioSource& participant,  // NOLINT
+                              bool mixable);
+
+  int32_t SetAnonymousMixabilityStatus(MixerAudioSource& participant,  // NOLINT
+                                       bool mixable);
+
+  int GetMixedAudio(int sample_rate_hz,
+                    size_t num_channels,
+                    AudioFrame* audioFrame);
+
+  // VoEVolumeControl
+  int GetSpeechOutputLevel(uint32_t& level);  // NOLINT
+
+  int GetSpeechOutputLevelFullRange(uint32_t& level);  // NOLINT
+
+  int SetOutputVolumePan(float left, float right);
+
+  int GetOutputVolumePan(float& left, float& right);  // NOLINT
+
+  // VoEFile
+  int StartRecordingPlayout(const char* fileName, const CodecInst* codecInst);
+
+  int StartRecordingPlayout(OutStream* stream, const CodecInst* codecInst);
+  int StopRecordingPlayout();
+
+  virtual ~AudioMixer();
+
+  // From OldAudioMixerOutputReceiver.
+  virtual void NewMixedAudio(int32_t id,
+                             const AudioFrame& generalAudioFrame,
+                             const AudioFrame** uniqueAudioFrames,
+                             uint32_t size);
+
+  // For file recording
+  void PlayNotification(int32_t id, uint32_t durationMs);
+
+  void RecordNotification(int32_t id, uint32_t durationMs);
+
+  void PlayFileEnded(int32_t id);
+  void RecordFileEnded(int32_t id);
+
+ private:
+  explicit AudioMixer(uint32_t instanceId);
+
+  // Externally provided dependencies (not owned by this class).
+  Statistics* _engineStatisticsPtr;
+  AudioProcessing* _audioProcessingModulePtr;
+
+  rtc::CriticalSection _callbackCritSect;
+  // Protects _outputFileRecorderPtr and _outputFileRecording.
+  rtc::CriticalSection _fileCritSect;
+  NewAudioConferenceMixer& _mixerModule;
+  AudioFrame _audioFrame;
+  // Converts mixed audio to the audio device output rate.
+  PushResampler<int16_t> resampler_;
+  // Converts mixed audio to the audio processing rate.
+  PushResampler<int16_t> audioproc_resampler_;
+  AudioLevel _audioLevel;  // measures audio level for the combined signal
+  int _instanceId;
+  VoEMediaProcess* _externalMediaCallbackPtr;
+  bool _externalMedia;
+  float _panLeft;
+  float _panRight;
+  int _mixingFrequencyHz;
+  FileRecorder* _outputFileRecorderPtr;
+  bool _outputFileRecording;
+};
+
+}  // namespace voe
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_MIXER_AUDIO_MIXER_H_
diff --git a/webrtc/modules/audio_mixer/include/audio_mixer_defines.h b/webrtc/modules/audio_mixer/include/audio_mixer_defines.h
new file mode 100644
index 0000000..3aa5c6b
--- /dev/null
+++ b/webrtc/modules/audio_mixer/include/audio_mixer_defines.h
@@ -0,0 +1,84 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_MIXER_INCLUDE_AUDIO_MIXER_DEFINES_H_
+#define WEBRTC_MODULES_AUDIO_MIXER_INCLUDE_AUDIO_MIXER_DEFINES_H_
+
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+class NewMixHistory;
+
+// A callback class that all mixer participants must inherit from/implement.
+class MixerAudioSource {
+ public:
+  // The implementation of this function should update audioFrame with new
+  // audio every time it's called.
+  //
+  // If it returns -1, the frame will not be added to the mix.
+  //
+  // NOTE: This function should not be called. It will remain for a short
+  // time so that subclasses can override it without getting warnings.
+  // TODO(henrik.lundin) Remove this function.
+  virtual int32_t GetAudioFrame(int32_t id, AudioFrame* audioFrame) {
+    RTC_CHECK(false);
+    return -1;
+  }
+
+  // The implementation of GetAudioFrameWithMuted should update audio_frame
+  // with new audio every time it's called. The return value will be
+  // interpreted as follows.
+  enum class AudioFrameInfo {
+    kNormal,  // The samples in audio_frame are valid and should be used.
+    kMuted,   // The samples in audio_frame should not be used, but should be
+              // implicitly interpreted as zero. Other fields in audio_frame
+              // may be read and should contain meaningful values.
+    kError    // audio_frame will not be used.
+  };
+
+  virtual AudioFrameInfo GetAudioFrameWithMuted(int32_t id,
+                                                AudioFrame* audio_frame) {
+    return GetAudioFrame(id, audio_frame) == -1 ? AudioFrameInfo::kError
+                                                : AudioFrameInfo::kNormal;
+  }
+
+  // Returns true if the participant was mixed this mix iteration.
+  bool IsMixed() const;
+
+  // This function specifies the sampling frequency needed for the AudioFrame
+  // for future GetAudioFrame(..) calls.
+  virtual int32_t NeededFrequency(int32_t id) const = 0;
+
+  NewMixHistory* _mixHistory;
+
+ protected:
+  MixerAudioSource();
+  virtual ~MixerAudioSource();
+};
+
+class OldAudioMixerOutputReceiver {
+ public:
+  // This callback function provides the mixed audio for this mix iteration.
+  // Note that uniqueAudioFrames is an array of AudioFrame pointers with the
+  // size according to the size parameter.
+  virtual void NewMixedAudio(const int32_t id,
+                             const AudioFrame& generalAudioFrame,
+                             const AudioFrame** uniqueAudioFrames,
+                             const uint32_t size) = 0;
+
+ protected:
+  OldAudioMixerOutputReceiver() {}
+  virtual ~OldAudioMixerOutputReceiver() {}
+};
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_MIXER_INCLUDE_AUDIO_MIXER_DEFINES_H_
diff --git a/webrtc/modules/audio_mixer/include/new_audio_conference_mixer.h b/webrtc/modules/audio_mixer/include/new_audio_conference_mixer.h
new file mode 100644
index 0000000..f691640
--- /dev/null
+++ b/webrtc/modules/audio_mixer/include/new_audio_conference_mixer.h
@@ -0,0 +1,74 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_MIXER_INCLUDE_NEW_AUDIO_CONFERENCE_MIXER_H_
+#define WEBRTC_MODULES_AUDIO_MIXER_INCLUDE_NEW_AUDIO_CONFERENCE_MIXER_H_
+
+#include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h"
+#include "webrtc/modules/include/module.h"
+#include "webrtc/modules/include/module_common_types.h"
+
+namespace webrtc {
+class OldAudioMixerOutputReceiver;
+class MixerAudioSource;
+class Trace;
+
+class NewAudioConferenceMixer : public Module {
+ public:
+  enum { kMaximumAmountOfMixedParticipants = 3 };
+  enum Frequency {
+    kNbInHz = 8000,
+    kWbInHz = 16000,
+    kSwbInHz = 32000,
+    kFbInHz = 48000,
+    kLowestPossible = -1,
+    kDefaultFrequency = kWbInHz
+  };
+
+  // Factory method. Constructor disabled.
+  static NewAudioConferenceMixer* Create(int id);
+  virtual ~NewAudioConferenceMixer() {}
+
+  // Module functions
+  int64_t TimeUntilNextProcess() override = 0;
+  void Process() override = 0;
+
+  // Register/unregister a callback class for receiving the mixed audio.
+  virtual int32_t RegisterMixedStreamCallback(
+      OldAudioMixerOutputReceiver* receiver) = 0;
+  virtual int32_t UnRegisterMixedStreamCallback() = 0;
+
+  // Add/remove participants as candidates for mixing.
+  virtual int32_t SetMixabilityStatus(MixerAudioSource* participant,
+                                      bool mixable) = 0;
+  // Returns true if a participant is a candidate for mixing.
+  virtual bool MixabilityStatus(const MixerAudioSource& participant) const = 0;
+
+  // Inform the mixer that the participant should always be mixed and not
+  // count toward the number of mixed participants. Note that a participant
+  // must have been added to the mixer (by calling SetMixabilityStatus())
+  // before this function can be successfully called.
+  virtual int32_t SetAnonymousMixabilityStatus(MixerAudioSource* participant,
+                                               bool mixable) = 0;
+  // Returns true if the participant is mixed anonymously.
+  virtual bool AnonymousMixabilityStatus(
+      const MixerAudioSource& participant) const = 0;
+
+  // Set the minimum sampling frequency at which to mix. The mixing algorithm
+  // may still choose to mix at a higher sampling frequency to avoid
+  // downsampling of audio contributing to the mixed audio.
+  virtual int32_t SetMinimumMixingFrequency(Frequency freq) = 0;
+
+ protected:
+  NewAudioConferenceMixer() {}
+};
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_MIXER_INCLUDE_NEW_AUDIO_CONFERENCE_MIXER_H_
diff --git a/webrtc/modules/audio_mixer/source/OWNERS b/webrtc/modules/audio_mixer/source/OWNERS
new file mode 100644
index 0000000..3ee6b4b
--- /dev/null
+++ b/webrtc/modules/audio_mixer/source/OWNERS
@@ -0,0 +1,5 @@
+
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gyp=*
+per-file *.gypi=*
diff --git a/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc b/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc
new file mode 100644
index 0000000..36d70b2
--- /dev/null
+++ b/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc
@@ -0,0 +1,898 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h"
+
+#include <algorithm>
+
+#include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
+#include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/utility/include/audio_frame_operations.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+#include "webrtc/system_wrappers/include/trace.h"
+
+namespace webrtc {
+namespace {
+
+struct ParticipantFrameStruct {
+  ParticipantFrameStruct(MixerAudioSource* p, AudioFrame* a, bool m)
+      : participant(p), audioFrame(a), muted(m) {}
+  MixerAudioSource* participant;
+  AudioFrame* audioFrame;
+  bool muted;
+};
+
+typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList;
+
+// Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
+// These effects are applied to |frame| itself prior to mixing. Assumes that
+// |mixed_frame| always has at least as many channels as |frame|. At most
+// stereo (two channels) is supported.
+//
+// TODO(andrew): consider not modifying |frame| here.
+void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
+  assert(mixed_frame->num_channels_ >= frame->num_channels_);
+  if (use_limiter) {
+    // Divide by two to avoid saturation in the mixing.
+    // This is only meaningful if the limiter will be used.
+    *frame >>= 1;
+  }
+  if (mixed_frame->num_channels_ > frame->num_channels_) {
+    // We only support mono-to-stereo.
+    assert(mixed_frame->num_channels_ == 2 && frame->num_channels_ == 1);
+    AudioFrameOperations::MonoToStereo(frame);
+  }
+
+  *mixed_frame += *frame;
+}
+
+// Return the max number of channels from a |list| composed of AudioFrames.
+size_t MaxNumChannels(const AudioFrameList* list) {
+  size_t max_num_channels = 1;
+  for (AudioFrameList::const_iterator iter = list->begin(); iter != list->end();
+       ++iter) {
+    max_num_channels = std::max(max_num_channels, (*iter).frame->num_channels_);
+  }
+  return max_num_channels;
+}
+
+}  // namespace
+
+MixerAudioSource::MixerAudioSource() : _mixHistory(new NewMixHistory()) {}
+
+MixerAudioSource::~MixerAudioSource() {
+  delete _mixHistory;
+}
+
+bool MixerAudioSource::IsMixed() const {
+  return _mixHistory->IsMixed();
+}
+
+NewMixHistory::NewMixHistory() : _isMixed(0) {}
+
+NewMixHistory::~NewMixHistory() {}
+
+bool NewMixHistory::IsMixed() const {
+  return _isMixed;
+}
+
+bool NewMixHistory::WasMixed() const {
+  // From the perspective of NewAudioConferenceMixerImpl, "was mixed" and
+  // "is mixed" are equivalent, so this simply forwards to IsMixed().
+  return IsMixed();
+}
+
+int32_t NewMixHistory::SetIsMixed(const bool mixed) {
+  _isMixed = mixed;
+  return 0;
+}
+
+void NewMixHistory::ResetMixedStatus() {
+  _isMixed = false;
+}
+
+NewAudioConferenceMixer* NewAudioConferenceMixer::Create(int id) {
+  NewAudioConferenceMixerImpl* mixer = new NewAudioConferenceMixerImpl(id);
+  if (!mixer->Init()) {
+    delete mixer;
+    return NULL;
+  }
+  return mixer;
+}
+
+NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id)
+    : _id(id),
+      _minimumMixingFreq(kLowestPossible),
+      _mixReceiver(NULL),
+      _outputFrequency(kDefaultFrequency),
+      _sampleSize(0),
+      _audioFramePool(NULL),
+      _participantList(),
+      _additionalParticipantList(),
+      _numMixedParticipants(0),
+      use_limiter_(true),
+      _timeStamp(0),
+      _timeScheduler(kProcessPeriodicityInMs),
+      _processCalls(0) {}
+
+bool NewAudioConferenceMixerImpl::Init() {
+  _crit.reset(CriticalSectionWrapper::CreateCriticalSection());
+  if (_crit.get() == NULL)
+    return false;
+
+  _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection());
+  if (_cbCrit.get() == NULL)
+    return false;
+
+  Config config;
+  config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
+  _limiter.reset(AudioProcessing::Create(config));
+  if (!_limiter.get())
+    return false;
+
+  MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool,
+                                           DEFAULT_AUDIO_FRAME_POOLSIZE);
+  if (_audioFramePool == NULL)
+    return false;
+
+  if (SetOutputFrequency(kDefaultFrequency) == -1)
+    return false;
+
+  if (_limiter->gain_control()->set_mode(GainControl::kFixedDigital) !=
+      _limiter->kNoError)
+    return false;
+
+  // We smoothly limit the mixed frame to -7 dBFS. -6 dBFS would correspond to
+  // the divide-by-2, but -7 is used instead to give a bit of headroom since the
+  // AGC is not a hard limiter.
+  if (_limiter->gain_control()->set_target_level_dbfs(7) != _limiter->kNoError)
+    return false;
+
+  if (_limiter->gain_control()->set_compression_gain_db(0) !=
+      _limiter->kNoError)
+    return false;
+
+  if (_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError)
+    return false;
+
+  if (_limiter->gain_control()->Enable(true) != _limiter->kNoError)
+    return false;
+
+  return true;
+}
+
+NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() {
+  MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
+  assert(_audioFramePool == NULL);
+}
+
+// Process should be called every kProcessPeriodicityInMs ms
+int64_t NewAudioConferenceMixerImpl::TimeUntilNextProcess() {
+  int64_t timeUntilNextProcess = 0;
+  CriticalSectionScoped cs(_crit.get());
+  if (_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) {
+    WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
+                 "failed in TimeToNextUpdate() call");
+    // Sanity check
+    assert(false);
+    return -1;
+  }
+  return timeUntilNextProcess;
+}
+
+void NewAudioConferenceMixerImpl::Process() {
+  size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants;
+  {
+    CriticalSectionScoped cs(_crit.get());
+    assert(_processCalls == 0);
+    _processCalls++;
+
+    // Let the scheduler know that we are running one iteration.
+    _timeScheduler.UpdateScheduler();
+  }
+
+  AudioFrameList mixList;
+  AudioFrameList rampOutList;
+  AudioFrameList additionalFramesList;
+  std::map<int, MixerAudioSource*> mixedParticipantsMap;
+  {
+    CriticalSectionScoped cs(_cbCrit.get());
+
+    int32_t lowFreq = GetLowestMixingFrequency();
+    // SILK can run at 12 kHz and 24 kHz. These frequencies are not
+    // supported, so use the closest higher supported frequency to avoid
+    // losing any information.
+    // TODO(henrike): this is probably more appropriate to do in
+    //                GetLowestMixingFrequency().
+    if (lowFreq == 12000) {
+      lowFreq = 16000;
+    } else if (lowFreq == 24000) {
+      lowFreq = 32000;
+    }
+    if (lowFreq <= 0) {
+      CriticalSectionScoped cs(_crit.get());
+      _processCalls--;
+      return;
+    } else {
+      switch (lowFreq) {
+        case 8000:
+          if (OutputFrequency() != kNbInHz) {
+            SetOutputFrequency(kNbInHz);
+          }
+          break;
+        case 16000:
+          if (OutputFrequency() != kWbInHz) {
+            SetOutputFrequency(kWbInHz);
+          }
+          break;
+        case 32000:
+          if (OutputFrequency() != kSwbInHz) {
+            SetOutputFrequency(kSwbInHz);
+          }
+          break;
+        case 48000:
+          if (OutputFrequency() != kFbInHz) {
+            SetOutputFrequency(kFbInHz);
+          }
+          break;
+        default:
+          assert(false);
+
+          CriticalSectionScoped cs(_crit.get());
+          _processCalls--;
+          return;
+      }
+    }
+
+    UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
+                &remainingParticipantsAllowedToMix);
+
+    GetAdditionalAudio(&additionalFramesList);
+    UpdateMixedStatus(mixedParticipantsMap);
+  }
+
+  // Get an AudioFrame for mixing from the memory pool.
+  AudioFrame* mixedAudio = NULL;
+  if (_audioFramePool->PopMemory(mixedAudio) == -1) {
+    WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
+                 "failed PopMemory() call");
+    assert(false);
+    return;
+  }
+
+  {
+    CriticalSectionScoped cs(_crit.get());
+
+    // TODO(henrike): it might be better to decide the number of channels
+    //                with an API instead of dynamically.
+
+    // Find the max channels over all mixing lists.
+    const size_t num_mixed_channels =
+        std::max(MaxNumChannels(&mixList),
+                 std::max(MaxNumChannels(&additionalFramesList),
+                          MaxNumChannels(&rampOutList)));
+
+    mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency,
+                            AudioFrame::kNormalSpeech, AudioFrame::kVadPassive,
+                            num_mixed_channels);
+
+    _timeStamp += static_cast<uint32_t>(_sampleSize);
+
+    // We only use the limiter if it supports the output sample rate and
+    // we're actually mixing multiple streams.
+    use_limiter_ = _numMixedParticipants > 1 &&
+                   _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz;
+
+    MixFromList(mixedAudio, mixList);
+    MixAnonomouslyFromList(mixedAudio, additionalFramesList);
+    MixAnonomouslyFromList(mixedAudio, rampOutList);
+
+    if (mixedAudio->samples_per_channel_ == 0) {
+      // Nothing was mixed, set the audio samples to silence.
+      mixedAudio->samples_per_channel_ = _sampleSize;
+      mixedAudio->Mute();
+    } else {
+      // Only call the limiter if we have something to mix.
+      LimitMixedAudio(mixedAudio);
+    }
+  }
+
+  {
+    CriticalSectionScoped cs(_cbCrit.get());
+    if (_mixReceiver != NULL) {
+      const AudioFrame** dummy = NULL;
+      _mixReceiver->NewMixedAudio(_id, *mixedAudio, dummy, 0);
+    }
+  }
+
+  // Reclaim all outstanding memory.
+  _audioFramePool->PushMemory(mixedAudio);
+  ClearAudioFrameList(&mixList);
+  ClearAudioFrameList(&rampOutList);
+  ClearAudioFrameList(&additionalFramesList);
+  {
+    CriticalSectionScoped cs(_crit.get());
+    _processCalls--;
+  }
+  return;
+}
+
+int32_t NewAudioConferenceMixerImpl::RegisterMixedStreamCallback(
+    OldAudioMixerOutputReceiver* mixReceiver) {
+  CriticalSectionScoped cs(_cbCrit.get());
+  if (_mixReceiver != NULL) {
+    return -1;
+  }
+  _mixReceiver = mixReceiver;
+  return 0;
+}
+
+int32_t NewAudioConferenceMixerImpl::UnRegisterMixedStreamCallback() {
+  CriticalSectionScoped cs(_cbCrit.get());
+  if (_mixReceiver == NULL) {
+    return -1;
+  }
+  _mixReceiver = NULL;
+  return 0;
+}
+
+int32_t NewAudioConferenceMixerImpl::SetOutputFrequency(
+    const Frequency& frequency) {
+  CriticalSectionScoped cs(_crit.get());
+
+  _outputFrequency = frequency;
+  _sampleSize =
+      static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000);
+
+  return 0;
+}
+
+NewAudioConferenceMixer::Frequency
+NewAudioConferenceMixerImpl::OutputFrequency() const {
+  CriticalSectionScoped cs(_crit.get());
+  return _outputFrequency;
+}
+
+int32_t NewAudioConferenceMixerImpl::SetMixabilityStatus(
+    MixerAudioSource* participant,
+    bool mixable) {
+  if (!mixable) {
+    // Anonymous participants are in a separate list. Make sure that the
+    // participant is in the _participantList if it is being mixed.
+    SetAnonymousMixabilityStatus(participant, false);
+  }
+  size_t numMixedParticipants;
+  {
+    CriticalSectionScoped cs(_cbCrit.get());
+    const bool isMixed = IsParticipantInList(*participant, _participantList);
+    // API must be called with a new state.
+    if (!(mixable ^ isMixed)) {
+      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
+                   "Mixable is aready %s", isMixed ? "ON" : "off");
+      return -1;
+    }
+    bool success = false;
+    if (mixable) {
+      success = AddParticipantToList(participant, &_participantList);
+    } else {
+      success = RemoveParticipantFromList(participant, &_participantList);
+    }
+    if (!success) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
+                   "failed to %s participant", mixable ? "add" : "remove");
+      assert(false);
+      return -1;
+    }
+
+    size_t numMixedNonAnonymous = _participantList.size();
+    if (numMixedNonAnonymous > kMaximumAmountOfMixedParticipants) {
+      numMixedNonAnonymous = kMaximumAmountOfMixedParticipants;
+    }
+    numMixedParticipants =
+        numMixedNonAnonymous + _additionalParticipantList.size();
+  }
+  // A MixerAudioSource was added or removed. Make sure the scratch
+  // buffer is updated if necessary.
+  // Note: The scratch buffer may only be updated in Process().
+  CriticalSectionScoped cs(_crit.get());
+  _numMixedParticipants = numMixedParticipants;
+  return 0;
+}
+
+bool NewAudioConferenceMixerImpl::MixabilityStatus(
+    const MixerAudioSource& participant) const {
+  CriticalSectionScoped cs(_cbCrit.get());
+  return IsParticipantInList(participant, _participantList);
+}
+
+int32_t NewAudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
+    MixerAudioSource* participant,
+    bool anonymous) {
+  CriticalSectionScoped cs(_cbCrit.get());
+  if (IsParticipantInList(*participant, _additionalParticipantList)) {
+    if (anonymous) {
+      return 0;
+    }
+    if (!RemoveParticipantFromList(participant, &_additionalParticipantList)) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
+                   "unable to remove participant from anonymous list");
+      assert(false);
+      return -1;
+    }
+    return AddParticipantToList(participant, &_participantList) ? 0 : -1;
+  }
+  if (!anonymous) {
+    return 0;
+  }
+  const bool mixable =
+      RemoveParticipantFromList(participant, &_participantList);
+  if (!mixable) {
+    WEBRTC_TRACE(
+        kTraceWarning, kTraceAudioMixerServer, _id,
+        "participant must be registered before turning it into anonymous");
+    // Setting anonymous status is only possible if MixerAudioSource is
+    // already registered.
+    return -1;
+  }
+  return AddParticipantToList(participant, &_additionalParticipantList) ? 0
+                                                                        : -1;
+}
+
+bool NewAudioConferenceMixerImpl::AnonymousMixabilityStatus(
+    const MixerAudioSource& participant) const {
+  CriticalSectionScoped cs(_cbCrit.get());
+  return IsParticipantInList(participant, _additionalParticipantList);
+}
+
+int32_t NewAudioConferenceMixerImpl::SetMinimumMixingFrequency(Frequency freq) {
+  // Make sure that only allowed sampling frequencies are used. Use closest
+  // higher sampling frequency to avoid losing information.
+  if (static_cast<int>(freq) == 12000) {
+    freq = kWbInHz;
+  } else if (static_cast<int>(freq) == 24000) {
+    freq = kSwbInHz;
+  }
+
+  if ((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) ||
+      (freq == kLowestPossible)) {
+    _minimumMixingFreq = freq;
+    return 0;
+  } else {
+    WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
+                 "SetMinimumMixingFrequency incorrect frequency: %i", freq);
+    assert(false);
+    return -1;
+  }
+}
+
+// Check all AudioFrames that are to be mixed. The highest sampling frequency
+// found is the lowest that can be used without losing information.
+int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequency() const {
+  const int participantListFrequency =
+      GetLowestMixingFrequencyFromList(_participantList);
+  const int anonymousListFrequency =
+      GetLowestMixingFrequencyFromList(_additionalParticipantList);
+  const int highestFreq = (participantListFrequency > anonymousListFrequency)
+                              ? participantListFrequency
+                              : anonymousListFrequency;
+  // Check if the user specified a lowest mixing frequency.
+  if (_minimumMixingFreq != kLowestPossible) {
+    if (_minimumMixingFreq > highestFreq) {
+      return _minimumMixingFreq;
+    }
+  }
+  return highestFreq;
+}
+
+int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequencyFromList(
+    const MixerAudioSourceList& mixList) const {
+  int32_t highestFreq = 8000;
+  for (MixerAudioSourceList::const_iterator iter = mixList.begin();
+       iter != mixList.end(); ++iter) {
+    const int32_t neededFrequency = (*iter)->NeededFrequency(_id);
+    if (neededFrequency > highestFreq) {
+      highestFreq = neededFrequency;
+    }
+  }
+  return highestFreq;
+}
+
+void NewAudioConferenceMixerImpl::UpdateToMix(
+    AudioFrameList* mixList,
+    AudioFrameList* rampOutList,
+    std::map<int, MixerAudioSource*>* mixParticipantList,
+    size_t* maxAudioFrameCounter) const {
+  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
+               "UpdateToMix(mixList,rampOutList,mixParticipantList,%d)",
+               *maxAudioFrameCounter);
+  const size_t mixListStartSize = mixList->size();
+  AudioFrameList activeList;
+  // Struct needed by the passive lists to keep track of which AudioFrame
+  // belongs to which MixerAudioSource.
+  ParticipantFrameStructList passiveWasNotMixedList;
+  ParticipantFrameStructList passiveWasMixedList;
+  for (MixerAudioSourceList::const_iterator participant =
+           _participantList.begin();
+       participant != _participantList.end(); ++participant) {
+    // Stop keeping track of passive participants if there are already
+    // enough participants available (they won't be mixed anyway).
+    bool mustAddToPassiveList =
+        (*maxAudioFrameCounter >
+         (activeList.size() + passiveWasMixedList.size() +
+          passiveWasNotMixedList.size()));
+
+    const bool wasMixed = (*participant)->_mixHistory->WasMixed();
+    AudioFrame* audioFrame = NULL;
+    if (_audioFramePool->PopMemory(audioFrame) == -1) {
+      WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
+                   "failed PopMemory() call");
+      assert(false);
+      return;
+    }
+    audioFrame->sample_rate_hz_ = _outputFrequency;
+
+    auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
+    if (ret == MixerAudioSource::AudioFrameInfo::kError) {
+      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
+                   "failed to GetAudioFrameWithMuted() from participant");
+      _audioFramePool->PushMemory(audioFrame);
+      continue;
+    }
+    const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted);
+    if (_participantList.size() != 1) {
+      // TODO(wu): Issue 3390, add support for multiple participants case.
+      audioFrame->ntp_time_ms_ = -1;
+    }
+
+    // TODO(henrike): this assert triggers in some test cases where SRTP is
+    // used which prevents NetEQ from making a VAD. Temporarily disable this
+    // assert until the problem is fixed on a higher level.
+    // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown);
+    if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
+      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
+                   "invalid VAD state from participant");
+    }
+
+    if (audioFrame->vad_activity_ == AudioFrame::kVadActive) {
+      if (!wasMixed && !muted) {
+        RampIn(*audioFrame);
+      }
+
+      if (activeList.size() >= *maxAudioFrameCounter) {
+        // There are already more active participants than should be
+        // mixed. Only keep the ones with the highest energy.
+        AudioFrameList::iterator replaceItem;
+        uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audioFrame);
+
+        bool found_replace_item = false;
+        for (AudioFrameList::iterator iter = activeList.begin();
+             iter != activeList.end(); ++iter) {
+          const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame);
+          if (energy < lowestEnergy) {
+            replaceItem = iter;
+            lowestEnergy = energy;
+            found_replace_item = true;
+          }
+        }
+        if (found_replace_item) {
+          RTC_DCHECK(!muted);  // Cannot replace with a muted frame.
+          FrameAndMuteInfo replaceFrame = *replaceItem;
+
+          bool replaceWasMixed = false;
+          std::map<int, MixerAudioSource*>::const_iterator it =
+              mixParticipantList->find(replaceFrame.frame->id_);
+
+          // When a frame is pushed to |activeList| it is also pushed
+          // to mixParticipantList with the frame's id. This means
+          // that the find() call above should never fail.
+          assert(it != mixParticipantList->end());
+          replaceWasMixed = it->second->_mixHistory->WasMixed();
+
+          mixParticipantList->erase(replaceFrame.frame->id_);
+          activeList.erase(replaceItem);
+
+          activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
+          (*mixParticipantList)[audioFrame->id_] = *participant;
+          assert(mixParticipantList->size() <=
+                 kMaximumAmountOfMixedParticipants);
+
+          if (replaceWasMixed) {
+            if (!replaceFrame.muted) {
+              RampOut(*replaceFrame.frame);
+            }
+            rampOutList->push_back(replaceFrame);
+            assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants);
+          } else {
+            _audioFramePool->PushMemory(replaceFrame.frame);
+          }
+        } else {
+          if (wasMixed) {
+            if (!muted) {
+              RampOut(*audioFrame);
+            }
+            rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted));
+            assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants);
+          } else {
+            _audioFramePool->PushMemory(audioFrame);
+          }
+        }
+      } else {
+        activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
+        (*mixParticipantList)[audioFrame->id_] = *participant;
+        assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants);
+      }
+    } else {
+      if (wasMixed) {
+        ParticipantFrameStruct* part_struct =
+            new ParticipantFrameStruct(*participant, audioFrame, muted);
+        passiveWasMixedList.push_back(part_struct);
+      } else if (mustAddToPassiveList) {
+        if (!muted) {
+          RampIn(*audioFrame);
+        }
+        ParticipantFrameStruct* part_struct =
+            new ParticipantFrameStruct(*participant, audioFrame, muted);
+        passiveWasNotMixedList.push_back(part_struct);
+      } else {
+        _audioFramePool->PushMemory(audioFrame);
+      }
+    }
+  }
+  assert(activeList.size() <= *maxAudioFrameCounter);
+  // At this point it is known which participants should be mixed. Transfer
+  // this information to this function's output parameters.
+  for (AudioFrameList::const_iterator iter = activeList.begin();
+       iter != activeList.end(); ++iter) {
+    mixList->push_back(*iter);
+  }
+  activeList.clear();
+  // Always mix a constant number of AudioFrames. If there aren't enough
+  // active participants, mix passive ones, starting with those that were
+  // mixed last iteration.
+  for (ParticipantFrameStructList::const_iterator iter =
+           passiveWasMixedList.begin();
+       iter != passiveWasMixedList.end(); ++iter) {
+    if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
+      mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
+      (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
+      assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants);
+    } else {
+      _audioFramePool->PushMemory((*iter)->audioFrame);
+    }
+    delete *iter;
+  }
+  // And finally the ones that have not been mixed for a while.
+  for (ParticipantFrameStructList::const_iterator iter =
+           passiveWasNotMixedList.begin();
+       iter != passiveWasNotMixedList.end(); ++iter) {
+    if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
+      mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
+      (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
+      assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants);
+    } else {
+      _audioFramePool->PushMemory((*iter)->audioFrame);
+    }
+    delete *iter;
+  }
+  assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size());
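+  // Hand back the unused frame budget: shrink the counter by the number of
+  // frames this call appended to |mixList|.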
+  *maxAudioFrameCounter += mixListStartSize - mixList->size();
+}
+
+void NewAudioConferenceMixerImpl::GetAdditionalAudio(
+    AudioFrameList* additionalFramesList) const {
+  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
+               "GetAdditionalAudio(additionalFramesList)");
+  // The GetAudioFrameWithMuted() callback may result in the participant being
+  // removed from _additionalParticipantList. If that happens it will
+  // invalidate any iterators. Create a copy of the participant list so that
+  // it can be traversed safely.
+  MixerAudioSourceList additionalParticipantList(_additionalParticipantList);
+
+  for (MixerAudioSourceList::const_iterator participant =
+           additionalParticipantList.begin();
+       participant != additionalParticipantList.end(); ++participant) {
+    AudioFrame* audioFrame = NULL;
+    if (_audioFramePool->PopMemory(audioFrame) == -1) {
+      WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
+                   "failed PopMemory() call");
+      assert(false);
+      return;
+    }
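+    // Set the desired output frequency before the callback; the participant
+    // is expected to deliver its frame at this rate.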
+    audioFrame->sample_rate_hz_ = _outputFrequency;
+    auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
+    if (ret == MixerAudioSource::AudioFrameInfo::kError) {
+      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
+                   "failed to GetAudioFrameWithMuted() from participant");
+      _audioFramePool->PushMemory(audioFrame);
+      continue;
+    }
+    if (audioFrame->samples_per_channel_ == 0) {
+      // Empty frame. Don't use it.
+      _audioFramePool->PushMemory(audioFrame);
+      continue;
+    }
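+    // Keep the mute flag next to the frame so that anonymous mixing can
+    // skip muted frames without inspecting the audio data.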
+    additionalFramesList->push_back(FrameAndMuteInfo(
+        audioFrame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
+  }
+}
+
+void NewAudioConferenceMixerImpl::UpdateMixedStatus(
+    const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const {
+  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
+               "UpdateMixedStatus(mixedParticipantsMap)");
+  assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants);
+
+  // Loop through all participants. If they are in the mix map, they
+  // were mixed.
+  for (MixerAudioSourceList::const_iterator participant =
+           _participantList.begin();
+       participant != _participantList.end(); ++participant) {
+    bool isMixed = false;
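+    // |mixedParticipantsMap| is keyed by frame id, so a linear scan over its
+    // values is needed to match by participant pointer.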
+    for (std::map<int, MixerAudioSource*>::const_iterator it =
+             mixedParticipantsMap.begin();
+         it != mixedParticipantsMap.end(); ++it) {
+      if (it->second == *participant) {
+        isMixed = true;
+        break;
+      }
+    }
+    (*participant)->_mixHistory->SetIsMixed(isMixed);
+  }
+}
+
+void NewAudioConferenceMixerImpl::ClearAudioFrameList(
+    AudioFrameList* audioFrameList) const {
+  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
+               "ClearAudioFrameList(audioFrameList)");
+  for (AudioFrameList::iterator iter = audioFrameList->begin();
+       iter != audioFrameList->end(); ++iter) {
+    _audioFramePool->PushMemory(iter->frame);
+  }
+  audioFrameList->clear();
+}
+
+bool NewAudioConferenceMixerImpl::IsParticipantInList(
+    const MixerAudioSource& participant,
+    const MixerAudioSourceList& participantList) const {
+  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
+               "IsParticipantInList(participant,participantList)");
+  for (MixerAudioSourceList::const_iterator iter = participantList.begin();
+       iter != participantList.end(); ++iter) {
+    if (&participant == *iter) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool NewAudioConferenceMixerImpl::AddParticipantToList(
+    MixerAudioSource* participant,
+    MixerAudioSourceList* participantList) const {
+  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
+               "AddParticipantToList(participant, participantList)");
+  participantList->push_back(participant);
+  // Make sure that the mixed status is correct for the new MixerAudioSource.
+  participant->_mixHistory->ResetMixedStatus();
+  return true;
+}
+
+bool NewAudioConferenceMixerImpl::RemoveParticipantFromList(
+    MixerAudioSource* participant,
+    MixerAudioSourceList* participantList) const {
+  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
+               "RemoveParticipantFromList(participant, participantList)");
+  for (MixerAudioSourceList::iterator iter = participantList->begin();
+       iter != participantList->end(); ++iter) {
+    if (*iter == participant) {
+      participantList->erase(iter);
+      // Participant is no longer mixed, reset to default.
+      participant->_mixHistory->ResetMixedStatus();
+      return true;
+    }
+  }
+  return false;
+}
+
+int32_t NewAudioConferenceMixerImpl::MixFromList(
+    AudioFrame* mixedAudio,
+    const AudioFrameList& audioFrameList) const {
+  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
+               "MixFromList(mixedAudio, audioFrameList)");
+  if (audioFrameList.empty())
+    return 0;
+
+  uint32_t position = 0;
+
+  if (_numMixedParticipants == 1) {
+    mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_;
+    mixedAudio->elapsed_time_ms_ =
+        audioFrameList.front().frame->elapsed_time_ms_;
+  } else {
+    // TODO(wu): Issue 3390.
+    // Audio frame timestamps are only supported in the single-participant
+    // case.
+    mixedAudio->timestamp_ = 0;
+    mixedAudio->elapsed_time_ms_ = -1;
+  }
+
+  for (AudioFrameList::const_iterator iter = audioFrameList.begin();
+       iter != audioFrameList.end(); ++iter) {
+    if (position >= kMaximumAmountOfMixedParticipants) {
+      WEBRTC_TRACE(
+          kTraceMemory, kTraceAudioMixerServer, _id,
+          "Trying to mix more than max amount of mixed participants:%d!",
+          kMaximumAmountOfMixedParticipants);
+      // Assert in debug builds; in release builds, reset |position| and
+      // continue instead of crashing.
+      assert(false);
+      position = 0;
+    }
+    if (!iter->muted) {
+      MixFrames(mixedAudio, iter->frame, use_limiter_);
+    }
+
+    position++;
+  }
+
+  return 0;
+}
+
+// TODO(andrew): consolidate this function with MixFromList.
+int32_t NewAudioConferenceMixerImpl::MixAnonomouslyFromList(
+    AudioFrame* mixedAudio,
+    const AudioFrameList& audioFrameList) const {
+  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
+               "MixAnonomouslyFromList(mixedAudio, audioFrameList)");
+
+  if (audioFrameList.empty())
+    return 0;
+
+  for (AudioFrameList::const_iterator iter = audioFrameList.begin();
+       iter != audioFrameList.end(); ++iter) {
+    if (!iter->muted) {
+      MixFrames(mixedAudio, iter->frame, use_limiter_);
+    }
+  }
+  return 0;
+}
+
+bool NewAudioConferenceMixerImpl::LimitMixedAudio(
+    AudioFrame* mixedAudio) const {
+  if (!use_limiter_) {
+    return true;
+  }
+
+  // Smoothly limit the mixed frame.
+  const int error = _limiter->ProcessStream(mixedAudio);
+
+  // And now we can safely restore the level. This procedure results in
+  // some loss of resolution, deemed acceptable.
+  //
+  // It's possible to apply the gain in the AGC (with a target level of 0 dBFS
+  // and compression gain of 6 dB). However, in the transition frame when this
+  // is enabled (moving from one to two participants) it has the potential to
+  // create discontinuities in the mixed frame.
+  //
+  // Instead we double the frame (with addition since left-shifting a
+  // negative value is undefined).
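+  //
+  // Illustrative example (assuming AudioFrame::operator+= clamps the sum to
+  // the int16_t range): for a sample value of -20000, (-20000) << 1 would be
+  // undefined behavior, while -20000 + -20000 saturates to -32768.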
+  *mixedAudio += *mixedAudio;
+
+  if (error != _limiter->kNoError) {
+    WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
+                 "Error from AudioProcessing: %d", error);
+    assert(false);
+    return false;
+  }
+  return true;
+}
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h b/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h
new file mode 100644
index 0000000..322e452
--- /dev/null
+++ b/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h
@@ -0,0 +1,188 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_MIXER_SOURCE_NEW_AUDIO_CONFERENCE_MIXER_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_MIXER_SOURCE_NEW_AUDIO_CONFERENCE_MIXER_IMPL_H_
+
+#include <list>
+#include <map>
+#include <memory>
+
+#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/audio_mixer/include/new_audio_conference_mixer.h"
+#include "webrtc/modules/audio_conference_mixer/source/memory_pool.h"
+#include "webrtc/modules/audio_conference_mixer/source/time_scheduler.h"
+#include "webrtc/modules/include/module_common_types.h"
+
+namespace webrtc {
+class AudioProcessing;
+class CriticalSectionWrapper;
+
+struct FrameAndMuteInfo {
+  FrameAndMuteInfo(AudioFrame* f, bool m) : frame(f), muted(m) {}
+  AudioFrame* frame;
+  bool muted;
+};
+
+typedef std::list<FrameAndMuteInfo> AudioFrameList;
+typedef std::list<MixerAudioSource*> MixerAudioSourceList;
+
+// Cheshire cat implementation of MixerAudioSource's non-virtual functions.
+class NewMixHistory {
+ public:
+  NewMixHistory();
+  ~NewMixHistory();
+
+  // Returns true if the participant is being mixed.
+  bool IsMixed() const;
+
+  // Returns true if the participant was mixed during the previous mix
+  // iteration.
+  bool WasMixed() const;
+
+  // Updates the mixed status.
+  int32_t SetIsMixed(bool mixed);
+
+  void ResetMixedStatus();
+
+ private:
+  bool _isMixed;
+};
+
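+// Rough usage sketch (illustrative only; the actual setup and the
+// Process() cadence are wired up by the owning module):
+//   NewAudioConferenceMixerImpl mixer(id);
+//   if (mixer.Init()) {
+//     mixer.SetMixabilityStatus(source, true);  // |source| may now be mixed.
+//     mixer.Process();  // Driven every kProcessPeriodicityInMs ms.
+//   }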
+class NewAudioConferenceMixerImpl : public NewAudioConferenceMixer {
+ public:
+  // AudioProcessing only accepts 10 ms frames.
+  enum { kProcessPeriodicityInMs = 10 };
+
+  explicit NewAudioConferenceMixerImpl(int id);
+  ~NewAudioConferenceMixerImpl();
+
+  // Must be called after ctor.
+  bool Init();
+
+  // Module functions
+  int64_t TimeUntilNextProcess() override;
+  void Process() override;
+
+  // NewAudioConferenceMixer functions
+  int32_t RegisterMixedStreamCallback(
+      OldAudioMixerOutputReceiver* mixReceiver) override;
+  int32_t UnRegisterMixedStreamCallback() override;
+  int32_t SetMixabilityStatus(MixerAudioSource* participant,
+                              bool mixable) override;
+  bool MixabilityStatus(const MixerAudioSource& participant) const override;
+  int32_t SetMinimumMixingFrequency(Frequency freq) override;
+  int32_t SetAnonymousMixabilityStatus(MixerAudioSource* participant,
+                                       bool mixable) override;
+  bool AnonymousMixabilityStatus(
+      const MixerAudioSource& participant) const override;
+
+ private:
+  enum { DEFAULT_AUDIO_FRAME_POOLSIZE = 50 };
+
+  // Set/get mix frequency
+  int32_t SetOutputFrequency(const Frequency& frequency);
+  Frequency OutputFrequency() const;
+
+  // Fills mixList with the AudioFrame pointers that should be used when
+  // mixing.
+  // maxAudioFrameCounter is both input and output; it specifies how many
+  // more AudioFrames are allowed to be mixed.
+  // rampOutList contains AudioFrames corresponding to audio streams that
+  // used to be mixed but shouldn't be mixed any longer. These AudioFrames
+  // should be ramped out over this frame to avoid audio discontinuities.
+  void UpdateToMix(AudioFrameList* mixList,
+                   AudioFrameList* rampOutList,
+                   std::map<int, MixerAudioSource*>* mixParticipantList,
+                   size_t* maxAudioFrameCounter) const;
+
+  // Return the lowest mixing frequency that can be used without having to
+  // downsample any audio.
+  int32_t GetLowestMixingFrequency() const;
+  int32_t GetLowestMixingFrequencyFromList(
+      const MixerAudioSourceList& mixList) const;
+
+  // Return the AudioFrames that should be mixed anonymously.
+  void GetAdditionalAudio(AudioFrameList* additionalFramesList) const;
+
+  // Update the NewMixHistory of all MixerAudioSources. mixedParticipantsMap
+  // should contain a map of the MixerAudioSources that have been mixed.
+  void UpdateMixedStatus(
+      const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const;
+
+  // Clears audioFrameList and reclaims all memory associated with it.
+  void ClearAudioFrameList(AudioFrameList* audioFrameList) const;
+
+  // Returns true if the given MixerAudioSource is contained in the
+  // specified list of MixerAudioSources.
+  bool IsParticipantInList(const MixerAudioSource& participant,
+                           const MixerAudioSourceList& participantList) const;
+
+  // Add/remove the MixerAudioSource to the specified
+  // MixerAudioSource list.
+  bool AddParticipantToList(MixerAudioSource* participant,
+                            MixerAudioSourceList* participantList) const;
+  bool RemoveParticipantFromList(MixerAudioSource* removeParticipant,
+                                 MixerAudioSourceList* participantList) const;
+
+  // Mix the AudioFrames stored in audioFrameList into mixedAudio.
+  int32_t MixFromList(AudioFrame* mixedAudio,
+                      const AudioFrameList& audioFrameList) const;
+
+  // Mix the AudioFrames stored in audioFrameList into mixedAudio. No
+  // record is kept of this mix (e.g. the corresponding MixerAudioSources
+  // will not be marked as IsMixed()).
+  int32_t MixAnonomouslyFromList(AudioFrame* mixedAudio,
+                                 const AudioFrameList& audioFrameList) const;
+
+  bool LimitMixedAudio(AudioFrame* mixedAudio) const;
+
+  std::unique_ptr<CriticalSectionWrapper> _crit;
+  std::unique_ptr<CriticalSectionWrapper> _cbCrit;
+
+  int32_t _id;
+
+  Frequency _minimumMixingFreq;
+
+  // Mix result callback
+  OldAudioMixerOutputReceiver* _mixReceiver;
+
+  // The current sample frequency and sample size when mixing.
+  Frequency _outputFrequency;
+  size_t _sampleSize;
+
+  // Memory pool to avoid allocating/deallocating AudioFrames
+  MemoryPool<AudioFrame>* _audioFramePool;
+
+  // List of all participants. Note that all lists are disjoint.
+  MixerAudioSourceList _participantList;  // May be mixed.
+  // Always mixed, anonymously.
+  MixerAudioSourceList _additionalParticipantList;
+
+  size_t _numMixedParticipants;
+  // Determines if we will use a limiter for clipping protection during
+  // mixing.
+  bool use_limiter_;
+
+  uint32_t _timeStamp;
+
+  // Metronome used to pace the periodic Process() calls.
+  TimeScheduler _timeScheduler;
+
+  // Counter keeping track of concurrent calls to process.
+  // Note: should never be higher than 1 or lower than 0.
+  int16_t _processCalls;
+
+  // Used for inhibiting saturation in mixing.
+  std::unique_ptr<AudioProcessing> _limiter;
+};
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_MIXER_SOURCE_NEW_AUDIO_CONFERENCE_MIXER_IMPL_H_
diff --git a/webrtc/modules/modules.gyp b/webrtc/modules/modules.gyp
index f0196e2..3d41878 100644
--- a/webrtc/modules/modules.gyp
+++ b/webrtc/modules/modules.gyp
@@ -12,6 +12,7 @@
     'audio_coding/audio_coding.gypi',
     'audio_conference_mixer/audio_conference_mixer.gypi',
     'audio_device/audio_device.gypi',
+    'audio_mixer/audio_mixer.gypi',
     'audio_processing/audio_processing.gypi',
     'bitrate_controller/bitrate_controller.gypi',
     'congestion_controller/congestion_controller.gypi',
@@ -126,6 +127,7 @@
             'audio_coding_module',
             'audio_conference_mixer',
             'audio_device'  ,
+            'audio_mixer',
             'audio_processing',
             'audioproc_test_utils',
             'bitrate_controller',