Remove voe::OutputMixer and AudioConferenceMixer.

This code path is not used anymore.

BUG=webrtc:4690

Review-Url: https://codereview.webrtc.org/3015553002
Cr-Commit-Position: refs/heads/master@{#19929}
diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index 88e4fb8..70c4759 100755
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -22,7 +22,6 @@
   'examples/objc',
   'media',
   'modules/audio_coding',
-  'modules/audio_conference_mixer',
   'modules/audio_device',
   'modules/audio_processing',
   'modules/desktop_capture',
@@ -74,7 +73,6 @@
 LEGACY_API_DIRS = (
   'common_audio/include',
   'modules/audio_coding/include',
-  'modules/audio_conference_mixer/include',
   'modules/audio_processing/include',
   'modules/bitrate_controller/include',
   'modules/congestion_controller/include',
diff --git a/audio/audio_receive_stream.cc b/audio/audio_receive_stream.cc
index 8ff1d58..37985d8 100644
--- a/audio/audio_receive_stream.cc
+++ b/audio/audio_receive_stream.cc
@@ -247,7 +247,7 @@
 }
 
 int AudioReceiveStream::PreferredSampleRate() const {
-  return channel_proxy_->NeededFrequency();
+  return channel_proxy_->PreferredSampleRate();
 }
 
 int AudioReceiveStream::id() const {
diff --git a/modules/BUILD.gn b/modules/BUILD.gn
index 7394724..5d4987e 100644
--- a/modules/BUILD.gn
+++ b/modules/BUILD.gn
@@ -12,7 +12,6 @@
 group("modules") {
   public_deps = [
     "audio_coding",
-    "audio_conference_mixer",
     "audio_device",
     "audio_mixer",
     "audio_processing",
@@ -232,7 +231,6 @@
       ":module_api",
       "../test:test_main",
       "audio_coding:audio_coding_unittests",
-      "audio_conference_mixer:audio_conference_mixer_unittests",
       "audio_device:audio_device_unittests",
       "audio_mixer:audio_mixer_unittests",
       "audio_processing:audio_processing_unittests",
diff --git a/modules/audio_conference_mixer/BUILD.gn b/modules/audio_conference_mixer/BUILD.gn
deleted file mode 100644
index 16a62b8..0000000
--- a/modules/audio_conference_mixer/BUILD.gn
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS.  All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-import("../../webrtc.gni")
-
-config("audio_conference_mixer_config") {
-  visibility = [ ":*" ]  # Only targets in this file can depend on this.
-  include_dirs = [
-    "include",
-    "../include",
-  ]
-}
-
-rtc_static_library("audio_conference_mixer") {
-  sources = [
-    "include/audio_conference_mixer.h",
-    "include/audio_conference_mixer_defines.h",
-    "source/audio_conference_mixer_impl.cc",
-    "source/audio_conference_mixer_impl.h",
-    "source/audio_frame_manipulator.cc",
-    "source/audio_frame_manipulator.h",
-    "source/memory_pool.h",
-    "source/memory_pool_posix.h",
-    "source/memory_pool_win.h",
-    "source/time_scheduler.cc",
-    "source/time_scheduler.h",
-  ]
-
-  public_configs = [ ":audio_conference_mixer_config" ]
-
-  if (!build_with_chromium && is_clang) {
-    # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
-    suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
-  }
-
-  deps = [
-    "..:module_api",
-    "../..:webrtc_common",
-    "../../audio/utility:audio_frame_operations",
-    "../../rtc_base:rtc_base_approved",
-    "../../system_wrappers",
-    "../audio_processing",
-  ]
-}
-
-if (rtc_include_tests) {
-  rtc_source_set("audio_conference_mixer_unittests") {
-    testonly = true
-
-    # Skip restricting visibility on mobile platforms since the tests on those
-    # gets additional generated targets which would require many lines here to
-    # cover (which would be confusing to read and hard to maintain).
-    if (!is_android && !is_ios) {
-      visibility = [ "..:modules_unittests" ]
-    }
-    sources = [
-      "test/audio_conference_mixer_unittest.cc",
-    ]
-    deps = [
-      ":audio_conference_mixer",
-      "../../test:test_support",
-      "//testing/gmock",
-    ]
-    if (is_win) {
-      cflags = [
-        # TODO(kjellander): bugs.webrtc.org/261: Fix this warning.
-        "/wd4373",  # virtual function override.
-      ]
-    }
-    if (!build_with_chromium && is_clang) {
-      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
-      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
-    }
-  }
-}
diff --git a/modules/audio_conference_mixer/DEPS b/modules/audio_conference_mixer/DEPS
deleted file mode 100644
index 2410757..0000000
--- a/modules/audio_conference_mixer/DEPS
+++ /dev/null
@@ -1,4 +0,0 @@
-include_rules = [
-  "+audio/utility/audio_frame_operations.h",
-  "+system_wrappers",
-]
diff --git a/modules/audio_conference_mixer/OWNERS b/modules/audio_conference_mixer/OWNERS
deleted file mode 100644
index 6df09bb..0000000
--- a/modules/audio_conference_mixer/OWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-minyue@webrtc.org
-
-# These are for the common case of adding or renaming files. If you're doing
-# structural changes, please get a review from a reviewer in this file.
-per-file *.gn=*
-per-file *.gni=*
diff --git a/modules/audio_conference_mixer/include/audio_conference_mixer.h b/modules/audio_conference_mixer/include/audio_conference_mixer.h
deleted file mode 100644
index 0ccb6d9..0000000
--- a/modules/audio_conference_mixer/include/audio_conference_mixer.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_AUDIO_CONFERENCE_MIXER_INCLUDE_AUDIO_CONFERENCE_MIXER_H_
-#define MODULES_AUDIO_CONFERENCE_MIXER_INCLUDE_AUDIO_CONFERENCE_MIXER_H_
-
-#include "modules/audio_conference_mixer/include/audio_conference_mixer_defines.h"
-#include "modules/include/module.h"
-#include "modules/include/module_common_types.h"
-
-namespace webrtc {
-class AudioMixerOutputReceiver;
-class MixerParticipant;
-class Trace;
-
-class AudioConferenceMixer : public Module
-{
-public:
-    enum {kMaximumAmountOfMixedParticipants = 3};
-    enum Frequency
-    {
-        kNbInHz           = 8000,
-        kWbInHz           = 16000,
-        kSwbInHz          = 32000,
-        kFbInHz           = 48000,
-        kLowestPossible   = -1,
-        kDefaultFrequency = kWbInHz
-    };
-
-    // Factory method. Constructor disabled.
-    static AudioConferenceMixer* Create(int id);
-    virtual ~AudioConferenceMixer() {}
-
-    // Module functions
-    int64_t TimeUntilNextProcess() override = 0;
-    void Process() override = 0;
-
-    // Register/unregister a callback class for receiving the mixed audio.
-    virtual int32_t RegisterMixedStreamCallback(
-        AudioMixerOutputReceiver* receiver) = 0;
-    virtual int32_t UnRegisterMixedStreamCallback() = 0;
-
-    // Add/remove participants as candidates for mixing.
-    virtual int32_t SetMixabilityStatus(MixerParticipant* participant,
-                                        bool mixable) = 0;
-    // Returns true if a participant is a candidate for mixing.
-    virtual bool MixabilityStatus(
-        const MixerParticipant& participant) const = 0;
-
-    // Inform the mixer that the participant should always be mixed and not
-    // count toward the number of mixed participants. Note that a participant
-    // must have been added to the mixer (by calling SetMixabilityStatus())
-    // before this function can be successfully called.
-    virtual int32_t SetAnonymousMixabilityStatus(
-        MixerParticipant* participant, bool mixable) = 0;
-    // Returns true if the participant is mixed anonymously.
-    virtual bool AnonymousMixabilityStatus(
-        const MixerParticipant& participant) const = 0;
-
-    // Set the minimum sampling frequency at which to mix. The mixing algorithm
-    // may still choose to mix at a higher samling frequency to avoid
-    // downsampling of audio contributing to the mixed audio.
-    virtual int32_t SetMinimumMixingFrequency(Frequency freq) = 0;
-
-protected:
-    AudioConferenceMixer() {}
-};
-}  // namespace webrtc
-
-#endif // MODULES_AUDIO_CONFERENCE_MIXER_INCLUDE_AUDIO_CONFERENCE_MIXER_H_
diff --git a/modules/audio_conference_mixer/include/audio_conference_mixer_defines.h b/modules/audio_conference_mixer/include/audio_conference_mixer_defines.h
deleted file mode 100644
index b40be23..0000000
--- a/modules/audio_conference_mixer/include/audio_conference_mixer_defines.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_AUDIO_CONFERENCE_MIXER_INCLUDE_AUDIO_CONFERENCE_MIXER_DEFINES_H_
-#define MODULES_AUDIO_CONFERENCE_MIXER_INCLUDE_AUDIO_CONFERENCE_MIXER_DEFINES_H_
-
-#include "modules/include/module_common_types.h"
-#include "rtc_base/checks.h"
-#include "typedefs.h"  // NOLINT(build/include)
-
-namespace webrtc {
-class MixHistory;
-
-// A callback class that all mixer participants must inherit from/implement.
-class MixerParticipant
-{
-public:
-    // The implementation of this function should update audioFrame with new
-    // audio every time it's called.
-    //
-    // If it returns -1, the frame will not be added to the mix.
-    //
-    // NOTE: This function should not be called. It will remain for a short
-    // time so that subclasses can override it without getting warnings.
-    // TODO(henrik.lundin) Remove this function.
-    virtual int32_t GetAudioFrame(int32_t id,
-                                  AudioFrame* audioFrame) {
-      RTC_CHECK(false);
-      return -1;
-    }
-
-
-    // The implementation of GetAudioFrameWithMuted should update audio_frame
-    // with new audio every time it's called. The return value will be
-    // interpreted as follows.
-    enum class AudioFrameInfo {
-      kNormal,  // The samples in audio_frame are valid and should be used.
-      kMuted,   // The samples in audio_frame should not be used, but should be
-                // implicitly interpreted as zero. Other fields in audio_frame
-                // may be read and should contain meaningful values.
-      kError    // audio_frame will not be used.
-    };
-
-    virtual AudioFrameInfo GetAudioFrameWithMuted(int32_t id,
-                                                  AudioFrame* audio_frame) {
-      return GetAudioFrame(id, audio_frame) == -1 ?
-          AudioFrameInfo::kError :
-          AudioFrameInfo::kNormal;
-    }
-
-    // Returns true if the participant was mixed this mix iteration.
-    bool IsMixed() const;
-
-    // This function specifies the sampling frequency needed for the AudioFrame
-    // for future GetAudioFrame(..) calls.
-    virtual int32_t NeededFrequency(int32_t id) const = 0;
-
-    MixHistory* _mixHistory;
-protected:
-    MixerParticipant();
-    virtual ~MixerParticipant();
-};
-
-class AudioMixerOutputReceiver
-{
-public:
-    // This callback function provides the mixed audio for this mix iteration.
-    // Note that uniqueAudioFrames is an array of AudioFrame pointers with the
-    // size according to the size parameter.
-    virtual void NewMixedAudio(const int32_t id,
-                               const AudioFrame& generalAudioFrame,
-                               const AudioFrame** uniqueAudioFrames,
-                               const uint32_t size) = 0;
-protected:
-    AudioMixerOutputReceiver() {}
-    virtual ~AudioMixerOutputReceiver() {}
-};
-}  // namespace webrtc
-
-#endif // MODULES_AUDIO_CONFERENCE_MIXER_INCLUDE_AUDIO_CONFERENCE_MIXER_DEFINES_H_
diff --git a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc b/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
deleted file mode 100644
index 7927d24..0000000
--- a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
+++ /dev/null
@@ -1,904 +0,0 @@
-/*
- *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "modules/audio_conference_mixer/source/audio_conference_mixer_impl.h"
-#include "audio/utility/audio_frame_operations.h"
-#include "modules/audio_conference_mixer/include/audio_conference_mixer_defines.h"
-#include "modules/audio_conference_mixer/source/audio_frame_manipulator.h"
-#include "modules/audio_processing/include/audio_processing.h"
-#include "rtc_base/logging.h"
-
-namespace webrtc {
-namespace {
-
-struct ParticipantFrameStruct {
-  ParticipantFrameStruct(MixerParticipant* p, AudioFrame* a, bool m)
-      : participant(p), audioFrame(a), muted(m) {}
-  MixerParticipant* participant;
-  AudioFrame* audioFrame;
-  bool muted;
-};
-
-typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList;
-
-// Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
-// These effects are applied to |frame| itself prior to mixing. Assumes that
-// |mixed_frame| always has at least as many channels as |frame|. Supports
-// stereo at most.
-//
-// TODO(andrew): consider not modifying |frame| here.
-void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
-  assert(mixed_frame->num_channels_ >= frame->num_channels_);
-  if (use_limiter) {
-    // This is to avoid saturation in the mixing. It is only
-    // meaningful if the limiter will be used.
-    AudioFrameOperations::ApplyHalfGain(frame);
-  }
-  if (mixed_frame->num_channels_ > frame->num_channels_) {
-    // We only support mono-to-stereo.
-    assert(mixed_frame->num_channels_ == 2 &&
-           frame->num_channels_ == 1);
-    AudioFrameOperations::MonoToStereo(frame);
-  }
-
-  AudioFrameOperations::Add(*frame, mixed_frame);
-}
-
-// Return the max number of channels from a |list| composed of AudioFrames.
-size_t MaxNumChannels(const AudioFrameList* list) {
-  size_t max_num_channels = 1;
-  for (AudioFrameList::const_iterator iter = list->begin();
-       iter != list->end();
-       ++iter) {
-    max_num_channels = std::max(max_num_channels, (*iter).frame->num_channels_);
-  }
-  return max_num_channels;
-}
-
-}  // namespace
-
-MixerParticipant::MixerParticipant()
-    : _mixHistory(new MixHistory()) {
-}
-
-MixerParticipant::~MixerParticipant() {
-    delete _mixHistory;
-}
-
-bool MixerParticipant::IsMixed() const {
-    return _mixHistory->IsMixed();
-}
-
-MixHistory::MixHistory()
-    : _isMixed(0) {
-}
-
-MixHistory::~MixHistory() {
-}
-
-bool MixHistory::IsMixed() const {
-    return _isMixed;
-}
-
-bool MixHistory::WasMixed() const {
-    // Was mixed is the same as is mixed depending on perspective. This function
-    // is for the perspective of AudioConferenceMixerImpl.
-    return IsMixed();
-}
-
-int32_t MixHistory::SetIsMixed(const bool mixed) {
-    _isMixed = mixed;
-    return 0;
-}
-
-void MixHistory::ResetMixedStatus() {
-    _isMixed = false;
-}
-
-AudioConferenceMixer* AudioConferenceMixer::Create(int id) {
-    AudioConferenceMixerImpl* mixer = new AudioConferenceMixerImpl(id);
-    if(!mixer->Init()) {
-        delete mixer;
-        return NULL;
-    }
-    return mixer;
-}
-
-AudioConferenceMixerImpl::AudioConferenceMixerImpl(int id)
-    : _id(id),
-      _minimumMixingFreq(kLowestPossible),
-      _mixReceiver(NULL),
-      _outputFrequency(kDefaultFrequency),
-      _sampleSize(0),
-      _audioFramePool(NULL),
-      _participantList(),
-      _additionalParticipantList(),
-      _numMixedParticipants(0),
-      use_limiter_(true),
-      _timeStamp(0),
-      _timeScheduler(kProcessPeriodicityInMs),
-      _processCalls(0) {}
-
-bool AudioConferenceMixerImpl::Init() {
-    Config config;
-    config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
-    _limiter.reset(AudioProcessing::Create(config));
-    if(!_limiter.get())
-        return false;
-
-    MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool,
-                                             DEFAULT_AUDIO_FRAME_POOLSIZE);
-    if(_audioFramePool == NULL)
-        return false;
-
-    if(SetOutputFrequency(kDefaultFrequency) == -1)
-        return false;
-
-    if(_limiter->gain_control()->set_mode(GainControl::kFixedDigital) !=
-        _limiter->kNoError)
-        return false;
-
-    // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the
-    // divide-by-2 but -7 is used instead to give a bit of headroom since the
-    // AGC is not a hard limiter.
-    if(_limiter->gain_control()->set_target_level_dbfs(7) != _limiter->kNoError)
-        return false;
-
-    if(_limiter->gain_control()->set_compression_gain_db(0)
-        != _limiter->kNoError)
-        return false;
-
-    if(_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError)
-        return false;
-
-    if(_limiter->gain_control()->Enable(true) != _limiter->kNoError)
-        return false;
-
-    return true;
-}
-
-AudioConferenceMixerImpl::~AudioConferenceMixerImpl() {
-    MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
-    assert(_audioFramePool == NULL);
-}
-
-// Process should be called every kProcessPeriodicityInMs ms
-int64_t AudioConferenceMixerImpl::TimeUntilNextProcess() {
-    int64_t timeUntilNextProcess = 0;
-    rtc::CritScope cs(&_crit);
-    if(_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) {
-        LOG(LS_ERROR) << "failed in TimeToNextUpdate() call";
-        // Sanity check
-        assert(false);
-        return -1;
-    }
-    return timeUntilNextProcess;
-}
-
-void AudioConferenceMixerImpl::Process() {
-    size_t remainingParticipantsAllowedToMix =
-        kMaximumAmountOfMixedParticipants;
-    {
-        rtc::CritScope cs(&_crit);
-        assert(_processCalls == 0);
-        _processCalls++;
-
-        // Let the scheduler know that we are running one iteration.
-        _timeScheduler.UpdateScheduler();
-    }
-
-    AudioFrameList mixList;
-    AudioFrameList rampOutList;
-    AudioFrameList additionalFramesList;
-    std::map<int, MixerParticipant*> mixedParticipantsMap;
-    {
-        rtc::CritScope cs(&_cbCrit);
-
-        int32_t lowFreq = GetLowestMixingFrequency();
-        // SILK can run in 12 kHz and 24 kHz. These frequencies are not
-        // supported so use the closest higher frequency to not lose any
-        // information.
-        // TODO(henrike): this is probably more appropriate to do in
-        //                GetLowestMixingFrequency().
-        if (lowFreq == 12000) {
-            lowFreq = 16000;
-        } else if (lowFreq == 24000) {
-            lowFreq = 32000;
-        }
-        if(lowFreq <= 0) {
-          rtc::CritScope cs(&_crit);
-          _processCalls--;
-          return;
-        } else {
-            switch(lowFreq) {
-            case 8000:
-                if(OutputFrequency() != kNbInHz) {
-                    SetOutputFrequency(kNbInHz);
-                }
-                break;
-            case 16000:
-                if(OutputFrequency() != kWbInHz) {
-                    SetOutputFrequency(kWbInHz);
-                }
-                break;
-            case 32000:
-                if(OutputFrequency() != kSwbInHz) {
-                    SetOutputFrequency(kSwbInHz);
-                }
-                break;
-            case 48000:
-                if(OutputFrequency() != kFbInHz) {
-                    SetOutputFrequency(kFbInHz);
-                }
-                break;
-            default:
-                assert(false);
-
-                rtc::CritScope cs(&_crit);
-                _processCalls--;
-                return;
-            }
-        }
-
-        UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
-                    &remainingParticipantsAllowedToMix);
-
-        GetAdditionalAudio(&additionalFramesList);
-        UpdateMixedStatus(mixedParticipantsMap);
-    }
-
-    // Get an AudioFrame for mixing from the memory pool.
-    AudioFrame* mixedAudio = NULL;
-    if(_audioFramePool->PopMemory(mixedAudio) == -1) {
-        LOG(LS_ERROR) << "failed PopMemory() call";
-        assert(false);
-        return;
-    }
-
-    {
-        rtc::CritScope cs(&_crit);
-
-        // TODO(henrike): it might be better to decide the number of channels
-        //                with an API instead of dynamically.
-
-        // Find the max channels over all mixing lists.
-        const size_t num_mixed_channels = std::max(MaxNumChannels(&mixList),
-            std::max(MaxNumChannels(&additionalFramesList),
-                     MaxNumChannels(&rampOutList)));
-
-        mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency,
-                                AudioFrame::kNormalSpeech,
-                                AudioFrame::kVadPassive, num_mixed_channels);
-
-        _timeStamp += static_cast<uint32_t>(_sampleSize);
-
-        // We only use the limiter if it supports the output sample rate and
-        // we're actually mixing multiple streams.
-        use_limiter_ =
-            _numMixedParticipants > 1 &&
-            _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz;
-
-        MixFromList(mixedAudio, mixList);
-        MixAnonomouslyFromList(mixedAudio, additionalFramesList);
-        MixAnonomouslyFromList(mixedAudio, rampOutList);
-
-        if(mixedAudio->samples_per_channel_ == 0) {
-            // Nothing was mixed, set the audio samples to silence.
-            mixedAudio->samples_per_channel_ = _sampleSize;
-            AudioFrameOperations::Mute(mixedAudio);
-        } else {
-            // Only call the limiter if we have something to mix.
-            LimitMixedAudio(mixedAudio);
-        }
-    }
-
-    {
-        rtc::CritScope cs(&_cbCrit);
-        if(_mixReceiver != NULL) {
-            const AudioFrame** dummy = NULL;
-            _mixReceiver->NewMixedAudio(
-                _id,
-                *mixedAudio,
-                dummy,
-                0);
-        }
-    }
-
-    // Reclaim all outstanding memory.
-    _audioFramePool->PushMemory(mixedAudio);
-    ClearAudioFrameList(&mixList);
-    ClearAudioFrameList(&rampOutList);
-    ClearAudioFrameList(&additionalFramesList);
-    {
-        rtc::CritScope cs(&_crit);
-        _processCalls--;
-    }
-    return;
-}
-
-int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback(
-    AudioMixerOutputReceiver* mixReceiver) {
-    rtc::CritScope cs(&_cbCrit);
-    if(_mixReceiver != NULL) {
-        return -1;
-    }
-    _mixReceiver = mixReceiver;
-    return 0;
-}
-
-int32_t AudioConferenceMixerImpl::UnRegisterMixedStreamCallback() {
-    rtc::CritScope cs(&_cbCrit);
-    if(_mixReceiver == NULL) {
-        return -1;
-    }
-    _mixReceiver = NULL;
-    return 0;
-}
-
-int32_t AudioConferenceMixerImpl::SetOutputFrequency(
-    const Frequency& frequency) {
-    rtc::CritScope cs(&_crit);
-
-    _outputFrequency = frequency;
-    _sampleSize =
-        static_cast<size_t>((_outputFrequency*kProcessPeriodicityInMs) / 1000);
-
-    return 0;
-}
-
-AudioConferenceMixer::Frequency
-AudioConferenceMixerImpl::OutputFrequency() const {
-    rtc::CritScope cs(&_crit);
-    return _outputFrequency;
-}
-
-int32_t AudioConferenceMixerImpl::SetMixabilityStatus(
-    MixerParticipant* participant, bool mixable) {
-    if (!mixable) {
-        // Anonymous participants are in a separate list. Make sure that the
-        // participant is in the _participantList if it is being mixed.
-        SetAnonymousMixabilityStatus(participant, false);
-    }
-    size_t numMixedParticipants;
-    {
-        rtc::CritScope cs(&_cbCrit);
-        const bool isMixed =
-            IsParticipantInList(*participant, _participantList);
-        // API must be called with a new state.
-        if(!(mixable ^ isMixed)) {
-            LOG(LS_ERROR) << "Mixable is aready " <<
-                (isMixed ? "ON" : "off");
-            return -1;
-        }
-        bool success = false;
-        if(mixable) {
-            success = AddParticipantToList(participant, &_participantList);
-        } else {
-            success = RemoveParticipantFromList(participant, &_participantList);
-        }
-        if(!success) {
-            LOG(LS_ERROR) << "failed to " << (mixable ? "add" : "remove")
-                          << " participant";
-            assert(false);
-            return -1;
-        }
-
-        size_t numMixedNonAnonymous = _participantList.size();
-        if (numMixedNonAnonymous > kMaximumAmountOfMixedParticipants) {
-            numMixedNonAnonymous = kMaximumAmountOfMixedParticipants;
-        }
-        numMixedParticipants =
-            numMixedNonAnonymous + _additionalParticipantList.size();
-    }
-    // A MixerParticipant was added or removed. Make sure the scratch
-    // buffer is updated if necessary.
-    // Note: The scratch buffer may only be updated in Process().
-    rtc::CritScope cs(&_crit);
-    _numMixedParticipants = numMixedParticipants;
-    return 0;
-}
-
-bool AudioConferenceMixerImpl::MixabilityStatus(
-    const MixerParticipant& participant) const {
-    rtc::CritScope cs(&_cbCrit);
-    return IsParticipantInList(participant, _participantList);
-}
-
-int32_t AudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
-    MixerParticipant* participant, bool anonymous) {
-    rtc::CritScope cs(&_cbCrit);
-    if(IsParticipantInList(*participant, _additionalParticipantList)) {
-        if(anonymous) {
-            return 0;
-        }
-        if(!RemoveParticipantFromList(participant,
-                                      &_additionalParticipantList)) {
-            LOG(LS_ERROR) << "unable to remove participant from anonymous list";
-            assert(false);
-            return -1;
-        }
-        return AddParticipantToList(participant, &_participantList) ? 0 : -1;
-    }
-    if(!anonymous) {
-        return 0;
-    }
-    const bool mixable = RemoveParticipantFromList(participant,
-                                                   &_participantList);
-    if(!mixable) {
-        LOG(LS_WARNING) <<
-          "participant must be registered before turning it into anonymous";
-        // Setting anonymous status is only possible if MixerParticipant is
-        // already registered.
-        return -1;
-    }
-    return AddParticipantToList(participant, &_additionalParticipantList) ?
-        0 : -1;
-}
-
-bool AudioConferenceMixerImpl::AnonymousMixabilityStatus(
-    const MixerParticipant& participant) const {
-    rtc::CritScope cs(&_cbCrit);
-    return IsParticipantInList(participant, _additionalParticipantList);
-}
-
-int32_t AudioConferenceMixerImpl::SetMinimumMixingFrequency(
-    Frequency freq) {
-    // Make sure that only allowed sampling frequencies are used. Use closest
-    // higher sampling frequency to avoid losing information.
-    if (static_cast<int>(freq) == 12000) {
-         freq = kWbInHz;
-    } else if (static_cast<int>(freq) == 24000) {
-        freq = kSwbInHz;
-    }
-
-    if((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) ||
-       (freq == kLowestPossible)) {
-        _minimumMixingFreq=freq;
-        return 0;
-    } else {
-        LOG(LS_ERROR) << "SetMinimumMixingFrequency incorrect frequency: "
-                      << freq;
-        assert(false);
-        return -1;
-    }
-}
-
-// Check all AudioFrames that are to be mixed. The highest sampling frequency
-// found is the lowest that can be used without losing information.
-int32_t AudioConferenceMixerImpl::GetLowestMixingFrequency() const {
-    const int participantListFrequency =
-        GetLowestMixingFrequencyFromList(_participantList);
-    const int anonymousListFrequency =
-        GetLowestMixingFrequencyFromList(_additionalParticipantList);
-    const int highestFreq =
-        (participantListFrequency > anonymousListFrequency) ?
-            participantListFrequency : anonymousListFrequency;
-    // Check if the user specified a lowest mixing frequency.
-    if(_minimumMixingFreq != kLowestPossible) {
-        if(_minimumMixingFreq > highestFreq) {
-            return _minimumMixingFreq;
-        }
-    }
-    return highestFreq;
-}
-
-int32_t AudioConferenceMixerImpl::GetLowestMixingFrequencyFromList(
-    const MixerParticipantList& mixList) const {
-    int32_t highestFreq = 8000;
-    for (MixerParticipantList::const_iterator iter = mixList.begin();
-         iter != mixList.end();
-         ++iter) {
-        const int32_t neededFrequency = (*iter)->NeededFrequency(_id);
-        if(neededFrequency > highestFreq) {
-            highestFreq = neededFrequency;
-        }
-    }
-    return highestFreq;
-}
-
-void AudioConferenceMixerImpl::UpdateToMix(
-    AudioFrameList* mixList,
-    AudioFrameList* rampOutList,
-    std::map<int, MixerParticipant*>* mixParticipantList,
-    size_t* maxAudioFrameCounter) const {
-    LOG(LS_VERBOSE) <<
-        "UpdateToMix(mixList,rampOutList,mixParticipantList," <<
-        *maxAudioFrameCounter << ")";
-    const size_t mixListStartSize = mixList->size();
-    AudioFrameList activeList;
-    // Struct needed by the passive lists to keep track of which AudioFrame
-    // belongs to which MixerParticipant.
-    ParticipantFrameStructList passiveWasNotMixedList;
-    ParticipantFrameStructList passiveWasMixedList;
-    for (MixerParticipantList::const_iterator participant =
-        _participantList.begin(); participant != _participantList.end();
-         ++participant) {
-        // Stop keeping track of passive participants if there are already
-        // enough participants available (they wont be mixed anyway).
-        bool mustAddToPassiveList = (*maxAudioFrameCounter >
-                                    (activeList.size() +
-                                     passiveWasMixedList.size() +
-                                     passiveWasNotMixedList.size()));
-
-        bool wasMixed = false;
-        wasMixed = (*participant)->_mixHistory->WasMixed();
-        AudioFrame* audioFrame = NULL;
-        if(_audioFramePool->PopMemory(audioFrame) == -1) {
-            LOG(LS_ERROR) << "failed PopMemory() call";
-            assert(false);
-            return;
-        }
-        audioFrame->sample_rate_hz_ = _outputFrequency;
-
-        auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
-        if (ret == MixerParticipant::AudioFrameInfo::kError) {
-            LOG(LS_WARNING)
-              << "failed to GetAudioFrameWithMuted() from participant";
-            _audioFramePool->PushMemory(audioFrame);
-            continue;
-        }
-        const bool muted = (ret == MixerParticipant::AudioFrameInfo::kMuted);
-        if (_participantList.size() != 1) {
-          // TODO(wu): Issue 3390, add support for multiple participants case.
-          audioFrame->ntp_time_ms_ = -1;
-        }
-
-        // TODO(henrike): this assert triggers in some test cases where SRTP is
-        // used which prevents NetEQ from making a VAD. Temporarily disable this
-        // assert until the problem is fixed on a higher level.
-        // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown);
-        if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
-            LOG(LS_WARNING) << "invalid VAD state from participant";
-        }
-
-        if(audioFrame->vad_activity_ == AudioFrame::kVadActive) {
-            if(!wasMixed && !muted) {
-                RampIn(*audioFrame);
-            }
-
-            if(activeList.size() >= *maxAudioFrameCounter) {
-                // There are already more active participants than should be
-                // mixed. Only keep the ones with the highest energy.
-                AudioFrameList::iterator replaceItem;
-                uint32_t lowestEnergy =
-                    muted ? 0 : CalculateEnergy(*audioFrame);
-
-                bool found_replace_item = false;
-                for (AudioFrameList::iterator iter = activeList.begin();
-                     iter != activeList.end();
-                     ++iter) {
-                    const uint32_t energy =
-                        muted ? 0 : CalculateEnergy(*iter->frame);
-                    if(energy < lowestEnergy) {
-                        replaceItem = iter;
-                        lowestEnergy = energy;
-                        found_replace_item = true;
-                    }
-                }
-                if(found_replace_item) {
-                    RTC_DCHECK(!muted);  // Cannot replace with a muted frame.
-                    FrameAndMuteInfo replaceFrame = *replaceItem;
-
-                    bool replaceWasMixed = false;
-                    std::map<int, MixerParticipant*>::const_iterator it =
-                        mixParticipantList->find(replaceFrame.frame->id_);
-
-                    // When a frame is pushed to |activeList| it is also pushed
-                    // to mixParticipantList with the frame's id. This means
-                    // that the Find call above should never fail.
-                    assert(it != mixParticipantList->end());
-                    replaceWasMixed = it->second->_mixHistory->WasMixed();
-
-                    mixParticipantList->erase(replaceFrame.frame->id_);
-                    activeList.erase(replaceItem);
-
-                    activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
-                    (*mixParticipantList)[audioFrame->id_] = *participant;
-                    assert(mixParticipantList->size() <=
-                           kMaximumAmountOfMixedParticipants);
-
-                    if (replaceWasMixed) {
-                      if (!replaceFrame.muted) {
-                        RampOut(*replaceFrame.frame);
-                      }
-                      rampOutList->push_back(replaceFrame);
-                      assert(rampOutList->size() <=
-                             kMaximumAmountOfMixedParticipants);
-                    } else {
-                      _audioFramePool->PushMemory(replaceFrame.frame);
-                    }
-                } else {
-                    if(wasMixed) {
-                        if (!muted) {
-                            RampOut(*audioFrame);
-                        }
-                        rampOutList->push_back(FrameAndMuteInfo(audioFrame,
-                                                                muted));
-                        assert(rampOutList->size() <=
-                               kMaximumAmountOfMixedParticipants);
-                    } else {
-                        _audioFramePool->PushMemory(audioFrame);
-                    }
-                }
-            } else {
-                activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
-                (*mixParticipantList)[audioFrame->id_] = *participant;
-                assert(mixParticipantList->size() <=
-                       kMaximumAmountOfMixedParticipants);
-            }
-        } else {
-            if(wasMixed) {
-                ParticipantFrameStruct* part_struct =
-                    new ParticipantFrameStruct(*participant, audioFrame, muted);
-                passiveWasMixedList.push_back(part_struct);
-            } else if(mustAddToPassiveList) {
-                if (!muted) {
-                    RampIn(*audioFrame);
-                }
-                ParticipantFrameStruct* part_struct =
-                    new ParticipantFrameStruct(*participant, audioFrame, muted);
-                passiveWasNotMixedList.push_back(part_struct);
-            } else {
-                _audioFramePool->PushMemory(audioFrame);
-            }
-        }
-    }
-    assert(activeList.size() <= *maxAudioFrameCounter);
-    // At this point it is known which participants should be mixed. Transfer
-    // this information to this functions output parameters.
-    for (AudioFrameList::const_iterator iter = activeList.begin();
-         iter != activeList.end();
-         ++iter) {
-        mixList->push_back(*iter);
-    }
-    activeList.clear();
-    // Always mix a constant number of AudioFrames. If there aren't enough
-    // active participants mix passive ones. Starting with those that was mixed
-    // last iteration.
-    for (ParticipantFrameStructList::const_iterator
-        iter = passiveWasMixedList.begin(); iter != passiveWasMixedList.end();
-         ++iter) {
-        if(mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
-            mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame,
-                                                (*iter)->muted));
-            (*mixParticipantList)[(*iter)->audioFrame->id_] =
-                (*iter)->participant;
-            assert(mixParticipantList->size() <=
-                   kMaximumAmountOfMixedParticipants);
-        } else {
-            _audioFramePool->PushMemory((*iter)->audioFrame);
-        }
-        delete *iter;
-    }
-    // And finally the ones that have not been mixed for a while.
-    for (ParticipantFrameStructList::const_iterator iter =
-             passiveWasNotMixedList.begin();
-         iter != passiveWasNotMixedList.end();
-         ++iter) {
-        if(mixList->size() <  *maxAudioFrameCounter + mixListStartSize) {
-          mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame,
-                                              (*iter)->muted));
-            (*mixParticipantList)[(*iter)->audioFrame->id_] =
-                (*iter)->participant;
-            assert(mixParticipantList->size() <=
-                   kMaximumAmountOfMixedParticipants);
-        } else {
-            _audioFramePool->PushMemory((*iter)->audioFrame);
-        }
-        delete *iter;
-    }
-    assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size());
-    *maxAudioFrameCounter += mixListStartSize - mixList->size();
-}
-
-void AudioConferenceMixerImpl::GetAdditionalAudio(
-    AudioFrameList* additionalFramesList) const {
-    LOG(LS_VERBOSE) << "GetAdditionalAudio(additionalFramesList)";
-    // The GetAudioFrameWithMuted() callback may result in the participant being
-    // removed from additionalParticipantList_. If that happens it will
-    // invalidate any iterators. Create a copy of the participants list such
-    // that the list of participants can be traversed safely.
-    MixerParticipantList additionalParticipantList;
-    additionalParticipantList.insert(additionalParticipantList.begin(),
-                                     _additionalParticipantList.begin(),
-                                     _additionalParticipantList.end());
-
-    for (MixerParticipantList::const_iterator participant =
-             additionalParticipantList.begin();
-         participant != additionalParticipantList.end();
-         ++participant) {
-        AudioFrame* audioFrame = NULL;
-        if(_audioFramePool->PopMemory(audioFrame) == -1) {
-          LOG(LS_ERROR) << "failed PopMemory() call";
-            assert(false);
-            return;
-        }
-        audioFrame->sample_rate_hz_ = _outputFrequency;
-        auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame);
-        if (ret == MixerParticipant::AudioFrameInfo::kError) {
-            LOG(LS_WARNING)
-                << "failed to GetAudioFrameWithMuted() from participant";
-            _audioFramePool->PushMemory(audioFrame);
-            continue;
-        }
-        if(audioFrame->samples_per_channel_ == 0) {
-            // Empty frame. Don't use it.
-            _audioFramePool->PushMemory(audioFrame);
-            continue;
-        }
-        additionalFramesList->push_back(FrameAndMuteInfo(
-            audioFrame, ret == MixerParticipant::AudioFrameInfo::kMuted));
-    }
-}
-
-void AudioConferenceMixerImpl::UpdateMixedStatus(
-    const std::map<int, MixerParticipant*>& mixedParticipantsMap) const {
-    LOG(LS_VERBOSE) << "UpdateMixedStatus(mixedParticipantsMap)";
-    assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants);
-
-    // Loop through all participants. If they are in the mix map they
-    // were mixed.
-    for (MixerParticipantList::const_iterator
-        participant =_participantList.begin();
-        participant != _participantList.end();
-         ++participant) {
-        bool isMixed = false;
-        for (auto it = mixedParticipantsMap.begin();
-             it != mixedParticipantsMap.end();
-             ++it) {
-          if (it->second == *participant) {
-            isMixed = true;
-            break;
-          }
-        }
-        (*participant)->_mixHistory->SetIsMixed(isMixed);
-    }
-}
-
-void AudioConferenceMixerImpl::ClearAudioFrameList(
-    AudioFrameList* audioFrameList) const {
-    LOG(LS_VERBOSE) << "ClearAudioFrameList(audioFrameList)";
-    for (AudioFrameList::iterator iter = audioFrameList->begin();
-         iter != audioFrameList->end();
-         ++iter) {
-        _audioFramePool->PushMemory(iter->frame);
-    }
-    audioFrameList->clear();
-}
-
-bool AudioConferenceMixerImpl::IsParticipantInList(
-    const MixerParticipant& participant,
-    const MixerParticipantList& participantList) const {
-    LOG(LS_VERBOSE) << "IsParticipantInList(participant,participantList)";
-    for (MixerParticipantList::const_iterator iter = participantList.begin();
-         iter != participantList.end();
-         ++iter) {
-        if(&participant == *iter) {
-            return true;
-        }
-    }
-    return false;
-}
-
-bool AudioConferenceMixerImpl::AddParticipantToList(
-    MixerParticipant* participant,
-    MixerParticipantList* participantList) const {
-    LOG(LS_VERBOSE) << "AddParticipantToList(participant, participantList)";
-    participantList->push_back(participant);
-    // Make sure that the mixed status is correct for new MixerParticipant.
-    participant->_mixHistory->ResetMixedStatus();
-    return true;
-}
-
-bool AudioConferenceMixerImpl::RemoveParticipantFromList(
-    MixerParticipant* participant,
-    MixerParticipantList* participantList) const {
-    LOG(LS_VERBOSE)
-        << "RemoveParticipantFromList(participant, participantList)";
-    for (MixerParticipantList::iterator iter = participantList->begin();
-         iter != participantList->end();
-         ++iter) {
-        if(*iter == participant) {
-            participantList->erase(iter);
-            // Participant is no longer mixed, reset to default.
-            participant->_mixHistory->ResetMixedStatus();
-            return true;
-        }
-    }
-    return false;
-}
-
-int32_t AudioConferenceMixerImpl::MixFromList(
-    AudioFrame* mixedAudio,
-    const AudioFrameList& audioFrameList) const {
-
-    LOG(LS_VERBOSE) << "MixFromList(mixedAudio, audioFrameList)";
-    if(audioFrameList.empty()) return 0;
-
-    uint32_t position = 0;
-
-    if (_numMixedParticipants == 1) {
-      mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_;
-      mixedAudio->elapsed_time_ms_ =
-          audioFrameList.front().frame->elapsed_time_ms_;
-    } else {
-      // TODO(wu): Issue 3390.
-      // Audio frame timestamp is only supported in one channel case.
-      mixedAudio->timestamp_ = 0;
-      mixedAudio->elapsed_time_ms_ = -1;
-    }
-
-    for (AudioFrameList::const_iterator iter = audioFrameList.begin();
-         iter != audioFrameList.end();
-         ++iter) {
-        if(position >= kMaximumAmountOfMixedParticipants) {
-            LOG(LS_ERROR) <<
-                "Trying to mix more than max amount of mixed participants:"
-                       << kMaximumAmountOfMixedParticipants << "!";
-            // Assert and avoid crash
-            assert(false);
-            position = 0;
-        }
-        if (!iter->muted) {
-          MixFrames(mixedAudio, iter->frame, use_limiter_);
-        }
-
-        position++;
-    }
-
-    return 0;
-}
-
-// TODO(andrew): consolidate this function with MixFromList.
-int32_t AudioConferenceMixerImpl::MixAnonomouslyFromList(
-    AudioFrame* mixedAudio,
-    const AudioFrameList& audioFrameList) const {
-    LOG(LS_VERBOSE) << "MixAnonomouslyFromList(mixedAudio, audioFrameList)";
-
-    if(audioFrameList.empty()) return 0;
-
-    for (AudioFrameList::const_iterator iter = audioFrameList.begin();
-         iter != audioFrameList.end();
-         ++iter) {
-        if (!iter->muted) {
-            MixFrames(mixedAudio, iter->frame, use_limiter_);
-        }
-    }
-    return 0;
-}
-
-bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const {
-    if (!use_limiter_) {
-      return true;
-    }
-
-    // Smoothly limit the mixed frame.
-    const int error = _limiter->ProcessStream(mixedAudio);
-
-    // And now we can safely restore the level. This procedure results in
-    // some loss of resolution, deemed acceptable.
-    //
-    // It's possible to apply the gain in the AGC (with a target level of 0 dbFS
-    // and compression gain of 6 dB). However, in the transition frame when this
-    // is enabled (moving from one to two participants) it has the potential to
-    // create discontinuities in the mixed frame.
-    //
-    // Instead we double the frame (with addition since left-shifting a
-    // negative value is undefined).
-    AudioFrameOperations::Add(*mixedAudio, mixedAudio);
-
-    if(error != _limiter->kNoError) {
-        LOG(LS_ERROR) << "Error from AudioProcessing: " << error;
-        assert(false);
-        return false;
-    }
-    return true;
-}
-}  // namespace webrtc
diff --git a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h b/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
deleted file mode 100644
index e88cb19..0000000
--- a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_AUDIO_CONFERENCE_MIXER_IMPL_H_
-#define MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_AUDIO_CONFERENCE_MIXER_IMPL_H_
-
-#include <list>
-#include <map>
-#include <memory>
-
-#include "modules/audio_conference_mixer/include/audio_conference_mixer.h"
-#include "modules/audio_conference_mixer/source/memory_pool.h"
-#include "modules/audio_conference_mixer/source/time_scheduler.h"
-#include "modules/include/module_common_types.h"
-#include "rtc_base/criticalsection.h"
-#include "typedefs.h"  // NOLINT(build/include)
-
-namespace webrtc {
-class AudioProcessing;
-
-struct FrameAndMuteInfo {
-  FrameAndMuteInfo(AudioFrame* f, bool m) : frame(f), muted(m) {}
-  AudioFrame* frame;
-  bool muted;
-};
-
-typedef std::list<FrameAndMuteInfo> AudioFrameList;
-typedef std::list<MixerParticipant*> MixerParticipantList;
-
-// Cheshire cat implementation of MixerParticipant's non virtual functions.
-class MixHistory
-{
-public:
-    MixHistory();
-    ~MixHistory();
-
-    // Returns true if the participant is being mixed.
-    bool IsMixed() const;
-
-    // Returns true if the participant was mixed previous mix
-    // iteration.
-    bool WasMixed() const;
-
-    // Updates the mixed status.
-    int32_t SetIsMixed(bool mixed);
-
-    void ResetMixedStatus();
-private:
-    bool _isMixed;
-};
-
-class AudioConferenceMixerImpl : public AudioConferenceMixer
-{
-public:
-    // AudioProcessing only accepts 10 ms frames.
-    enum {kProcessPeriodicityInMs = 10};
-
-    AudioConferenceMixerImpl(int id);
-    ~AudioConferenceMixerImpl();
-
-    // Must be called after ctor.
-    bool Init();
-
-    // Module functions
-    int64_t TimeUntilNextProcess() override;
-    void Process() override;
-
-    // AudioConferenceMixer functions
-    int32_t RegisterMixedStreamCallback(
-        AudioMixerOutputReceiver* mixReceiver) override;
-    int32_t UnRegisterMixedStreamCallback() override;
-    int32_t SetMixabilityStatus(MixerParticipant* participant,
-                                bool mixable) override;
-    bool MixabilityStatus(const MixerParticipant& participant) const override;
-    int32_t SetMinimumMixingFrequency(Frequency freq) override;
-    int32_t SetAnonymousMixabilityStatus(
-        MixerParticipant* participant, bool mixable) override;
-    bool AnonymousMixabilityStatus(
-        const MixerParticipant& participant) const override;
-
-private:
-    enum{DEFAULT_AUDIO_FRAME_POOLSIZE = 50};
-
-    // Set/get mix frequency
-    int32_t SetOutputFrequency(const Frequency& frequency);
-    Frequency OutputFrequency() const;
-
-    // Fills mixList with the AudioFrames pointers that should be used when
-    // mixing.
-    // maxAudioFrameCounter both input and output specifies how many more
-    // AudioFrames that are allowed to be mixed.
-    // rampOutList contain AudioFrames corresponding to an audio stream that
-    // used to be mixed but shouldn't be mixed any longer. These AudioFrames
-    // should be ramped out over this AudioFrame to avoid audio discontinuities.
-    void UpdateToMix(
-        AudioFrameList* mixList,
-        AudioFrameList* rampOutList,
-        std::map<int, MixerParticipant*>* mixParticipantList,
-        size_t* maxAudioFrameCounter) const;
-
-    // Return the lowest mixing frequency that can be used without having to
-    // downsample any audio.
-    int32_t GetLowestMixingFrequency() const;
-    int32_t GetLowestMixingFrequencyFromList(
-        const MixerParticipantList& mixList) const;
-
-    // Return the AudioFrames that should be mixed anonymously.
-    void GetAdditionalAudio(AudioFrameList* additionalFramesList) const;
-
-    // Update the MixHistory of all MixerParticipants. mixedParticipantsList
-    // should contain a map of MixerParticipants that have been mixed.
-    void UpdateMixedStatus(
-        const std::map<int, MixerParticipant*>& mixedParticipantsList) const;
-
-    // Clears audioFrameList and reclaims all memory associated with it.
-    void ClearAudioFrameList(AudioFrameList* audioFrameList) const;
-
-    // This function returns true if it finds the MixerParticipant in the
-    // specified list of MixerParticipants.
-    bool IsParticipantInList(const MixerParticipant& participant,
-                             const MixerParticipantList& participantList) const;
-
-    // Add/remove the MixerParticipant to the specified
-    // MixerParticipant list.
-    bool AddParticipantToList(
-        MixerParticipant* participant,
-        MixerParticipantList* participantList) const;
-    bool RemoveParticipantFromList(
-        MixerParticipant* removeParticipant,
-        MixerParticipantList* participantList) const;
-
-    // Mix the AudioFrames stored in audioFrameList into mixedAudio.
-    int32_t MixFromList(AudioFrame* mixedAudio,
-                        const AudioFrameList& audioFrameList) const;
-
-    // Mix the AudioFrames stored in audioFrameList into mixedAudio. No
-    // record will be kept of this mix (e.g. the corresponding MixerParticipants
-    // will not be marked as IsMixed()
-    int32_t MixAnonomouslyFromList(AudioFrame* mixedAudio,
-                                   const AudioFrameList& audioFrameList) const;
-
-    bool LimitMixedAudio(AudioFrame* mixedAudio) const;
-
-    rtc::CriticalSection _crit;
-    rtc::CriticalSection _cbCrit;
-
-    int32_t _id;
-
-    Frequency _minimumMixingFreq;
-
-    // Mix result callback
-    AudioMixerOutputReceiver* _mixReceiver;
-
-    // The current sample frequency and sample size when mixing.
-    Frequency _outputFrequency;
-    size_t _sampleSize;
-
-    // Memory pool to avoid allocating/deallocating AudioFrames
-    MemoryPool<AudioFrame>* _audioFramePool;
-
-    // List of all participants. Note all lists are disjunct
-    MixerParticipantList _participantList;              // May be mixed.
-    // Always mixed, anonomously.
-    MixerParticipantList _additionalParticipantList;
-
-    size_t _numMixedParticipants;
-    // Determines if we will use a limiter for clipping protection during
-    // mixing.
-    bool use_limiter_;
-
-    uint32_t _timeStamp;
-
-    // Metronome class.
-    TimeScheduler _timeScheduler;
-
-    // Counter keeping track of concurrent calls to process.
-    // Note: should never be higher than 1 or lower than 0.
-    int16_t _processCalls;
-
-    // Used for inhibiting saturation in mixing.
-    std::unique_ptr<AudioProcessing> _limiter;
-};
-}  // namespace webrtc
-
-#endif // MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_AUDIO_CONFERENCE_MIXER_IMPL_H_
diff --git a/modules/audio_conference_mixer/source/audio_frame_manipulator.cc b/modules/audio_conference_mixer/source/audio_frame_manipulator.cc
deleted file mode 100644
index a16afb7..0000000
--- a/modules/audio_conference_mixer/source/audio_frame_manipulator.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "modules/audio_conference_mixer/source/audio_frame_manipulator.h"
-#include "modules/include/module_common_types.h"
-#include "typedefs.h"  // NOLINT(build/include)
-
-namespace {
-// Linear ramping over 80 samples.
-// TODO(hellner): ramp using fix point?
-const float rampArray[] = {0.0000f, 0.0127f, 0.0253f, 0.0380f,
-                           0.0506f, 0.0633f, 0.0759f, 0.0886f,
-                           0.1013f, 0.1139f, 0.1266f, 0.1392f,
-                           0.1519f, 0.1646f, 0.1772f, 0.1899f,
-                           0.2025f, 0.2152f, 0.2278f, 0.2405f,
-                           0.2532f, 0.2658f, 0.2785f, 0.2911f,
-                           0.3038f, 0.3165f, 0.3291f, 0.3418f,
-                           0.3544f, 0.3671f, 0.3797f, 0.3924f,
-                           0.4051f, 0.4177f, 0.4304f, 0.4430f,
-                           0.4557f, 0.4684f, 0.4810f, 0.4937f,
-                           0.5063f, 0.5190f, 0.5316f, 0.5443f,
-                           0.5570f, 0.5696f, 0.5823f, 0.5949f,
-                           0.6076f, 0.6203f, 0.6329f, 0.6456f,
-                           0.6582f, 0.6709f, 0.6835f, 0.6962f,
-                           0.7089f, 0.7215f, 0.7342f, 0.7468f,
-                           0.7595f, 0.7722f, 0.7848f, 0.7975f,
-                           0.8101f, 0.8228f, 0.8354f, 0.8481f,
-                           0.8608f, 0.8734f, 0.8861f, 0.8987f,
-                           0.9114f, 0.9241f, 0.9367f, 0.9494f,
-                           0.9620f, 0.9747f, 0.9873f, 1.0000f};
-const size_t rampSize = sizeof(rampArray)/sizeof(rampArray[0]);
-}  // namespace
-
-namespace webrtc {
-uint32_t CalculateEnergy(const AudioFrame& audioFrame)
-{
-    if (audioFrame.muted()) return 0;
-
-    uint32_t energy = 0;
-    const int16_t* frame_data = audioFrame.data();
-    for(size_t position = 0; position < audioFrame.samples_per_channel_;
-        position++)
-    {
-        // TODO(andrew): this can easily overflow.
-        energy += frame_data[position] * frame_data[position];
-    }
-    return energy;
-}
-
-void RampIn(AudioFrame& audioFrame)
-{
-    assert(rampSize <= audioFrame.samples_per_channel_);
-    if (audioFrame.muted()) return;
-
-    int16_t* frame_data = audioFrame.mutable_data();
-    for(size_t i = 0; i < rampSize; i++)
-    {
-        frame_data[i] = static_cast<int16_t>(rampArray[i] * frame_data[i]);
-    }
-}
-
-void RampOut(AudioFrame& audioFrame)
-{
-    assert(rampSize <= audioFrame.samples_per_channel_);
-    if (audioFrame.muted()) return;
-
-    int16_t* frame_data = audioFrame.mutable_data();
-    for(size_t i = 0; i < rampSize; i++)
-    {
-        const size_t rampPos = rampSize - 1 - i;
-        frame_data[i] = static_cast<int16_t>(rampArray[rampPos] *
-                                             frame_data[i]);
-    }
-    memset(&frame_data[rampSize], 0,
-           (audioFrame.samples_per_channel_ - rampSize) *
-           sizeof(frame_data[0]));
-}
-}  // namespace webrtc
diff --git a/modules/audio_conference_mixer/source/audio_frame_manipulator.h b/modules/audio_conference_mixer/source/audio_frame_manipulator.h
deleted file mode 100644
index f8fe2d8..0000000
--- a/modules/audio_conference_mixer/source/audio_frame_manipulator.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_AUDIO_FRAME_MANIPULATOR_H_
-#define MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_AUDIO_FRAME_MANIPULATOR_H_
-
-#include "typedefs.h"  // NOLINT(build/include)
-
-namespace webrtc {
-class AudioFrame;
-
-// Updates the audioFrame's energy (based on its samples).
-uint32_t CalculateEnergy(const AudioFrame& audioFrame);
-
-// Apply linear step function that ramps in/out the audio samples in audioFrame
-void RampIn(AudioFrame& audioFrame);
-void RampOut(AudioFrame& audioFrame);
-
-}  // namespace webrtc
-
-#endif // MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_AUDIO_FRAME_MANIPULATOR_H_
diff --git a/modules/audio_conference_mixer/source/memory_pool.h b/modules/audio_conference_mixer/source/memory_pool.h
deleted file mode 100644
index 4e93596..0000000
--- a/modules/audio_conference_mixer/source/memory_pool.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_MEMORY_POOL_H_
-#define MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_MEMORY_POOL_H_
-
-#include <assert.h>
-
-#include "typedefs.h"  // NOLINT(build/include)
-
-#ifdef _WIN32
-#include "modules/audio_conference_mixer/source/memory_pool_win.h"
-#else
-#include "modules/audio_conference_mixer/source/memory_pool_posix.h"
-#endif
-
-namespace webrtc {
-
-template<class MemoryType>
-class MemoryPool
-{
-public:
-    // Factory method, constructor disabled.
-    static int32_t CreateMemoryPool(MemoryPool*& memoryPool,
-                                    uint32_t initialPoolSize);
-
-    // Try to delete the memory pool. Fail with return value -1 if there is
-    // outstanding memory.
-    static int32_t DeleteMemoryPool(
-        MemoryPool*& memoryPool);
-
-    // Get/return unused memory.
-    int32_t PopMemory(MemoryType*&  memory);
-    int32_t PushMemory(MemoryType*& memory);
-private:
-    MemoryPool(int32_t initialPoolSize);
-    ~MemoryPool();
-
-    MemoryPoolImpl<MemoryType>* _ptrImpl;
-};
-
-template<class MemoryType>
-MemoryPool<MemoryType>::MemoryPool(int32_t initialPoolSize)
-{
-    _ptrImpl = new MemoryPoolImpl<MemoryType>(initialPoolSize);
-}
-
-template<class MemoryType>
-MemoryPool<MemoryType>::~MemoryPool()
-{
-    delete _ptrImpl;
-}
-
-template<class MemoryType> int32_t
-MemoryPool<MemoryType>::CreateMemoryPool(MemoryPool*&   memoryPool,
-                                         uint32_t initialPoolSize)
-{
-    memoryPool = new MemoryPool(initialPoolSize);
-    if(memoryPool == NULL)
-    {
-        return -1;
-    }
-    if(memoryPool->_ptrImpl == NULL)
-    {
-        delete memoryPool;
-        memoryPool = NULL;
-        return -1;
-    }
-    if(!memoryPool->_ptrImpl->Initialize())
-    {
-        delete memoryPool;
-        memoryPool = NULL;
-        return -1;
-    }
-    return 0;
-}
-
-template<class MemoryType>
-int32_t MemoryPool<MemoryType>::DeleteMemoryPool(MemoryPool*& memoryPool)
-{
-    if(memoryPool == NULL)
-    {
-        return -1;
-    }
-    if(memoryPool->_ptrImpl == NULL)
-    {
-        return -1;
-    }
-    if(memoryPool->_ptrImpl->Terminate() == -1)
-    {
-        return -1;
-    }
-    delete memoryPool;
-    memoryPool = NULL;
-    return 0;
-}
-
-template<class MemoryType>
-int32_t MemoryPool<MemoryType>::PopMemory(MemoryType*& memory)
-{
-    return _ptrImpl->PopMemory(memory);
-}
-
-template<class MemoryType>
-int32_t MemoryPool<MemoryType>::PushMemory(MemoryType*& memory)
-{
-    if(memory == NULL)
-    {
-        return -1;
-    }
-    return _ptrImpl->PushMemory(memory);
-}
-}  // namespace webrtc
-
-#endif // MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_MEMORY_POOL_H_
diff --git a/modules/audio_conference_mixer/source/memory_pool_posix.h b/modules/audio_conference_mixer/source/memory_pool_posix.h
deleted file mode 100644
index 70e2bfd..0000000
--- a/modules/audio_conference_mixer/source/memory_pool_posix.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_MEMORY_POOL_GENERIC_H_
-#define MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_MEMORY_POOL_GENERIC_H_
-
-#include <assert.h>
-#include <list>
-
-#include "rtc_base/criticalsection.h"
-#include "typedefs.h"  // NOLINT(build/include)
-
-namespace webrtc {
-template<class MemoryType>
-class MemoryPoolImpl
-{
-public:
-    // MemoryPool functions.
-    int32_t PopMemory(MemoryType*&  memory);
-    int32_t PushMemory(MemoryType*& memory);
-
-    MemoryPoolImpl(int32_t initialPoolSize);
-    ~MemoryPoolImpl();
-
-    // Atomic functions
-    int32_t Terminate();
-    bool Initialize();
-private:
-    // Non-atomic function.
-    int32_t CreateMemory(uint32_t amountToCreate);
-
-    rtc::CriticalSection _crit;
-
-    bool _terminate;
-
-    std::list<MemoryType*> _memoryPool;
-
-    uint32_t _initialPoolSize;
-    uint32_t _createdMemory;
-    uint32_t _outstandingMemory;
-};
-
-template<class MemoryType>
-MemoryPoolImpl<MemoryType>::MemoryPoolImpl(int32_t initialPoolSize)
-    : _terminate(false),
-      _initialPoolSize(initialPoolSize),
-      _createdMemory(0),
-      _outstandingMemory(0)
-{
-}
-
-template<class MemoryType>
-MemoryPoolImpl<MemoryType>::~MemoryPoolImpl()
-{
-    // Trigger assert if there is outstanding memory.
-    assert(_createdMemory == 0);
-    assert(_outstandingMemory == 0);
-}
-
-template<class MemoryType>
-int32_t MemoryPoolImpl<MemoryType>::PopMemory(MemoryType*& memory)
-{
-    rtc::CritScope cs(&_crit);
-    if(_terminate)
-    {
-        memory = NULL;
-        return -1;
-    }
-    if (_memoryPool.empty()) {
-        // _memoryPool empty create new memory.
-        CreateMemory(_initialPoolSize);
-        if(_memoryPool.empty())
-        {
-            memory = NULL;
-            return -1;
-        }
-    }
-    memory = _memoryPool.front();
-    _memoryPool.pop_front();
-    _outstandingMemory++;
-    return 0;
-}
-
-template<class MemoryType>
-int32_t MemoryPoolImpl<MemoryType>::PushMemory(MemoryType*& memory)
-{
-    if(memory == NULL)
-    {
-        return -1;
-    }
-    rtc::CritScope cs(&_crit);
-    _outstandingMemory--;
-    if(_memoryPool.size() > (_initialPoolSize << 1))
-    {
-        // Reclaim memory if less than half of the pool is unused.
-        _createdMemory--;
-        delete memory;
-        memory = NULL;
-        return 0;
-    }
-    _memoryPool.push_back(memory);
-    memory = NULL;
-    return 0;
-}
-
-template<class MemoryType>
-bool MemoryPoolImpl<MemoryType>::Initialize()
-{
-    rtc::CritScope cs(&_crit);
-    return CreateMemory(_initialPoolSize) == 0;
-}
-
-template<class MemoryType>
-int32_t MemoryPoolImpl<MemoryType>::Terminate()
-{
-    rtc::CritScope cs(&_crit);
-    assert(_createdMemory == _outstandingMemory + _memoryPool.size());
-
-    _terminate = true;
-    // Reclaim all memory.
-    while(_createdMemory > 0)
-    {
-        MemoryType* memory = _memoryPool.front();
-        _memoryPool.pop_front();
-        delete memory;
-        _createdMemory--;
-    }
-    return 0;
-}
-
-template<class MemoryType>
-int32_t MemoryPoolImpl<MemoryType>::CreateMemory(
-    uint32_t amountToCreate)
-{
-    for(uint32_t i = 0; i < amountToCreate; i++)
-    {
-        MemoryType* memory = new MemoryType();
-        if(memory == NULL)
-        {
-            return -1;
-        }
-        _memoryPool.push_back(memory);
-        _createdMemory++;
-    }
-    return 0;
-}
-}  // namespace webrtc
-
-#endif // MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_MEMORY_POOL_GENERIC_H_
diff --git a/modules/audio_conference_mixer/source/memory_pool_win.h b/modules/audio_conference_mixer/source/memory_pool_win.h
deleted file mode 100644
index cce38aa..0000000
--- a/modules/audio_conference_mixer/source/memory_pool_win.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_MEMORY_POOL_WINDOWS_H_
-#define MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_MEMORY_POOL_WINDOWS_H_
-
-#include <assert.h>
-#include <windows.h>
-
-#include "system_wrappers/include/aligned_malloc.h"
-#include "system_wrappers/include/atomic32.h"
-#include "typedefs.h"  // NOLINT(build/include)
-
-namespace webrtc {
-template<class MemoryType> struct MemoryPoolItem;
-
-template<class MemoryType>
-struct MemoryPoolItemPayload
-{
-    MemoryPoolItemPayload()
-        : memoryType(),
-          base(NULL)
-    {
-    }
-    MemoryType                  memoryType;
-    MemoryPoolItem<MemoryType>* base;
-};
-
-template<class MemoryType>
-struct MemoryPoolItem
-{
-    // Atomic single linked list entry header.
-    SLIST_ENTRY itemEntry;
-    // Atomic single linked list payload.
-    MemoryPoolItemPayload<MemoryType>* payload;
-};
-
-template<class MemoryType>
-class MemoryPoolImpl
-{
-public:
-    // MemoryPool functions.
-    int32_t PopMemory(MemoryType*&  memory);
-    int32_t PushMemory(MemoryType*& memory);
-
-    MemoryPoolImpl(int32_t /*initialPoolSize*/);
-    ~MemoryPoolImpl();
-
-    // Atomic functions.
-    int32_t Terminate();
-    bool Initialize();
-private:
-    // Non-atomic function.
-    MemoryPoolItem<MemoryType>* CreateMemory();
-
-    // Windows implementation of single linked atomic list, documented here:
-    // http://msdn.microsoft.com/en-us/library/ms686962(VS.85).aspx
-
-    // Atomic single linked list head.
-    PSLIST_HEADER _pListHead;
-
-    Atomic32 _createdMemory;
-    Atomic32 _outstandingMemory;
-};
-
-template<class MemoryType>
-MemoryPoolImpl<MemoryType>::MemoryPoolImpl(
-    int32_t /*initialPoolSize*/)
-    : _pListHead(NULL),
-      _createdMemory(0),
-      _outstandingMemory(0)
-{
-}
-
-template<class MemoryType>
-MemoryPoolImpl<MemoryType>::~MemoryPoolImpl()
-{
-    Terminate();
-    if(_pListHead != NULL)
-    {
-        AlignedFree(reinterpret_cast<void*>(_pListHead));
-        _pListHead = NULL;
-    }
-    // Trigger assert if there is outstanding memory.
-    assert(_createdMemory.Value() == 0);
-    assert(_outstandingMemory.Value() == 0);
-}
-
-template<class MemoryType>
-int32_t MemoryPoolImpl<MemoryType>::PopMemory(MemoryType*& memory)
-{
-    PSLIST_ENTRY pListEntry = InterlockedPopEntrySList(_pListHead);
-    if(pListEntry == NULL)
-    {
-        MemoryPoolItem<MemoryType>* item = CreateMemory();
-        if(item == NULL)
-        {
-            return -1;
-        }
-        pListEntry = &(item->itemEntry);
-    }
-    ++_outstandingMemory;
-    memory = &((MemoryPoolItem<MemoryType>*)pListEntry)->payload->memoryType;
-    return 0;
-}
-
-template<class MemoryType>
-int32_t MemoryPoolImpl<MemoryType>::PushMemory(MemoryType*& memory)
-{
-    if(memory == NULL)
-    {
-        return -1;
-    }
-
-    MemoryPoolItem<MemoryType>* item =
-        ((MemoryPoolItemPayload<MemoryType>*)memory)->base;
-
-    const int32_t usedItems  = --_outstandingMemory;
-    const int32_t totalItems = _createdMemory.Value();
-    const int32_t freeItems  = totalItems - usedItems;
-    if(freeItems < 0)
-    {
-        assert(false);
-        delete item->payload;
-        AlignedFree(item);
-        return -1;
-    }
-    if(freeItems >= totalItems>>1)
-    {
-        delete item->payload;
-        AlignedFree(item);
-        --_createdMemory;
-        return 0;
-    }
-    InterlockedPushEntrySList(_pListHead,&(item->itemEntry));
-    return 0;
-}
-
-template<class MemoryType>
-bool MemoryPoolImpl<MemoryType>::Initialize()
-{
-    _pListHead = (PSLIST_HEADER)AlignedMalloc(sizeof(SLIST_HEADER),
-                                              MEMORY_ALLOCATION_ALIGNMENT);
-    if(_pListHead == NULL)
-    {
-        return false;
-    }
-    InitializeSListHead(_pListHead);
-    return true;
-}
-
-template<class MemoryType>
-int32_t MemoryPoolImpl<MemoryType>::Terminate()
-{
-    int32_t itemsFreed = 0;
-    PSLIST_ENTRY pListEntry = InterlockedPopEntrySList(_pListHead);
-    while(pListEntry != NULL)
-    {
-        MemoryPoolItem<MemoryType>* item = ((MemoryPoolItem<MemoryType>*)pListEntry);
-        delete item->payload;
-        AlignedFree(item);
-        --_createdMemory;
-        itemsFreed++;
-        pListEntry = InterlockedPopEntrySList(_pListHead);
-    }
-    return itemsFreed;
-}
-
-template<class MemoryType>
-MemoryPoolItem<MemoryType>* MemoryPoolImpl<MemoryType>::CreateMemory()
-{
-    MemoryPoolItem<MemoryType>* returnValue = (MemoryPoolItem<MemoryType>*)
-        AlignedMalloc(sizeof(MemoryPoolItem<MemoryType>),
-                      MEMORY_ALLOCATION_ALIGNMENT);
-    if(returnValue == NULL)
-    {
-        return NULL;
-    }
-
-    returnValue->payload = new MemoryPoolItemPayload<MemoryType>();
-    if(returnValue->payload == NULL)
-    {
-        delete returnValue;
-        return NULL;
-    }
-    returnValue->payload->base = returnValue;
-    ++_createdMemory;
-    return returnValue;
-}
-}  // namespace webrtc
-
-#endif // MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_MEMORY_POOL_WINDOWS_H_
diff --git a/modules/audio_conference_mixer/source/time_scheduler.cc b/modules/audio_conference_mixer/source/time_scheduler.cc
deleted file mode 100644
index 5a27b42..0000000
--- a/modules/audio_conference_mixer/source/time_scheduler.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "modules/audio_conference_mixer/source/time_scheduler.h"
-#include "rtc_base/timeutils.h"
-
-namespace webrtc {
-TimeScheduler::TimeScheduler(const int64_t periodicityInMs)
-    : _isStarted(false),
-      _lastPeriodMark(),
-      _periodicityInMs(periodicityInMs),
-      _periodicityInTicks(periodicityInMs * rtc::kNumNanosecsPerMillisec),
-      _missedPeriods(0) {}
-
-int32_t TimeScheduler::UpdateScheduler() {
-    rtc::CritScope cs(&_crit);
-    if(!_isStarted)
-    {
-        _isStarted = true;
-        _lastPeriodMark = rtc::TimeNanos();
-        return 0;
-    }
-    // Don't perform any calculations until the debt of pending periods have
-    // been worked off.
-    if(_missedPeriods > 0)
-    {
-        _missedPeriods--;
-        return 0;
-    }
-
-    // Calculate the time that has past since previous call to this function.
-    int64_t tickNow = rtc::TimeNanos();
-    int64_t amassedTicks = tickNow - _lastPeriodMark;
-    int64_t amassedMs = amassedTicks / rtc::kNumNanosecsPerMillisec;
-
-    // Calculate the number of periods the time that has passed correspond to.
-    int64_t periodsToClaim = amassedMs / _periodicityInMs;
-
-    // One period will be worked off by this call. Make sure that the number of
-    // pending periods don't end up being negative (e.g. if this function is
-    // called to often).
-    if(periodsToClaim < 1)
-    {
-        periodsToClaim = 1;
-    }
-
-    // Update the last period mark without introducing any drifting.
-    // Note that if this fuunction is called to often _lastPeriodMark can
-    // refer to a time in the future which in turn will yield TimeToNextUpdate
-    // that is greater than the periodicity
-    for(int64_t i = 0; i < periodsToClaim; i++)
-    {
-        _lastPeriodMark += _periodicityInTicks;
-    }
-
-    // Update the total amount of missed periods note that we have processed
-    // one period hence the - 1
-    _missedPeriods += periodsToClaim - 1;
-    return 0;
-}
-
-int32_t TimeScheduler::TimeToNextUpdate(
-    int64_t& updateTimeInMS) const
-{
-    rtc::CritScope cs(&_crit);
-    // Missed periods means that the next UpdateScheduler() should happen
-    // immediately.
-    if(_missedPeriods > 0)
-    {
-        updateTimeInMS = 0;
-        return 0;
-    }
-
-    // Calculate the time (in ms) that has past since last call to
-    // UpdateScheduler()
-    int64_t tickNow = rtc::TimeNanos();
-    int64_t ticksSinceLastUpdate = tickNow - _lastPeriodMark;
-    const int64_t millisecondsSinceLastUpdate =
-      ticksSinceLastUpdate / rtc::kNumNanosecsPerMillisec;
-
-    updateTimeInMS = _periodicityInMs - millisecondsSinceLastUpdate;
-    updateTimeInMS =  (updateTimeInMS < 0) ? 0 : updateTimeInMS;
-    return 0;
-}
-}  // namespace webrtc
diff --git a/modules/audio_conference_mixer/source/time_scheduler.h b/modules/audio_conference_mixer/source/time_scheduler.h
deleted file mode 100644
index 1580861..0000000
--- a/modules/audio_conference_mixer/source/time_scheduler.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-// The TimeScheduler class keeps track of periodic events. It is non-drifting
-// and keeps track of any missed periods so that it is possible to catch up.
-// (compare to a metronome)
-#include "rtc_base/criticalsection.h"
-
-#ifndef MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_TIME_SCHEDULER_H_
-#define MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_TIME_SCHEDULER_H_
-
-namespace webrtc {
-
-class TimeScheduler {
-public:
-    TimeScheduler(const int64_t periodicityInMs);
-    ~TimeScheduler() = default;
-
-    // Signal that a periodic event has been triggered.
-    int32_t UpdateScheduler();
-
-    // Set updateTimeInMs to the amount of time until UpdateScheduler() should
-    // be called. This time will never be negative.
-    int32_t TimeToNextUpdate(int64_t& updateTimeInMS) const;
-
-private:
-    rtc::CriticalSection _crit;
-
-    bool _isStarted;
-    int64_t _lastPeriodMark; // In ns
-
-    int64_t _periodicityInMs;
-    int64_t _periodicityInTicks;
-    uint32_t _missedPeriods;
-};
-}  // namespace webrtc
-
-#endif // MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_TIME_SCHEDULER_H_
diff --git a/modules/audio_conference_mixer/test/audio_conference_mixer_unittest.cc b/modules/audio_conference_mixer/test/audio_conference_mixer_unittest.cc
deleted file mode 100644
index 6685899..0000000
--- a/modules/audio_conference_mixer/test/audio_conference_mixer_unittest.cc
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <memory>
-
-#include "modules/audio_conference_mixer/include/audio_conference_mixer.h"
-#include "modules/audio_conference_mixer/include/audio_conference_mixer_defines.h"
-#include "test/gmock.h"
-
-namespace webrtc {
-
-using testing::_;
-using testing::AtLeast;
-using testing::Invoke;
-using testing::Return;
-
-class MockAudioMixerOutputReceiver : public AudioMixerOutputReceiver {
- public:
-  MOCK_METHOD4(NewMixedAudio, void(const int32_t id,
-                                   const AudioFrame& general_audio_frame,
-                                   const AudioFrame** unique_audio_frames,
-                                   const uint32_t size));
-};
-
-class MockMixerParticipant : public MixerParticipant {
- public:
-  MockMixerParticipant() {
-    ON_CALL(*this, GetAudioFrame(_, _))
-        .WillByDefault(Invoke(this, &MockMixerParticipant::FakeAudioFrame));
-  }
-  MOCK_METHOD2(GetAudioFrame,
-               int32_t(const int32_t id, AudioFrame* audio_frame));
-  MOCK_CONST_METHOD1(NeededFrequency, int32_t(const int32_t id));
-  AudioFrame* fake_frame() { return &fake_frame_; }
-
- private:
-  AudioFrame fake_frame_;
-  int32_t FakeAudioFrame(const int32_t id, AudioFrame* audio_frame) {
-    audio_frame->CopyFrom(fake_frame_);
-    return 0;
-  }
-};
-
-TEST(AudioConferenceMixer, AnonymousAndNamed) {
-  const int kId = 1;
-  // Should not matter even if partipants are more than
-  // kMaximumAmountOfMixedParticipants.
-  const int kNamed =
-      AudioConferenceMixer::kMaximumAmountOfMixedParticipants + 1;
-  const int kAnonymous =
-      AudioConferenceMixer::kMaximumAmountOfMixedParticipants + 1;
-
-  std::unique_ptr<AudioConferenceMixer> mixer(
-      AudioConferenceMixer::Create(kId));
-
-  MockMixerParticipant named[kNamed];
-  MockMixerParticipant anonymous[kAnonymous];
-
-  for (int i = 0; i < kNamed; ++i) {
-    EXPECT_EQ(0, mixer->SetMixabilityStatus(&named[i], true));
-    EXPECT_TRUE(mixer->MixabilityStatus(named[i]));
-  }
-
-  for (int i = 0; i < kAnonymous; ++i) {
-    // Participant must be registered before turning it into anonymous.
-    EXPECT_EQ(-1, mixer->SetAnonymousMixabilityStatus(&anonymous[i], true));
-    EXPECT_EQ(0, mixer->SetMixabilityStatus(&anonymous[i], true));
-    EXPECT_TRUE(mixer->MixabilityStatus(anonymous[i]));
-    EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[i]));
-
-    EXPECT_EQ(0, mixer->SetAnonymousMixabilityStatus(&anonymous[i], true));
-    EXPECT_TRUE(mixer->AnonymousMixabilityStatus(anonymous[i]));
-
-    // Anonymous participants do not show status by MixabilityStatus.
-    EXPECT_FALSE(mixer->MixabilityStatus(anonymous[i]));
-  }
-
-  for (int i = 0; i < kNamed; ++i) {
-    EXPECT_EQ(0, mixer->SetMixabilityStatus(&named[i], false));
-    EXPECT_FALSE(mixer->MixabilityStatus(named[i]));
-  }
-
-  for (int i = 0; i < kAnonymous - 1; i++) {
-    EXPECT_EQ(0, mixer->SetAnonymousMixabilityStatus(&anonymous[i], false));
-    EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[i]));
-
-    // SetAnonymousMixabilityStatus(anonymous, false) moves anonymous to the
-    // named group.
-    EXPECT_TRUE(mixer->MixabilityStatus(anonymous[i]));
-  }
-
-  // SetMixabilityStatus(anonymous, false) will remove anonymous from both
-  // anonymous and named groups.
-  EXPECT_EQ(0, mixer->SetMixabilityStatus(&anonymous[kAnonymous - 1], false));
-  EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[kAnonymous - 1]));
-  EXPECT_FALSE(mixer->MixabilityStatus(anonymous[kAnonymous - 1]));
-}
-
-TEST(AudioConferenceMixer, LargestEnergyVadActiveMixed) {
-  const int kId = 1;
-  const int kParticipants =
-      AudioConferenceMixer::kMaximumAmountOfMixedParticipants + 3;
-  const int kSampleRateHz = 32000;
-
-  std::unique_ptr<AudioConferenceMixer> mixer(
-      AudioConferenceMixer::Create(kId));
-
-  MockAudioMixerOutputReceiver output_receiver;
-  EXPECT_EQ(0, mixer->RegisterMixedStreamCallback(&output_receiver));
-
-  MockMixerParticipant participants[kParticipants];
-
-  for (int i = 0; i < kParticipants; ++i) {
-    participants[i].fake_frame()->id_ = i;
-    participants[i].fake_frame()->sample_rate_hz_ = kSampleRateHz;
-    participants[i].fake_frame()->speech_type_ =  AudioFrame::kNormalSpeech;
-    participants[i].fake_frame()->vad_activity_ = AudioFrame::kVadActive;
-    participants[i].fake_frame()->num_channels_ = 1;
-
-    // Frame duration 10ms.
-    participants[i].fake_frame()->samples_per_channel_ = kSampleRateHz / 100;
-
-    // We set the 80-th sample value since the first 80 samples may be
-    // modified by a ramped-in window.
-    participants[i].fake_frame()->mutable_data()[80] = i;
-
-    EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true));
-    EXPECT_CALL(participants[i], GetAudioFrame(_, _))
-        .Times(AtLeast(1));
-    EXPECT_CALL(participants[i], NeededFrequency(_))
-        .WillRepeatedly(Return(kSampleRateHz));
-  }
-
-  // Last participant gives audio frame with passive VAD, although it has the
-  // largest energy.
-  participants[kParticipants - 1].fake_frame()->vad_activity_ =
-      AudioFrame::kVadPassive;
-
-  EXPECT_CALL(output_receiver, NewMixedAudio(_, _, _, _))
-      .Times(AtLeast(1));
-
-  mixer->Process();
-
-  for (int i = 0; i < kParticipants; ++i) {
-    bool is_mixed = participants[i].IsMixed();
-    if (i == kParticipants - 1 || i < kParticipants - 1 -
-        AudioConferenceMixer::kMaximumAmountOfMixedParticipants) {
-      EXPECT_FALSE(is_mixed) << "Mixing status of Participant #"
-                             << i << " wrong.";
-    } else {
-      EXPECT_TRUE(is_mixed) << "Mixing status of Participant #"
-                            << i << " wrong.";
-    }
-  }
-
-  EXPECT_EQ(0, mixer->UnRegisterMixedStreamCallback());
-}
-
-}  // namespace webrtc
diff --git a/modules/audio_mixer/DEPS b/modules/audio_mixer/DEPS
index a076950..51608ad 100644
--- a/modules/audio_mixer/DEPS
+++ b/modules/audio_mixer/DEPS
@@ -3,7 +3,6 @@
   "+call",
   "+common_audio",
   "+modules/audio_coding",
-  "+modules/audio_conference_mixer",
   "+modules/audio_device",
   "+modules/audio_processing",
   "+modules/media_file",
diff --git a/test/mock_voe_channel_proxy.h b/test/mock_voe_channel_proxy.h
index 77fb171..0778bda 100644
--- a/test/mock_voe_channel_proxy.h
+++ b/test/mock_voe_channel_proxy.h
@@ -77,7 +77,7 @@
   MOCK_METHOD2(GetAudioFrameWithInfo,
       AudioMixer::Source::AudioFrameInfo(int sample_rate_hz,
                                          AudioFrame* audio_frame));
-  MOCK_CONST_METHOD0(NeededFrequency, int());
+  MOCK_CONST_METHOD0(PreferredSampleRate, int());
   MOCK_METHOD1(SetTransportOverhead, void(int transport_overhead_per_packet));
   MOCK_METHOD1(AssociateSendChannel,
                void(const ChannelProxy& send_channel_proxy));
diff --git a/voice_engine/BUILD.gn b/voice_engine/BUILD.gn
index b9af474..444c5fd 100644
--- a/voice_engine/BUILD.gn
+++ b/voice_engine/BUILD.gn
@@ -19,8 +19,6 @@
     "include/voe_base.h",
     "include/voe_errors.h",
     "monitor_module.h",
-    "output_mixer.cc",
-    "output_mixer.h",
     "shared_data.cc",
     "shared_data.h",
     "statistics.cc",
@@ -74,7 +72,6 @@
     "../modules:module_api",
     "../modules/audio_coding:audio_format_conversion",
     "../modules/audio_coding:rent_a_codec",
-    "../modules/audio_conference_mixer",
     "../modules/audio_device",
     "../modules/audio_processing",
     "../modules/bitrate_controller",
@@ -109,7 +106,6 @@
       "../common_audio",
       "../modules:module_api",
       "../modules/audio_coding",
-      "../modules/audio_conference_mixer",
       "../modules/audio_device",
       "../modules/audio_processing",
       "../modules/media_file",
diff --git a/voice_engine/DEPS b/voice_engine/DEPS
index 496bf29..c8e9a1c 100644
--- a/voice_engine/DEPS
+++ b/voice_engine/DEPS
@@ -4,7 +4,6 @@
   "+common_audio",
   "+logging/rtc_event_log",
   "+modules/audio_coding",
-  "+modules/audio_conference_mixer",
   "+modules/audio_device",
   "+modules/audio_processing",
   "+modules/media_file",
diff --git a/voice_engine/channel.cc b/voice_engine/channel.cc
index 85b14fd..aaca65a 100644
--- a/voice_engine/channel.cc
+++ b/voice_engine/channel.cc
@@ -39,7 +39,6 @@
 #include "rtc_base/timeutils.h"
 #include "system_wrappers/include/field_trial.h"
 #include "system_wrappers/include/trace.h"
-#include "voice_engine/output_mixer.h"
 #include "voice_engine/statistics.h"
 #include "voice_engine/utility.h"
 
@@ -619,15 +618,17 @@
   return ReceivePacket(rtp_packet, rtp_packet_length, header, false);
 }
 
-MixerParticipant::AudioFrameInfo Channel::GetAudioFrameWithMuted(
-    int32_t id,
-    AudioFrame* audioFrame) {
+AudioMixer::Source::AudioFrameInfo Channel::GetAudioFrameWithInfo(
+    int sample_rate_hz,
+    AudioFrame* audio_frame) {
+  audio_frame->sample_rate_hz_ = sample_rate_hz;
+
   unsigned int ssrc;
   RTC_CHECK_EQ(GetRemoteSSRC(ssrc), 0);
   event_log_proxy_->LogAudioPlayout(ssrc);
   // Get 10ms raw PCM data from the ACM (mixer limits output frequency)
   bool muted;
-  if (audio_coding_->PlayoutData10Ms(audioFrame->sample_rate_hz_, audioFrame,
+  if (audio_coding_->PlayoutData10Ms(audio_frame->sample_rate_hz_, audio_frame,
                                      &muted) == -1) {
     WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId),
                  "Channel::GetAudioFrame() PlayoutData10Ms() failed!");
@@ -635,20 +636,20 @@
     // error so that the audio mixer module doesn't add it to the mix. As
     // a result, it won't be played out and the actions skipped here are
     // irrelevant.
-    return MixerParticipant::AudioFrameInfo::kError;
+    return AudioMixer::Source::AudioFrameInfo::kError;
   }
 
   if (muted) {
     // TODO(henrik.lundin): We should be able to do better than this. But we
     // will have to go through all the cases below where the audio samples may
     // be used, and handle the muted case in some way.
-    AudioFrameOperations::Mute(audioFrame);
+    AudioFrameOperations::Mute(audio_frame);
   }
 
   // Convert module ID to internal VoE channel ID
-  audioFrame->id_ = VoEChannelId(audioFrame->id_);
+  audio_frame->id_ = VoEChannelId(audio_frame->id_);
   // Store speech type for dead-or-alive detection
-  _outputSpeechType = audioFrame->speech_type_;
+  _outputSpeechType = audio_frame->speech_type_;
 
   {
     // Pass the audio buffers to an optional sink callback, before applying
@@ -658,9 +659,9 @@
     rtc::CritScope cs(&_callbackCritSect);
     if (audio_sink_) {
       AudioSinkInterface::Data data(
-          audioFrame->data(), audioFrame->samples_per_channel_,
-          audioFrame->sample_rate_hz_, audioFrame->num_channels_,
-          audioFrame->timestamp_);
+          audio_frame->data(), audio_frame->samples_per_channel_,
+          audio_frame->sample_rate_hz_, audio_frame->num_channels_,
+          audio_frame->timestamp_);
       audio_sink_->OnData(data);
     }
   }
@@ -674,89 +675,53 @@
   // Output volume scaling
   if (output_gain < 0.99f || output_gain > 1.01f) {
     // TODO(solenberg): Combine with mute state - this can cause clicks!
-    AudioFrameOperations::ScaleWithSat(output_gain, audioFrame);
+    AudioFrameOperations::ScaleWithSat(output_gain, audio_frame);
   }
 
   // Measure audio level (0-9)
   // TODO(henrik.lundin) Use the |muted| information here too.
   // TODO(deadbeef): Use RmsLevel for |_outputAudioLevel| (see
   // https://crbug.com/webrtc/7517).
-  _outputAudioLevel.ComputeLevel(*audioFrame, kAudioSampleDurationSeconds);
+  _outputAudioLevel.ComputeLevel(*audio_frame, kAudioSampleDurationSeconds);
 
-  if (capture_start_rtp_time_stamp_ < 0 && audioFrame->timestamp_ != 0) {
+  if (capture_start_rtp_time_stamp_ < 0 && audio_frame->timestamp_ != 0) {
     // The first frame with a valid rtp timestamp.
-    capture_start_rtp_time_stamp_ = audioFrame->timestamp_;
+    capture_start_rtp_time_stamp_ = audio_frame->timestamp_;
   }
 
   if (capture_start_rtp_time_stamp_ >= 0) {
-    // audioFrame.timestamp_ should be valid from now on.
+    // audio_frame.timestamp_ should be valid from now on.
 
     // Compute elapsed time.
     int64_t unwrap_timestamp =
-        rtp_ts_wraparound_handler_->Unwrap(audioFrame->timestamp_);
-    audioFrame->elapsed_time_ms_ =
+        rtp_ts_wraparound_handler_->Unwrap(audio_frame->timestamp_);
+    audio_frame->elapsed_time_ms_ =
         (unwrap_timestamp - capture_start_rtp_time_stamp_) /
         (GetRtpTimestampRateHz() / 1000);
 
     {
       rtc::CritScope lock(&ts_stats_lock_);
       // Compute ntp time.
-      audioFrame->ntp_time_ms_ =
-          ntp_estimator_.Estimate(audioFrame->timestamp_);
+      audio_frame->ntp_time_ms_ =
+          ntp_estimator_.Estimate(audio_frame->timestamp_);
       // |ntp_time_ms_| won't be valid until at least 2 RTCP SRs are received.
-      if (audioFrame->ntp_time_ms_ > 0) {
+      if (audio_frame->ntp_time_ms_ > 0) {
         // Compute |capture_start_ntp_time_ms_| so that
         // |capture_start_ntp_time_ms_| + |elapsed_time_ms_| == |ntp_time_ms_|
         capture_start_ntp_time_ms_ =
-            audioFrame->ntp_time_ms_ - audioFrame->elapsed_time_ms_;
+            audio_frame->ntp_time_ms_ - audio_frame->elapsed_time_ms_;
       }
     }
   }
 
-  return muted ? MixerParticipant::AudioFrameInfo::kMuted
-               : MixerParticipant::AudioFrameInfo::kNormal;
+  return muted ? AudioMixer::Source::AudioFrameInfo::kMuted
+               : AudioMixer::Source::AudioFrameInfo::kNormal;
 }
 
-AudioMixer::Source::AudioFrameInfo Channel::GetAudioFrameWithInfo(
-    int sample_rate_hz,
-    AudioFrame* audio_frame) {
-  audio_frame->sample_rate_hz_ = sample_rate_hz;
-
-  const auto frame_info = GetAudioFrameWithMuted(-1, audio_frame);
-
-  using FrameInfo = AudioMixer::Source::AudioFrameInfo;
-  FrameInfo new_audio_frame_info = FrameInfo::kError;
-  switch (frame_info) {
-    case MixerParticipant::AudioFrameInfo::kNormal:
-      new_audio_frame_info = FrameInfo::kNormal;
-      break;
-    case MixerParticipant::AudioFrameInfo::kMuted:
-      new_audio_frame_info = FrameInfo::kMuted;
-      break;
-    case MixerParticipant::AudioFrameInfo::kError:
-      new_audio_frame_info = FrameInfo::kError;
-      break;
-  }
-  return new_audio_frame_info;
-}
-
-int32_t Channel::NeededFrequency(int32_t id) const {
-  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
-               "Channel::NeededFrequency(id=%d)", id);
-
-  int highestNeeded = 0;
-
-  // Determine highest needed receive frequency
-  int32_t receiveFrequency = audio_coding_->ReceiveFrequency();
-
+int Channel::PreferredSampleRate() const {
   // Return the bigger of playout and receive frequency in the ACM.
-  if (audio_coding_->PlayoutFrequency() > receiveFrequency) {
-    highestNeeded = audio_coding_->PlayoutFrequency();
-  } else {
-    highestNeeded = receiveFrequency;
-  }
-
-  return highestNeeded;
+  return std::max(audio_coding_->ReceiveFrequency(),
+                  audio_coding_->PlayoutFrequency());
 }
 
 int32_t Channel::CreateChannel(Channel*& channel,
@@ -806,7 +771,6 @@
       capture_start_rtp_time_stamp_(-1),
       capture_start_ntp_time_ms_(-1),
       _engineStatisticsPtr(NULL),
-      _outputMixerPtr(NULL),
       _moduleProcessThreadPtr(NULL),
       _audioDeviceModulePtr(NULL),
       _voiceEngineObserverPtr(NULL),
@@ -983,7 +947,6 @@
 }
 
 int32_t Channel::SetEngineInformation(Statistics& engineStatistics,
-                                      OutputMixer& outputMixer,
                                       ProcessThread& moduleProcessThread,
                                       AudioDeviceModule& audioDeviceModule,
                                       VoiceEngineObserver* voiceEngineObserver,
@@ -994,7 +957,6 @@
   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
                "Channel::SetEngineInformation()");
   _engineStatisticsPtr = &engineStatistics;
-  _outputMixerPtr = &outputMixer;
   _moduleProcessThreadPtr = &moduleProcessThread;
   _audioDeviceModulePtr = &audioDeviceModule;
   _voiceEngineObserverPtr = voiceEngineObserver;
@@ -1020,14 +982,6 @@
     return 0;
   }
 
-  // Add participant as candidates for mixing.
-  if (_outputMixerPtr->SetMixabilityStatus(*this, true) != 0) {
-    _engineStatisticsPtr->SetLastError(
-        VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
-        "StartPlayout() failed to add participant to mixer");
-    return -1;
-  }
-
   channel_state_.SetPlaying(true);
 
   return 0;
@@ -1040,14 +994,6 @@
     return 0;
   }
 
-  // Remove participant as candidates for mixing
-  if (_outputMixerPtr->SetMixabilityStatus(*this, false) != 0) {
-    _engineStatisticsPtr->SetLastError(
-        VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
-        "StopPlayout() failed to remove participant from mixer");
-    return -1;
-  }
-
   channel_state_.SetPlaying(false);
   _outputAudioLevel.Clear();
 
diff --git a/voice_engine/channel.h b/voice_engine/channel.h
index f4e47ee..53b8cd0 100644
--- a/voice_engine/channel.h
+++ b/voice_engine/channel.h
@@ -23,7 +23,6 @@
 #include "modules/audio_coding/acm2/codec_manager.h"
 #include "modules/audio_coding/acm2/rent_a_codec.h"
 #include "modules/audio_coding/include/audio_coding_module.h"
-#include "modules/audio_conference_mixer/include/audio_conference_mixer_defines.h"
 #include "modules/audio_processing/rms_level.h"
 #include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
 #include "modules/rtp_rtcp/include/rtp_header_parser.h"
@@ -89,7 +88,6 @@
 
 namespace voe {
 
-class OutputMixer;
 class RtcEventLogProxy;
 class RtcpRttStatsProxy;
 class RtpPacketSenderProxy;
@@ -144,7 +142,6 @@
       public Transport,
       public AudioPacketizationCallback,  // receive encoded packets from the
                                           // ACM
-      public MixerParticipant,  // supplies output mixer with audio frames
       public OverheadObserver {
  public:
   friend class VoERtcpObserver;
@@ -162,7 +159,6 @@
   int32_t Init();
   void Terminate();
   int32_t SetEngineInformation(Statistics& engineStatistics,
-                               OutputMixer& outputMixer,
                                ProcessThread& moduleProcessThread,
                                AudioDeviceModule& audioDeviceModule,
                                VoiceEngineObserver* voiceEngineObserver,
@@ -283,17 +279,13 @@
                const PacketOptions& packet_options) override;
   bool SendRtcp(const uint8_t* data, size_t len) override;
 
-  // From MixerParticipant
-  MixerParticipant::AudioFrameInfo GetAudioFrameWithMuted(
-      int32_t id,
-      AudioFrame* audioFrame) override;
-  int32_t NeededFrequency(int32_t id) const override;
-
   // From AudioMixer::Source.
   AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
       int sample_rate_hz,
       AudioFrame* audio_frame);
 
+  int PreferredSampleRate() const;
+
   uint32_t InstanceId() const { return _instanceId; }
   int32_t ChannelId() const { return _channelId; }
   bool Playing() const { return channel_state_.Get().playing; }
@@ -433,7 +425,6 @@
 
   // uses
   Statistics* _engineStatisticsPtr;
-  OutputMixer* _outputMixerPtr;
   ProcessThread* _moduleProcessThreadPtr;
   AudioDeviceModule* _audioDeviceModulePtr;
   VoiceEngineObserver* _voiceEngineObserverPtr;  // owned by base
diff --git a/voice_engine/channel_proxy.cc b/voice_engine/channel_proxy.cc
index f6013c2..15db60d 100644
--- a/voice_engine/channel_proxy.cc
+++ b/voice_engine/channel_proxy.cc
@@ -257,9 +257,9 @@
   return channel()->GetAudioFrameWithInfo(sample_rate_hz, audio_frame);
 }
 
-int ChannelProxy::NeededFrequency() const {
+int ChannelProxy::PreferredSampleRate() const {
   RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
-  return static_cast<int>(channel()->NeededFrequency(-1));
+  return channel()->PreferredSampleRate();
 }
 
 void ChannelProxy::SetTransportOverhead(int transport_overhead_per_packet) {
diff --git a/voice_engine/channel_proxy.h b/voice_engine/channel_proxy.h
index 40246aaa..cb292ec 100644
--- a/voice_engine/channel_proxy.h
+++ b/voice_engine/channel_proxy.h
@@ -107,7 +107,7 @@
   virtual AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
       int sample_rate_hz,
       AudioFrame* audio_frame);
-  virtual int NeededFrequency() const;
+  virtual int PreferredSampleRate() const;
   virtual void SetTransportOverhead(int transport_overhead_per_packet);
   virtual void AssociateSendChannel(const ChannelProxy& send_channel_proxy);
   virtual void DisassociateSendChannel();
diff --git a/voice_engine/output_mixer.cc b/voice_engine/output_mixer.cc
deleted file mode 100644
index 8d1e31d..0000000
--- a/voice_engine/output_mixer.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "voice_engine/output_mixer.h"
-
-#include "modules/audio_processing/include/audio_processing.h"
-#include "rtc_base/format_macros.h"
-#include "system_wrappers/include/trace.h"
-#include "voice_engine/statistics.h"
-#include "voice_engine/utility.h"
-
-namespace webrtc {
-namespace voe {
-
-void
-OutputMixer::NewMixedAudio(int32_t id,
-                           const AudioFrame& generalAudioFrame,
-                           const AudioFrame** uniqueAudioFrames,
-                           uint32_t size)
-{
-    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
-                 "OutputMixer::NewMixedAudio(id=%d, size=%u)", id, size);
-
-    _audioFrame.CopyFrom(generalAudioFrame);
-    _audioFrame.id_ = id;
-}
-
-int32_t
-OutputMixer::Create(OutputMixer*& mixer, uint32_t instanceId)
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
-                 "OutputMixer::Create(instanceId=%d)", instanceId);
-    mixer = new OutputMixer(instanceId);
-    if (mixer == NULL)
-    {
-        WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
-                     "OutputMixer::Create() unable to allocate memory for"
-                     "mixer");
-        return -1;
-    }
-    return 0;
-}
-
-OutputMixer::OutputMixer(uint32_t instanceId) :
-    _mixerModule(*AudioConferenceMixer::Create(instanceId)),
-    _instanceId(instanceId),
-    _mixingFrequencyHz(8000)
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
-                 "OutputMixer::OutputMixer() - ctor");
-
-    if (_mixerModule.RegisterMixedStreamCallback(this) == -1)
-    {
-        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
-                     "OutputMixer::OutputMixer() failed to register mixer"
-                     "callbacks");
-    }
-}
-
-void
-OutputMixer::Destroy(OutputMixer*& mixer)
-{
-    if (mixer)
-    {
-        delete mixer;
-        mixer = NULL;
-    }
-}
-
-OutputMixer::~OutputMixer()
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
-                 "OutputMixer::~OutputMixer() - dtor");
-    _mixerModule.UnRegisterMixedStreamCallback();
-    delete &_mixerModule;
-}
-
-int32_t
-OutputMixer::SetEngineInformation(voe::Statistics& engineStatistics)
-{
-    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
-                 "OutputMixer::SetEngineInformation()");
-    _engineStatisticsPtr = &engineStatistics;
-    return 0;
-}
-
-int32_t
-OutputMixer::SetAudioProcessingModule(AudioProcessing* audioProcessingModule)
-{
-    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
-                 "OutputMixer::SetAudioProcessingModule("
-                 "audioProcessingModule=0x%x)", audioProcessingModule);
-    _audioProcessingModulePtr = audioProcessingModule;
-    return 0;
-}
-
-int32_t
-OutputMixer::SetMixabilityStatus(MixerParticipant& participant,
-                                 bool mixable)
-{
-    return _mixerModule.SetMixabilityStatus(&participant, mixable);
-}
-
-int32_t
-OutputMixer::MixActiveChannels()
-{
-    _mixerModule.Process();
-    return 0;
-}
-
-int OutputMixer::GetMixedAudio(int sample_rate_hz,
-                               size_t num_channels,
-                               AudioFrame* frame) {
-  WEBRTC_TRACE(
-      kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
-      "OutputMixer::GetMixedAudio(sample_rate_hz=%d, num_channels=%" PRIuS ")",
-      sample_rate_hz, num_channels);
-
-  frame->num_channels_ = num_channels;
-  frame->sample_rate_hz_ = sample_rate_hz;
-  // TODO(andrew): Ideally the downmixing would occur much earlier, in
-  // AudioCodingModule.
-  RemixAndResample(_audioFrame, &resampler_, frame);
-  return 0;
-}
-
-int32_t
-OutputMixer::DoOperationsOnCombinedSignal(bool feed_data_to_apm)
-{
-    if (_audioFrame.sample_rate_hz_ != _mixingFrequencyHz)
-    {
-        WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
-                     "OutputMixer::DoOperationsOnCombinedSignal() => "
-                     "mixing frequency = %d", _audioFrame.sample_rate_hz_);
-        _mixingFrequencyHz = _audioFrame.sample_rate_hz_;
-    }
-
-    // --- Far-end Voice Quality Enhancement (AudioProcessing Module)
-    if (feed_data_to_apm) {
-      if (_audioProcessingModulePtr->ProcessReverseStream(&_audioFrame) != 0) {
-        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                     "AudioProcessingModule::ProcessReverseStream() => error");
-        RTC_NOTREACHED();
-      }
-    }
-
-    return 0;
-}
-}  // namespace voe
-}  // namespace webrtc
diff --git a/voice_engine/output_mixer.h b/voice_engine/output_mixer.h
deleted file mode 100644
index 284f92d..0000000
--- a/voice_engine/output_mixer.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VOICE_ENGINE_OUTPUT_MIXER_H_
-#define VOICE_ENGINE_OUTPUT_MIXER_H_
-
-#include <memory>
-
-#include "common_audio/resampler/include/push_resampler.h"
-#include "common_types.h"  // NOLINT(build/include)
-#include "modules/audio_conference_mixer/include/audio_conference_mixer.h"
-#include "modules/audio_conference_mixer/include/audio_conference_mixer_defines.h"
-#include "rtc_base/criticalsection.h"
-
-namespace webrtc {
-
-class AudioProcessing;
-class FileWrapper;
-
-namespace voe {
-
-class Statistics;
-
-class OutputMixer : public AudioMixerOutputReceiver
-{
-public:
-    static int32_t Create(OutputMixer*& mixer, uint32_t instanceId);
-
-    static void Destroy(OutputMixer*& mixer);
-
-    int32_t SetEngineInformation(Statistics& engineStatistics);
-
-    int32_t SetAudioProcessingModule(
-        AudioProcessing* audioProcessingModule);
-
-    int32_t MixActiveChannels();
-
-    int32_t DoOperationsOnCombinedSignal(bool feed_data_to_apm);
-
-    int32_t SetMixabilityStatus(MixerParticipant& participant,
-                                bool mixable);
-
-    int GetMixedAudio(int sample_rate_hz, size_t num_channels,
-                      AudioFrame* audioFrame);
-
-    virtual ~OutputMixer();
-
-    // from AudioMixerOutputReceiver
-    virtual void NewMixedAudio(
-        int32_t id,
-        const AudioFrame& generalAudioFrame,
-        const AudioFrame** uniqueAudioFrames,
-        uint32_t size);
-
-private:
-    OutputMixer(uint32_t instanceId);
-
-    // uses
-    Statistics* _engineStatisticsPtr;
-    AudioProcessing* _audioProcessingModulePtr;
-
-    AudioConferenceMixer& _mixerModule;
-    AudioFrame _audioFrame;
-    // Converts mixed audio to the audio device output rate.
-    PushResampler<int16_t> resampler_;
-    // Converts mixed audio to the audio processing rate.
-    PushResampler<int16_t> audioproc_resampler_;
-    int _instanceId;
-    int _mixingFrequencyHz;
-};
-
-}  // namespace voe
-
-}  // namespace werbtc
-
-#endif  // VOICE_ENGINE_OUTPUT_MIXER_H_
diff --git a/voice_engine/shared_data.cc b/voice_engine/shared_data.cc
index dc3d8c0..3b3f17f 100644
--- a/voice_engine/shared_data.cc
+++ b/voice_engine/shared_data.cc
@@ -13,7 +13,6 @@
 #include "modules/audio_processing/include/audio_processing.h"
 #include "system_wrappers/include/trace.h"
 #include "voice_engine/channel.h"
-#include "voice_engine/output_mixer.h"
 #include "voice_engine/transmit_mixer.h"
 
 namespace webrtc {
@@ -30,9 +29,6 @@
       _moduleProcessThreadPtr(ProcessThread::Create("VoiceProcessThread")),
       encoder_queue_("AudioEncoderQueue") {
   Trace::CreateTrace();
-  if (OutputMixer::Create(_outputMixerPtr, _gInstanceCounter) == 0) {
-    _outputMixerPtr->SetEngineInformation(_engineStatistics);
-  }
   if (TransmitMixer::Create(_transmitMixerPtr, _gInstanceCounter) == 0) {
     _transmitMixerPtr->SetEngineInformation(*_moduleProcessThreadPtr,
                                             _engineStatistics, _channelManager);
@@ -41,7 +37,6 @@
 
 SharedData::~SharedData()
 {
-    OutputMixer::Destroy(_outputMixerPtr);
     TransmitMixer::Destroy(_transmitMixerPtr);
     if (_audioDevicePtr) {
         _audioDevicePtr->Release();
@@ -62,7 +57,6 @@
 
 void SharedData::set_audio_processing(AudioProcessing* audioproc) {
   _transmitMixerPtr->SetAudioProcessingModule(audioproc);
-  _outputMixerPtr->SetAudioProcessingModule(audioproc);
 }
 
 int SharedData::NumOfSendingChannels() {
diff --git a/voice_engine/shared_data.h b/voice_engine/shared_data.h
index 58ac827..336aab2 100644
--- a/voice_engine/shared_data.h
+++ b/voice_engine/shared_data.h
@@ -31,7 +31,6 @@
 namespace voe {
 
 class TransmitMixer;
-class OutputMixer;
 
 class SharedData
 {
@@ -45,7 +44,6 @@
         const rtc::scoped_refptr<AudioDeviceModule>& audio_device);
     void set_audio_processing(AudioProcessing* audio_processing);
     TransmitMixer* transmit_mixer() { return _transmitMixerPtr; }
-    OutputMixer* output_mixer() { return _outputMixerPtr; }
     rtc::CriticalSection* crit_sec() { return &_apiCritPtr; }
     ProcessThread* process_thread() { return _moduleProcessThreadPtr.get(); }
     rtc::TaskQueue* encoder_queue();
@@ -66,7 +64,6 @@
  ChannelManager _channelManager;
  Statistics _engineStatistics;
  rtc::scoped_refptr<AudioDeviceModule> _audioDevicePtr;
- OutputMixer* _outputMixerPtr;
  TransmitMixer* _transmitMixerPtr;
  std::unique_ptr<ProcessThread> _moduleProcessThreadPtr;
  // |encoder_queue| is defined last to ensure all pending tasks are cancelled
diff --git a/voice_engine/voe_base_impl.cc b/voice_engine/voe_base_impl.cc
index ec28e06..76dd55a 100644
--- a/voice_engine/voe_base_impl.cc
+++ b/voice_engine/voe_base_impl.cc
@@ -18,12 +18,9 @@
 #include "rtc_base/format_macros.h"
 #include "rtc_base/location.h"
 #include "rtc_base/logging.h"
-#include "system_wrappers/include/file_wrapper.h"
 #include "voice_engine/channel.h"
 #include "voice_engine/include/voe_errors.h"
-#include "voice_engine/output_mixer.h"
 #include "voice_engine/transmit_mixer.h"
-#include "voice_engine/utility.h"
 #include "voice_engine/voice_engine_impl.h"
 
 namespace webrtc {
@@ -148,9 +145,8 @@
                                      size_t& nSamplesOut,
                                      int64_t* elapsed_time_ms,
                                      int64_t* ntp_time_ms) {
-  GetPlayoutData(static_cast<int>(samplesPerSec), nChannels, nSamples, true,
-                 audioSamples, elapsed_time_ms, ntp_time_ms);
-  nSamplesOut = audioFrame_.samples_per_channel_;
+  RTC_NOTREACHED();
+  nSamplesOut = 0;
   return 0;
 }
 
@@ -177,11 +172,7 @@
                                  size_t number_of_frames,
                                  void* audio_data, int64_t* elapsed_time_ms,
                                  int64_t* ntp_time_ms) {
-  assert(bits_per_sample == 16);
-  assert(number_of_frames == static_cast<size_t>(sample_rate / 100));
-
-  GetPlayoutData(sample_rate, number_of_channels, number_of_frames, false,
-                 audio_data, elapsed_time_ms, ntp_time_ms);
+  RTC_NOTREACHED();
 }
 
 int VoEBaseImpl::RegisterVoiceEngineObserver(VoiceEngineObserver& observer) {
@@ -418,7 +409,7 @@
 
 int VoEBaseImpl::InitializeChannel(voe::ChannelOwner* channel_owner) {
   if (channel_owner->channel()->SetEngineInformation(
-          shared_->statistics(), *shared_->output_mixer(),
+          shared_->statistics(),
           *shared_->process_thread(), *shared_->audio_device(),
           voiceEngineObserverPtr_, &callbackCritSect_,
           shared_->encoder_queue()) != 0) {
@@ -653,34 +644,4 @@
 
   return shared_->statistics().SetUnInitialized();
 }
-
-void VoEBaseImpl::GetPlayoutData(int sample_rate, size_t number_of_channels,
-                                 size_t number_of_frames, bool feed_data_to_apm,
-                                 void* audio_data, int64_t* elapsed_time_ms,
-                                 int64_t* ntp_time_ms) {
-  assert(shared_->output_mixer() != nullptr);
-
-  // TODO(andrew): if the device is running in mono, we should tell the mixer
-  // here so that it will only request mono from AudioCodingModule.
-  // Perform mixing of all active participants (channel-based mixing)
-  shared_->output_mixer()->MixActiveChannels();
-
-  // Additional operations on the combined signal
-  shared_->output_mixer()->DoOperationsOnCombinedSignal(feed_data_to_apm);
-
-  // Retrieve the final output mix (resampled to match the ADM)
-  shared_->output_mixer()->GetMixedAudio(sample_rate, number_of_channels,
-                                         &audioFrame_);
-
-  assert(number_of_frames == audioFrame_.samples_per_channel_);
-  assert(sample_rate == audioFrame_.sample_rate_hz_);
-
-  // Deliver audio (PCM) samples to the ADM
-  memcpy(audio_data, audioFrame_.data(),
-         sizeof(int16_t) * number_of_frames * number_of_channels);
-
-  *elapsed_time_ms = audioFrame_.elapsed_time_ms_;
-  *ntp_time_ms = audioFrame_.ntp_time_ms_;
-}
-
 }  // namespace webrtc
diff --git a/voice_engine/voe_base_impl.h b/voice_engine/voe_base_impl.h
index 608292c..e15d1f1 100644
--- a/voice_engine/voe_base_impl.h
+++ b/voice_engine/voe_base_impl.h
@@ -62,27 +62,27 @@
                                   const uint32_t volume,
                                   const bool key_pressed,
                                   uint32_t& new_mic_volume) override;
-  int32_t NeedMorePlayData(const size_t nSamples,
-                           const size_t nBytesPerSample,
-                           const size_t nChannels,
-                           const uint32_t samplesPerSec,
-                           void* audioSamples,
-                           size_t& nSamplesOut,
-                           int64_t* elapsed_time_ms,
-                           int64_t* ntp_time_ms) override;
+  RTC_DEPRECATED int32_t NeedMorePlayData(const size_t nSamples,
+                                          const size_t nBytesPerSample,
+                                          const size_t nChannels,
+                                          const uint32_t samplesPerSec,
+                                          void* audioSamples,
+                                          size_t& nSamplesOut,
+                                          int64_t* elapsed_time_ms,
+                                          int64_t* ntp_time_ms) override;
   void PushCaptureData(int voe_channel,
                        const void* audio_data,
                        int bits_per_sample,
                        int sample_rate,
                        size_t number_of_channels,
                        size_t number_of_frames) override;
-  void PullRenderData(int bits_per_sample,
-                      int sample_rate,
-                      size_t number_of_channels,
-                      size_t number_of_frames,
-                      void* audio_data,
-                      int64_t* elapsed_time_ms,
-                      int64_t* ntp_time_ms) override;
+  RTC_DEPRECATED void PullRenderData(int bits_per_sample,
+                                     int sample_rate,
+                                     size_t number_of_channels,
+                                     size_t number_of_frames,
+                                     void* audio_data,
+                                     int64_t* elapsed_time_ms,
+                                     int64_t* ntp_time_ms) override;
 
   // AudioDeviceObserver
   void OnErrorIsReported(const ErrorCode error) override;