VoIP API implementation on top of AudioIngress/Egress

This is the last CL in the series; it includes the rest of the VoIP API implementation.
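
For reference, the overall API flow this CL enables looks roughly like the
sketch below, condensed from the usage comment in api/voip/voip_engine.h
("..." marks application-specific arguments, as in that comment; error
handling and SDP negotiation are omitted):

  VoipEngineConfig config;
  config.encoder_factory = CreateBuiltinAudioEncoderFactory();
  config.decoder_factory = CreateBuiltinAudioDecoderFactory();
  config.task_queue_factory = CreateDefaultTaskQueueFactory();
  config.audio_device_module = AudioDeviceModule::Create(
      AudioDeviceModule::kPlatformDefaultAudio, config.task_queue_factory.get());
  config.audio_processing = AudioProcessingBuilder().Create();

  auto voip_engine = CreateVoipEngine(std::move(config));
  auto& voip_base = voip_engine->Base();

  absl::optional<ChannelId> channel = voip_base.CreateChannel(&app_transport_);
  voip_engine->Codec().SetSendCodec(*channel, ...);
  voip_engine->Codec().SetReceiveCodecs(*channel, ...);
  voip_base.StartSend(*channel);
  voip_base.StartPlayout(*channel);
  voip_engine->Network().ReceivedRTPPacket(*channel, ...);

  voip_base.StopSend(*channel);
  voip_base.StopPlayout(*channel);
  voip_base.ReleaseChannel(*channel);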

Bug: webrtc:11251
Change-Id: I3f1b0bf2fd48be864ffc73482105f9514f75f9e0
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/173860
Commit-Queue: Tim Na <natim@webrtc.org>
Reviewed-by: Per Åhgren <peah@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#31168}
diff --git a/BUILD.gn b/BUILD.gn
index 85c428d..f7d15f4 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -687,8 +687,11 @@
   rtc_test("voip_unittests") {
     testonly = true
     deps = [
+      "api/voip:voip_engine_factory_unittests",
+      "audio/voip/test:audio_channel_unittests",
       "audio/voip/test:audio_egress_unittests",
       "audio/voip/test:audio_ingress_unittests",
+      "audio/voip/test:voip_core_unittests",
       "test:test_main",
     ]
   }
diff --git a/api/voip/BUILD.gn b/api/voip/BUILD.gn
index 665b9e3..2c5f71c 100644
--- a/api/voip/BUILD.gn
+++ b/api/voip/BUILD.gn
@@ -1,10 +1,10 @@
-#Copyright(c) 2020 The WebRTC project authors.All Rights Reserved.
+# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 #
-#Use of this source code is governed by a BSD - style license
-#that can be found in the LICENSE file in the root of the source
-#tree.An additional intellectual property rights grant can be found
-#in the file PATENTS.All contributing project authors may
-#be found in the AUTHORS file in the root of the source tree.
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
 
 import("../../webrtc.gni")
 
@@ -22,3 +22,36 @@
     "//third_party/abseil-cpp/absl/types:optional",
   ]
 }
+
+rtc_library("voip_engine_factory") {
+  visibility = [ "*" ]
+  sources = [
+    "voip_engine_factory.cc",
+    "voip_engine_factory.h",
+  ]
+  deps = [
+    ":voip_api",
+    "..:scoped_refptr",
+    "../../audio/voip:voip_core",
+    "../../modules/audio_device:audio_device_api",
+    "../../modules/audio_processing:api",
+    "../../rtc_base:logging",
+    "../audio_codecs:audio_codecs_api",
+    "../task_queue",
+  ]
+}
+
+if (rtc_include_tests) {
+  rtc_library("voip_engine_factory_unittests") {
+    testonly = true
+    sources = [ "voip_engine_factory_unittest.cc" ]
+    deps = [
+      ":voip_engine_factory",
+      "../../modules/audio_device:mock_audio_device",
+      "../../modules/audio_processing:mocks",
+      "../../test:audio_codec_mocks",
+      "../../test:test_support",
+      "../task_queue:default_task_queue_factory",
+    ]
+  }
+}
diff --git a/api/voip/DEPS b/api/voip/DEPS
index 446fd4e..3845dff 100644
--- a/api/voip/DEPS
+++ b/api/voip/DEPS
@@ -2,4 +2,9 @@
   ".*\.h": [
     "+third_party/absl/types/optional.h",
   ],
-}
\ No newline at end of file
+
+  "voip_engine_factory.h": [
+    "+modules/audio_device/include/audio_device.h",
+    "+modules/audio_processing/include/audio_processing.h",
+  ],
+}
diff --git a/api/voip/voip_base.h b/api/voip/voip_base.h
index 67cd49b..ef83b51 100644
--- a/api/voip/voip_base.h
+++ b/api/voip/voip_base.h
@@ -1,17 +1,17 @@
-//
-//  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
-//
-//  Use of this source code is governed by a BSD-style license
-//  that can be found in the LICENSE file in the root of the source
-//  tree. An additional intellectual property rights grant can be found
-//  in the file PATENTS.  All contributing project authors may
-//  be found in the AUTHORS file in the root of the source tree.
-//
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
 
 #ifndef API_VOIP_VOIP_BASE_H_
 #define API_VOIP_VOIP_BASE_H_
 
-#include "third_party/absl/types/optional.h"
+#include "absl/types/optional.h"
 
 namespace webrtc {
 
@@ -51,13 +51,11 @@
       Transport* transport,
       absl::optional<uint32_t> local_ssrc) = 0;
 
-  // Releases |channel_id| that has served the purpose.
-  // Released channel will be re-allocated again that invoking operations
-  // on released |channel_id| will lead to undefined behavior.
+  // Releases |channel_id| that is no longer in use.
   virtual void ReleaseChannel(ChannelId channel_id) = 0;
 
-  // Starts sending on |channel_id|. This will start microphone if first to
-  // start. Returns false if initialization has failed on selected microphone
+  // Starts sending on |channel_id|. This starts the microphone if it is not
+  // already started. Returns false if initialization fails on the selected
   // device. API is subject to expand to reflect error condition to application
   // later.
   virtual bool StartSend(ChannelId channel_id) = 0;
diff --git a/api/voip/voip_codec.h b/api/voip/voip_codec.h
index 32c4a72..eb42c44 100644
--- a/api/voip/voip_codec.h
+++ b/api/voip/voip_codec.h
@@ -1,12 +1,12 @@
-//
-//  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
-//
-//  Use of this source code is governed by a BSD-style license
-//  that can be found in the LICENSE file in the root of the source
-//  tree. An additional intellectual property rights grant can be found
-//  in the file PATENTS.  All contributing project authors may
-//  be found in the AUTHORS file in the root of the source tree.
-//
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
 
 #ifndef API_VOIP_VOIP_CODEC_H_
 #define API_VOIP_VOIP_CODEC_H_
diff --git a/api/voip/voip_engine.h b/api/voip/voip_engine.h
index 96905a1..81c97c0 100644
--- a/api/voip/voip_engine.h
+++ b/api/voip/voip_engine.h
@@ -1,12 +1,12 @@
-//
-//  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
-//
-//  Use of this source code is governed by a BSD-style license
-//  that can be found in the LICENSE file in the root of the source
-//  tree. An additional intellectual property rights grant can be found
-//  in the file PATENTS.  All contributing project authors may
-//  be found in the AUTHORS file in the root of the source tree.
-//
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
 
 #ifndef API_VOIP_VOIP_ENGINE_H_
 #define API_VOIP_VOIP_ENGINE_H_
@@ -17,50 +17,60 @@
 class VoipCodec;
 class VoipNetwork;
 
-// VoipEngine interfaces
+// VoipEngine is the main interface serving as the entry point for all VoIP
+// APIs. A single instance of VoipEngine should suffice for most typical VoIP
+// applications, as it handles multiple media sessions, including specialized
+// session types such as ad-hoc mesh conferencing. The example code below
+// describes the typical sequence of API usage. Each API header contains more
+// details on what the methods are used for.
 //
-// These pointer interfaces are valid as long as VoipEngine is available.
-// Therefore, application must synchronize the usage within the life span of
-// created VoipEngine instance.
+//   // The caller is responsible for setting the desired audio components.
+//   VoipEngineConfig config;
+//   config.encoder_factory = CreateBuiltinAudioEncoderFactory();
+//   config.decoder_factory = CreateBuiltinAudioDecoderFactory();
+//   config.task_queue_factory = CreateDefaultTaskQueueFactory();
+//   config.audio_device_module =
+//       AudioDeviceModule::Create(AudioDeviceModule::kPlatformDefaultAudio,
+//                                 config.task_queue_factory.get());
+//   config.audio_processing = AudioProcessingBuilder().Create();
 //
-//   auto voip_engine =
-//       webrtc::VoipEngineBuilder()
-//           .SetAudioEncoderFactory(CreateBuiltinAudioEncoderFactory())
-//           .SetAudioDecoderFactory(CreateBuiltinAudioDecoderFactory())
-//           .Create();
+//   auto voip_engine = CreateVoipEngine(std::move(config));
+//   if (!voip_engine) return some_failure;
 //
-//   auto voip_base = voip_engine->Base();
-//   auto voip_codec = voip_engine->Codec();
-//   auto voip_network = voip_engine->Network();
+//   auto& voip_base = voip_engine->Base();
+//   auto& voip_codec = voip_engine->Codec();
+//   auto& voip_network = voip_engine->Network();
 //
-//   VoipChannel::Config config = { &app_transport_, 0xdeadc0de };
-//   int channel = voip_base.CreateChannel(config);
+//   absl::optional<ChannelId> channel =
+//       voip_base.CreateChannel(&app_transport_);
+//   if (!channel) return some_failure;
 //
-//   // After SDP offer/answer, payload type and codec usage have been
-//   // decided through negotiation.
-//   voip_codec.SetSendCodec(channel, ...);
-//   voip_codec.SetReceiveCodecs(channel, ...);
+//   // After SDP offer/answer, set payload type and codecs that have been
+//   // decided through SDP negotiation.
+//   voip_codec.SetSendCodec(*channel, ...);
+//   voip_codec.SetReceiveCodecs(*channel, ...);
 //
-//   // Start Send/Playout on voip channel.
-//   voip_base.StartSend(channel);
-//   voip_base.StartPlayout(channel);
+//   // Start sending and playing RTP on voip channel.
+//   voip_base.StartSend(*channel);
+//   voip_base.StartPlayout(*channel);
 //
-//   // Inject received rtp/rtcp thru voip network interface.
-//   voip_network.ReceivedRTPPacket(channel, rtp_data, rtp_size);
-//   voip_network.ReceivedRTCPPacket(channel, rtcp_data, rtcp_size);
+//   // Inject received RTP/RTCP through VoipNetwork interface.
+//   voip_network.ReceivedRTPPacket(*channel, ...);
+//   voip_network.ReceivedRTCPPacket(*channel, ...);
 //
 //   // Stop and release voip channel.
-//   voip_base.StopSend(channel);
-//   voip_base.StopPlayout(channel);
+//   voip_base.StopSend(*channel);
+//   voip_base.StopPlayout(*channel);
+//   voip_base.ReleaseChannel(*channel);
 //
-//   voip_base.ReleaseChannel(channel);
-//
+// VoipEngine currently defines three sub-API classes and is subject to
+// expansion in the near future.
 class VoipEngine {
  public:
   virtual ~VoipEngine() = default;
 
   // VoipBase is the audio session management interface that
-  // create/release/start/stop one-to-one audio media session.
+  // creates/releases/starts/stops a one-to-one audio media session.
   virtual VoipBase& Base() = 0;
 
   // VoipNetwork provides injection APIs that would enable application
diff --git a/api/voip/voip_engine_factory.cc b/api/voip/voip_engine_factory.cc
new file mode 100644
index 0000000..6ac3c86
--- /dev/null
+++ b/api/voip/voip_engine_factory.cc
@@ -0,0 +1,44 @@
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/voip/voip_engine_factory.h"
+
+#include <utility>
+
+#include "audio/voip/voip_core.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+std::unique_ptr<VoipEngine> CreateVoipEngine(VoipEngineConfig config) {
+  RTC_CHECK(config.encoder_factory);
+  RTC_CHECK(config.decoder_factory);
+  RTC_CHECK(config.task_queue_factory);
+  RTC_CHECK(config.audio_device_module);
+
+  if (!config.audio_processing) {
+    RTC_DLOG(INFO) << "No audio processing functionality provided.";
+  }
+
+  auto voip_core = std::make_unique<VoipCore>();
+
+  if (!voip_core->Init(std::move(config.encoder_factory),
+                       std::move(config.decoder_factory),
+                       std::move(config.task_queue_factory),
+                       std::move(config.audio_device_module),
+                       std::move(config.audio_processing))) {
+    RTC_DLOG(LS_ERROR) << "Failed to initialize VoIP core.";
+    return nullptr;
+  }
+
+  return voip_core;
+}
+
+}  // namespace webrtc
diff --git a/api/voip/voip_engine_factory.h b/api/voip/voip_engine_factory.h
new file mode 100644
index 0000000..658ebfa
--- /dev/null
+++ b/api/voip/voip_engine_factory.h
@@ -0,0 +1,71 @@
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_VOIP_VOIP_ENGINE_FACTORY_H_
+#define API_VOIP_VOIP_ENGINE_FACTORY_H_
+
+#include <memory>
+
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/audio_codecs/audio_encoder_factory.h"
+#include "api/scoped_refptr.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/voip/voip_engine.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+
+// VoipEngineConfig is a struct that defines the parameters used to instantiate
+// a VoipEngine instance through the CreateVoipEngine factory method. Each
+// member is annotated as either mandatory or optional, along with a default
+// implementation that applications can use.
+struct VoipEngineConfig {
+  // Mandatory (e.g. api/audio_codecs/builtin_audio_encoder_factory).
+  // AudioEncoderFactory provides a set of audio codecs for VoipEngine to
+  // encode the audio input samples. Applications can choose to limit the set
+  // to reduce the application footprint.
+  rtc::scoped_refptr<AudioEncoderFactory> encoder_factory;
+
+  // Mandatory (e.g. api/audio_codecs/builtin_audio_decoder_factory).
+  // AudioDecoderFactory provides a set of audio codecs for VoipEngine to
+  // decode RTP packets received from the remote media endpoint. Applications
+  // can choose to limit the set to reduce the application footprint.
+  rtc::scoped_refptr<AudioDecoderFactory> decoder_factory;
+
+  // Mandatory (e.g. api/task_queue/default_task_queue_factory).
+  // TaskQueueFactory is used by VoipEngine to run its encoding flow
+  // asynchronously.
+  std::unique_ptr<TaskQueueFactory> task_queue_factory;
+
+  // Mandatory (e.g. modules/audio_device/include).
+  // AudioDeviceModule periodically provides audio input samples from the
+  // recording device (e.g. microphone) and requests audio output samples to
+  // play through its output device (e.g. speaker).
+  rtc::scoped_refptr<AudioDeviceModule> audio_device_module;
+
+  // Optional (e.g. modules/audio_processing/include).
+  // AudioProcessing provides audio processing functionality (e.g. acoustic
+  // echo cancellation, noise suppression, gain control, etc.) on the audio
+  // input samples for VoipEngine. When not set, VoipEngine will not perform
+  // any such processing on the audio input samples received from the
+  // AudioDeviceModule.
+  rtc::scoped_refptr<AudioProcessing> audio_processing;
+};
+
+// Creates a VoipEngine instance with the provided VoipEngineConfig.
+// This may return nullptr if AudioDeviceModule (ADM) initialization fails
+// during construction of the VoipEngine, which would render the VoipEngine
+// nonfunctional.
+std::unique_ptr<VoipEngine> CreateVoipEngine(VoipEngineConfig config);
+
+}  // namespace webrtc
+
+#endif  // API_VOIP_VOIP_ENGINE_FACTORY_H_
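
A minimal usage sketch of this factory, assuming the default implementations
suggested in the comments above and leaving the optional audio_processing
member unset:

  VoipEngineConfig config;
  config.encoder_factory = CreateBuiltinAudioEncoderFactory();
  config.decoder_factory = CreateBuiltinAudioDecoderFactory();
  config.task_queue_factory = CreateDefaultTaskQueueFactory();
  config.audio_device_module = AudioDeviceModule::Create(
      AudioDeviceModule::kPlatformDefaultAudio, config.task_queue_factory.get());
  // audio_processing is optional; when unset, VoipEngine skips such processing.

  std::unique_ptr<VoipEngine> voip_engine = CreateVoipEngine(std::move(config));
  if (!voip_engine) {
    // ADM initialization failed during construction; the engine is unusable.
  }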
diff --git a/api/voip/voip_engine_factory_unittest.cc b/api/voip/voip_engine_factory_unittest.cc
new file mode 100644
index 0000000..d0b8438
--- /dev/null
+++ b/api/voip/voip_engine_factory_unittest.cc
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <utility>
+
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/voip/voip_engine_factory.h"
+#include "modules/audio_device/include/mock_audio_device.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "test/gtest.h"
+#include "test/mock_audio_decoder_factory.h"
+#include "test/mock_audio_encoder_factory.h"
+
+namespace webrtc {
+namespace {
+
+// Creates a VoipEngine with mock modules, as in the normal use case.
+TEST(VoipEngineFactoryTest, CreateEngineWithMockModules) {
+  VoipEngineConfig config;
+  config.encoder_factory = new rtc::RefCountedObject<MockAudioEncoderFactory>();
+  config.decoder_factory = new rtc::RefCountedObject<MockAudioDecoderFactory>();
+  config.task_queue_factory = CreateDefaultTaskQueueFactory();
+  config.audio_processing =
+      new rtc::RefCountedObject<test::MockAudioProcessing>();
+  config.audio_device_module = test::MockAudioDeviceModule::CreateNice();
+
+  auto voip_engine = CreateVoipEngine(std::move(config));
+  EXPECT_NE(voip_engine, nullptr);
+}
+
+// Creates a VoipEngine without setting the optional audio processing component.
+TEST(VoipEngineFactoryTest, UseNoAudioProcessing) {
+  VoipEngineConfig config;
+  config.encoder_factory = new rtc::RefCountedObject<MockAudioEncoderFactory>();
+  config.decoder_factory = new rtc::RefCountedObject<MockAudioDecoderFactory>();
+  config.task_queue_factory = CreateDefaultTaskQueueFactory();
+  config.audio_device_module = test::MockAudioDeviceModule::CreateNice();
+
+  auto voip_engine = CreateVoipEngine(std::move(config));
+  EXPECT_NE(voip_engine, nullptr);
+}
+
+}  // namespace
+}  // namespace webrtc
diff --git a/api/voip/voip_network.h b/api/voip/voip_network.h
index 7742978..c49c769 100644
--- a/api/voip/voip_network.h
+++ b/api/voip/voip_network.h
@@ -1,12 +1,12 @@
-//
-//  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
-//
-//  Use of this source code is governed by a BSD-style license
-//  that can be found in the LICENSE file in the root of the source
-//  tree. An additional intellectual property rights grant can be found
-//  in the file PATENTS.  All contributing project authors may
-//  be found in the AUTHORS file in the root of the source tree.
-//
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
 
 #ifndef API_VOIP_VOIP_NETWORK_H_
 #define API_VOIP_VOIP_NETWORK_H_
@@ -16,24 +16,24 @@
 
 namespace webrtc {
 
-// VoipNetwork interface currently provides any network related interface
-// such as processing received RTP/RTCP packet from remote endpoint.
-// The interface subject to expand as needed.
-//
-// This interface requires a channel handle created via VoipBase interface.
+// The VoipNetwork interface provides network-related functionality, such as
+// processing RTP/RTCP packets received from the remote endpoint. This
+// interface requires a ChannelId created via the VoipBase interface. Note that
+// calling these APIs with an invalid (previously released) ChannelId fails
+// silently, as the underlying audio components have already been released.
+// This is intentional: the caller may run network I/O on a different thread
+// where a released channel id is still used to feed incoming RTP packets,
+// which are then silently ignored. The interface may expand as needed.
 class VoipNetwork {
  public:
-  // The packets received from the network should be passed to this
-  // function. Note that the data including the RTP-header must also be
-  // given to the VoipEngine.
+  // RTP packets received from the network (including the RTP header) go here.
   virtual void ReceivedRTPPacket(ChannelId channel_id,
-                                 rtc::ArrayView<const uint8_t> data) = 0;
+                                 rtc::ArrayView<const uint8_t> rtp_packet) = 0;
 
-  // The packets received from the network should be passed to this
-  // function. Note that the data including the RTCP-header must also be
-  // given to the VoipEngine.
-  virtual void ReceivedRTCPPacket(ChannelId channel_id,
-                                  rtc::ArrayView<const uint8_t> data) = 0;
+  // RTCP packets received from the network (including the RTCP header) go here.
+  virtual void ReceivedRTCPPacket(
+      ChannelId channel_id,
+      rtc::ArrayView<const uint8_t> rtcp_packet) = 0;
 
  protected:
   virtual ~VoipNetwork() = default;
diff --git a/audio/voip/BUILD.gn b/audio/voip/BUILD.gn
index 8ebc3ce4..60232d5 100644
--- a/audio/voip/BUILD.gn
+++ b/audio/voip/BUILD.gn
@@ -8,20 +8,64 @@
 
 import("../../webrtc.gni")
 
+rtc_library("voip_core") {
+  sources = [
+    "voip_core.cc",
+    "voip_core.h",
+  ]
+  deps = [
+    ":audio_channel",
+    "..:audio",
+    "../../api:scoped_refptr",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../api/task_queue",
+    "../../api/voip:voip_api",
+    "../../modules/audio_device:audio_device_api",
+    "../../modules/audio_mixer:audio_mixer_impl",
+    "../../modules/audio_processing:api",
+    "../../modules/utility:utility",
+    "../../rtc_base:criticalsection",
+    "../../rtc_base:logging",
+    "//third_party/abseil-cpp/absl/types:optional",
+  ]
+}
+
+rtc_library("audio_channel") {
+  sources = [
+    "audio_channel.cc",
+    "audio_channel.h",
+  ]
+  deps = [
+    ":audio_egress",
+    ":audio_ingress",
+    "../../api:transport_api",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../api/task_queue",
+    "../../api/voip:voip_api",
+    "../../modules/audio_device:audio_device_api",
+    "../../modules/rtp_rtcp",
+    "../../modules/rtp_rtcp:rtp_rtcp_format",
+    "../../modules/utility",
+    "../../rtc_base:criticalsection",
+    "../../rtc_base:logging",
+    "../../rtc_base:refcount",
+    "../../rtc_base:rtc_base_approved",
+  ]
+}
+
 rtc_library("audio_ingress") {
   sources = [
     "audio_ingress.cc",
     "audio_ingress.h",
   ]
   deps = [
+    "..:audio",
     "../../api:array_view",
     "../../api:rtp_headers",
     "../../api:scoped_refptr",
     "../../api:transport_api",
     "../../api/audio:audio_mixer_api",
     "../../api/audio_codecs:audio_codecs_api",
-    "../../audio",
-    "../../audio/utility:audio_frame_operations",
     "../../modules/audio_coding",
     "../../modules/rtp_rtcp",
     "../../modules/rtp_rtcp:rtp_rtcp_format",
@@ -30,6 +74,7 @@
     "../../rtc_base:logging",
     "../../rtc_base:safe_minmax",
     "../../rtc_base:timeutils",
+    "../utility:audio_frame_operations",
   ]
 }
 
@@ -39,10 +84,9 @@
     "audio_egress.h",
   ]
   deps = [
+    "..:audio",
     "../../api/audio_codecs:audio_codecs_api",
     "../../api/task_queue",
-    "../../audio",
-    "../../audio/utility:audio_frame_operations",
     "../../call:audio_sender_interface",
     "../../modules/audio_coding",
     "../../modules/rtp_rtcp",
@@ -51,5 +95,6 @@
     "../../rtc_base:rtc_task_queue",
     "../../rtc_base:thread_checker",
     "../../rtc_base:timeutils",
+    "../utility:audio_frame_operations",
   ]
 }
diff --git a/audio/voip/audio_channel.cc b/audio/voip/audio_channel.cc
new file mode 100644
index 0000000..b9ce7ac
--- /dev/null
+++ b/audio/voip/audio_channel.cc
@@ -0,0 +1,126 @@
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/voip/audio_channel.h"
+
+#include <utility>
+#include <vector>
+
+#include "api/audio_codecs/audio_format.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "rtc_base/critical_section.h"
+#include "rtc_base/location.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kRtcpReportIntervalMs = 5000;
+
+}  // namespace
+
+AudioChannel::AudioChannel(
+    Transport* transport,
+    uint32_t local_ssrc,
+    TaskQueueFactory* task_queue_factory,
+    ProcessThread* process_thread,
+    AudioMixer* audio_mixer,
+    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory)
+    : audio_mixer_(audio_mixer), process_thread_(process_thread) {
+  RTC_DCHECK(task_queue_factory);
+  RTC_DCHECK(process_thread);
+  RTC_DCHECK(audio_mixer);
+
+  Clock* clock = Clock::GetRealTimeClock();
+  receive_statistics_ = ReceiveStatistics::Create(clock);
+
+  RtpRtcp::Configuration rtp_config;
+  rtp_config.clock = clock;
+  rtp_config.audio = true;
+  rtp_config.receive_statistics = receive_statistics_.get();
+  rtp_config.rtcp_report_interval_ms = kRtcpReportIntervalMs;
+  rtp_config.outgoing_transport = transport;
+  rtp_config.local_media_ssrc = local_ssrc;
+
+  rtp_rtcp_ = RtpRtcp::Create(rtp_config);
+
+  rtp_rtcp_->SetSendingMediaStatus(false);
+  rtp_rtcp_->SetRTCPStatus(RtcpMode::kCompound);
+
+  // ProcessThread periodically services RTP stack for RTCP.
+  process_thread_->RegisterModule(rtp_rtcp_.get(), RTC_FROM_HERE);
+
+  ingress_ = std::make_unique<AudioIngress>(rtp_rtcp_.get(), clock,
+                                            receive_statistics_.get(),
+                                            std::move(decoder_factory));
+  egress_ =
+      std::make_unique<AudioEgress>(rtp_rtcp_.get(), clock, task_queue_factory);
+
+  // Add the audio ingress to the audio mixer so that the ADM can fetch audio
+  // samples to play.
+  audio_mixer_->AddSource(ingress_.get());
+}
+
+AudioChannel::~AudioChannel() {
+  if (egress_->IsSending()) {
+    StopSend();
+  }
+  if (ingress_->IsPlaying()) {
+    StopPlay();
+  }
+
+  audio_mixer_->RemoveSource(ingress_.get());
+  process_thread_->DeRegisterModule(rtp_rtcp_.get());
+}
+
+void AudioChannel::StartSend() {
+  egress_->StartSend();
+
+  // Start sending with the RTP stack if it is not sending yet.
+  if (!rtp_rtcp_->Sending() && rtp_rtcp_->SetSendingStatus(true) != 0) {
+    RTC_DLOG(LS_ERROR) << "StartSend() RTP/RTCP failed to start sending";
+  }
+}
+
+void AudioChannel::StopSend() {
+  egress_->StopSend();
+
+  // If the channel is not playing and the RTP stack is active, deactivate the
+  // RTP stack. SetSendingStatus(false) triggers the transmission of an RTCP
+  // BYE message to the remote endpoint.
+  if (!IsPlaying() && rtp_rtcp_->Sending() &&
+      rtp_rtcp_->SetSendingStatus(false) != 0) {
+    RTC_DLOG(LS_ERROR) << "StopSend() RTP/RTCP failed to stop sending";
+  }
+}
+
+void AudioChannel::StartPlay() {
+  ingress_->StartPlay();
+
+  // If the RTP stack is not sending, start it: even in recv-only mode, an
+  // RTCP receiver report is expected to be sent.
+  if (!rtp_rtcp_->Sending() && rtp_rtcp_->SetSendingStatus(true) != 0) {
+    RTC_DLOG(LS_ERROR) << "StartPlay() RTP/RTCP failed to start sending";
+  }
+}
+
+void AudioChannel::StopPlay() {
+  ingress_->StopPlay();
+
+  // Deactivate RTP stack only when both sending and receiving are stopped.
+  if (!IsSendingMedia() && rtp_rtcp_->Sending() &&
+      rtp_rtcp_->SetSendingStatus(false) != 0) {
+    RTC_DLOG(LS_ERROR) << "StopPlay() RTP/RTCP failed to stop sending";
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio/voip/audio_channel.h b/audio/voip/audio_channel.h
new file mode 100644
index 0000000..8b6f1a8
--- /dev/null
+++ b/audio/voip/audio_channel.h
@@ -0,0 +1,98 @@
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_VOIP_AUDIO_CHANNEL_H_
+#define AUDIO_VOIP_AUDIO_CHANNEL_H_
+
+#include <map>
+#include <memory>
+#include <queue>
+#include <utility>
+
+#include "api/task_queue/task_queue_factory.h"
+#include "api/voip/voip_base.h"
+#include "audio/voip/audio_egress.h"
+#include "audio/voip/audio_ingress.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "modules/utility/include/process_thread.h"
+#include "rtc_base/critical_section.h"
+#include "rtc_base/ref_count.h"
+
+namespace webrtc {
+
+// AudioChannel represents a single media session and provides APIs on top of
+// AudioIngress and AudioEgress. Note that a single RTP stack is shared between
+// these two classes, as it has both sending and receiving capabilities.
+class AudioChannel : public rtc::RefCountInterface {
+ public:
+  AudioChannel(Transport* transport,
+               uint32_t local_ssrc,
+               TaskQueueFactory* task_queue_factory,
+               ProcessThread* process_thread,
+               AudioMixer* audio_mixer,
+               rtc::scoped_refptr<AudioDecoderFactory> decoder_factory);
+  ~AudioChannel() override;
+
+  // Sets and gets the ChannelId that this audio channel belongs to, for
+  // debugging and logging purposes.
+  void SetId(ChannelId id) { id_ = id; }
+  ChannelId GetId() const { return id_; }
+
+  // APIs to start/stop audio channel on each direction.
+  void StartSend();
+  void StopSend();
+  void StartPlay();
+  void StopPlay();
+
+  // APIs relayed to AudioEgress.
+  bool IsSendingMedia() const { return egress_->IsSending(); }
+  AudioSender* GetAudioSender() { return egress_.get(); }
+  void SetEncoder(int payload_type,
+                  const SdpAudioFormat& encoder_format,
+                  std::unique_ptr<AudioEncoder> encoder) {
+    egress_->SetEncoder(payload_type, encoder_format, std::move(encoder));
+  }
+  absl::optional<SdpAudioFormat> GetEncoderFormat() const {
+    return egress_->GetEncoderFormat();
+  }
+
+  // APIs relayed to AudioIngress.
+  bool IsPlaying() const { return ingress_->IsPlaying(); }
+  void ReceivedRTPPacket(rtc::ArrayView<const uint8_t> rtp_packet) {
+    ingress_->ReceivedRTPPacket(rtp_packet);
+  }
+  void ReceivedRTCPPacket(rtc::ArrayView<const uint8_t> rtcp_packet) {
+    ingress_->ReceivedRTCPPacket(rtcp_packet);
+  }
+  void SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs) {
+    ingress_->SetReceiveCodecs(codecs);
+  }
+
+ private:
+  // ChannelId that this audio channel belongs to, for logging purposes.
+  ChannelId id_;
+
+  // Synchronization is handled internally by AudioMixer.
+  AudioMixer* audio_mixer_;
+
+  // Synchronization is handled internally by ProcessThread.
+  ProcessThread* process_thread_;
+
+  // Members are listed in order to ensure safe destruction of the AudioChannel
+  // object. Synchronization for these is handled internally.
+  std::unique_ptr<ReceiveStatistics> receive_statistics_;
+  std::unique_ptr<RtpRtcp> rtp_rtcp_;
+  std::unique_ptr<AudioIngress> ingress_;
+  std::unique_ptr<AudioEgress> egress_;
+};
+
+}  // namespace webrtc
+
+#endif  // AUDIO_VOIP_AUDIO_CHANNEL_H_
diff --git a/audio/voip/audio_egress.cc b/audio/voip/audio_egress.cc
index 98f73fa..a7bc202 100644
--- a/audio/voip/audio_egress.cc
+++ b/audio/voip/audio_egress.cc
@@ -34,18 +34,16 @@
 }
 
 bool AudioEgress::IsSending() const {
-  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   return rtp_rtcp_->SendingMedia();
 }
 
 void AudioEgress::SetEncoder(int payload_type,
                              const SdpAudioFormat& encoder_format,
                              std::unique_ptr<AudioEncoder> encoder) {
-  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   RTC_DCHECK_GE(payload_type, 0);
   RTC_DCHECK_LE(payload_type, 127);
 
-  encoder_format_ = encoder_format;
+  SetEncoderFormat(encoder_format);
 
   // The RTP/RTCP module needs to know the RTP timestamp rate (i.e. clockrate)
   // as well as some other things, so we collect this info and send it along.
@@ -58,20 +56,11 @@
   audio_coding_->SetEncoder(std::move(encoder));
 }
 
-absl::optional<SdpAudioFormat> AudioEgress::GetEncoderFormat() const {
-  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-  return encoder_format_;
-}
-
 void AudioEgress::StartSend() {
-  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-
   rtp_rtcp_->SetSendingMediaStatus(true);
 }
 
 void AudioEgress::StopSend() {
-  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-
   rtp_rtcp_->SetSendingMediaStatus(false);
 }
 
@@ -144,7 +133,6 @@
 
 void AudioEgress::RegisterTelephoneEventType(int rtp_payload_type,
                                              int sample_rate_hz) {
-  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   RTC_DCHECK_GE(rtp_payload_type, 0);
   RTC_DCHECK_LE(rtp_payload_type, 127);
 
@@ -154,7 +142,6 @@
 }
 
 bool AudioEgress::SendTelephoneEvent(int dtmf_event, int duration_ms) {
-  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   RTC_DCHECK_GE(dtmf_event, 0);
   RTC_DCHECK_LE(dtmf_event, 255);
   RTC_DCHECK_GE(duration_ms, 0);
@@ -175,8 +162,6 @@
 }
 
 void AudioEgress::SetMute(bool mute) {
-  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-
   encoder_queue_.PostTask([this, mute] {
     RTC_DCHECK_RUN_ON(&encoder_queue_);
     encoder_context_.mute_ = mute;
diff --git a/audio/voip/audio_egress.h b/audio/voip/audio_egress.h
index 192d5ff..e5632cd 100644
--- a/audio/voip/audio_egress.h
+++ b/audio/voip/audio_egress.h
@@ -34,10 +34,9 @@
 // encoded payload will be packetized by the RTP stack, resulting in ready to
 // send RTP packet to remote endpoint.
 //
-// This class enforces single worker thread access by caller via SequenceChecker
-// in debug mode as expected thread usage pattern. In order to minimize the hold
-// on audio input thread from OS, TaskQueue is employed to encode and send RTP
-// asynchrounously.
+// TaskQueue is used to encode and send RTP asynchronously, as some OS
+// platforms use the same thread for both audio input and output sample
+// delivery, which can affect audio quality.
 //
 // Note that this class is originally based on ChannelSend in
 // audio/channel_send.cc with non-audio related logic trimmed as aimed for
@@ -72,7 +71,10 @@
 
   // Retrieve current encoder format info. This returns encoder format set
   // by SetEncoder() and if encoder is not set, this will return nullopt.
-  absl::optional<SdpAudioFormat> GetEncoderFormat() const;
+  absl::optional<SdpAudioFormat> GetEncoderFormat() const {
+    rtc::CritScope lock(&lock_);
+    return encoder_format_;
+  }
 
   // Register the payload type and sample rate for DTMF (RFC 4733) payload.
   void RegisterTelephoneEventType(int rtp_payload_type, int sample_rate_hz);
@@ -96,12 +98,15 @@
                    size_t payload_size) override;
 
  private:
-  // Ensure that single worker thread access.
-  SequenceChecker worker_thread_checker_;
+  void SetEncoderFormat(const SdpAudioFormat& encoder_format) {
+    rtc::CritScope lock(&lock_);
+    encoder_format_ = encoder_format;
+  }
+
+  rtc::CriticalSection lock_;
 
   // Current encoder format selected by caller.
-  absl::optional<SdpAudioFormat> encoder_format_
-      RTC_GUARDED_BY(worker_thread_checker_);
+  absl::optional<SdpAudioFormat> encoder_format_ RTC_GUARDED_BY(lock_);
 
   // Synchronization is handled internally by RtpRtcp.
   RtpRtcp* const rtp_rtcp_;
diff --git a/audio/voip/audio_ingress.cc b/audio/voip/audio_ingress.cc
index aae6842..fb43fcd 100644
--- a/audio/voip/audio_ingress.cc
+++ b/audio/voip/audio_ingress.cc
@@ -38,27 +38,18 @@
 AudioIngress::AudioIngress(
     RtpRtcp* rtp_rtcp,
     Clock* clock,
-    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
-    std::unique_ptr<ReceiveStatistics> receive_statistics)
+    ReceiveStatistics* receive_statistics,
+    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory)
     : playing_(false),
       remote_ssrc_(0),
       first_rtp_timestamp_(-1),
-      rtp_receive_statistics_(std::move(receive_statistics)),
+      rtp_receive_statistics_(receive_statistics),
       rtp_rtcp_(rtp_rtcp),
       acm_receiver_(CreateAcmConfig(decoder_factory)),
       ntp_estimator_(clock) {}
 
 AudioIngress::~AudioIngress() = default;
 
-void AudioIngress::StartPlay() {
-  playing_ = true;
-}
-
-void AudioIngress::StopPlay() {
-  playing_ = false;
-  output_audio_level_.ResetLevelFullRange();
-}
-
 AudioMixer::Source::AudioFrameInfo AudioIngress::GetAudioFrameWithInfo(
     int sampling_rate,
     AudioFrame* audio_frame) {
@@ -113,17 +104,6 @@
                : AudioMixer::Source::AudioFrameInfo::kNormal;
 }
 
-int AudioIngress::Ssrc() const {
-  return rtc::dchecked_cast<int>(remote_ssrc_.load());
-}
-
-int AudioIngress::PreferredSampleRate() const {
-  // Return the bigger of playout and receive frequency in the ACM. Note that
-  // return 0 means anything higher shouldn't cause any quality loss.
-  return std::max(acm_receiver_.last_packet_sample_rate_hz().value_or(0),
-                  acm_receiver_.last_output_sample_rate_hz());
-}
-
 void AudioIngress::SetReceiveCodecs(
     const std::map<int, SdpAudioFormat>& codecs) {
   {
@@ -135,36 +115,37 @@
   acm_receiver_.SetCodecs(codecs);
 }
 
-void AudioIngress::ReceivedRTPPacket(const uint8_t* data, size_t length) {
-  if (!Playing()) {
+void AudioIngress::ReceivedRTPPacket(rtc::ArrayView<const uint8_t> rtp_packet) {
+  if (!IsPlaying()) {
     return;
   }
 
-  RtpPacketReceived rtp_packet;
-  rtp_packet.Parse(data, length);
+  RtpPacketReceived rtp_packet_received;
+  rtp_packet_received.Parse(rtp_packet.data(), rtp_packet.size());
 
   // Set payload type's sampling rate before we feed it into ReceiveStatistics.
   {
     rtc::CritScope lock(&lock_);
-    const auto& it = receive_codec_info_.find(rtp_packet.PayloadType());
+    const auto& it =
+        receive_codec_info_.find(rtp_packet_received.PayloadType());
     // If sampling rate info is not available in our received codec set, it
     // would mean that remote media endpoint is sending incorrect payload id
     // which can't be processed correctly especially on payload type id in
     // dynamic range.
     if (it == receive_codec_info_.end()) {
       RTC_DLOG(LS_WARNING) << "Unexpected payload id received: "
-                           << rtp_packet.PayloadType();
+                           << rtp_packet_received.PayloadType();
       return;
     }
-    rtp_packet.set_payload_type_frequency(it->second);
+    rtp_packet_received.set_payload_type_frequency(it->second);
   }
 
-  rtp_receive_statistics_->OnRtpPacket(rtp_packet);
+  rtp_receive_statistics_->OnRtpPacket(rtp_packet_received);
 
   RTPHeader header;
-  rtp_packet.GetHeader(&header);
+  rtp_packet_received.GetHeader(&header);
 
-  size_t packet_length = rtp_packet.size();
+  size_t packet_length = rtp_packet_received.size();
   if (packet_length < header.headerLength ||
       (packet_length - header.headerLength) < header.paddingLength) {
     RTC_DLOG(LS_ERROR) << "Packet length(" << packet_length << ") header("
@@ -173,7 +154,7 @@
     return;
   }
 
-  const uint8_t* payload = rtp_packet.data() + header.headerLength;
+  const uint8_t* payload = rtp_packet_received.data() + header.headerLength;
   size_t payload_length = packet_length - header.headerLength;
   size_t payload_data_length = payload_length - header.paddingLength;
   auto data_view = rtc::ArrayView<const uint8_t>(payload, payload_data_length);
@@ -185,9 +166,10 @@
   }
 }
 
-void AudioIngress::ReceivedRTCPPacket(const uint8_t* data, size_t length) {
-  // Deliver RTCP packet to RTP/RTCP module for parsing
-  rtp_rtcp_->IncomingRtcpPacket(data, length);
+void AudioIngress::ReceivedRTCPPacket(
+    rtc::ArrayView<const uint8_t> rtcp_packet) {
+  // Deliver RTCP packet to RTP/RTCP module for parsing.
+  rtp_rtcp_->IncomingRtcpPacket(rtcp_packet.data(), rtcp_packet.size());
 
   int64_t rtt = GetRoundTripTime();
   if (rtt == -1) {
@@ -234,24 +216,4 @@
   return (block_data.has_rtt() ? block_data.last_rtt_ms() : -1);
 }
 
-int AudioIngress::GetSpeechOutputLevelFullRange() const {
-  return output_audio_level_.LevelFullRange();
-}
-
-bool AudioIngress::Playing() const {
-  return playing_;
-}
-
-NetworkStatistics AudioIngress::GetNetworkStatistics() const {
-  NetworkStatistics stats;
-  acm_receiver_.GetNetworkStatistics(&stats);
-  return stats;
-}
-
-AudioDecodingCallStats AudioIngress::GetDecodingStatistics() const {
-  AudioDecodingCallStats stats;
-  acm_receiver_.GetDecodingCallStatistics(&stats);
-  return stats;
-}
-
 }  // namespace webrtc
diff --git a/audio/voip/audio_ingress.h b/audio/voip/audio_ingress.h
index f703440..9976674 100644
--- a/audio/voip/audio_ingress.h
+++ b/audio/voip/audio_ingress.h
@@ -11,6 +11,7 @@
 #ifndef AUDIO_VOIP_AUDIO_INGRESS_H_
 #define AUDIO_VOIP_AUDIO_INGRESS_H_
 
+#include <algorithm>
 #include <atomic>
 #include <map>
 #include <memory>
@@ -45,47 +46,68 @@
  public:
   AudioIngress(RtpRtcp* rtp_rtcp,
                Clock* clock,
-               rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
-               std::unique_ptr<ReceiveStatistics> receive_statistics);
+               ReceiveStatistics* receive_statistics,
+               rtc::scoped_refptr<AudioDecoderFactory> decoder_factory);
   ~AudioIngress() override;
 
   // Start or stop receiving operation of AudioIngress.
-  void StartPlay();
-  void StopPlay();
+  void StartPlay() { playing_ = true; }
+  void StopPlay() {
+    playing_ = false;
+    output_audio_level_.ResetLevelFullRange();
+  }
 
   // Query the state of the AudioIngress.
-  bool Playing() const;
+  bool IsPlaying() const { return playing_; }
 
   // Set the decoder formats and payload type for AcmReceiver where the
   // key type (int) of the map is the payload type of SdpAudioFormat.
   void SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs);
 
   // APIs to handle received RTP/RTCP packets from caller.
-  void ReceivedRTPPacket(const uint8_t* data, size_t length);
-  void ReceivedRTCPPacket(const uint8_t* data, size_t length);
+  void ReceivedRTPPacket(rtc::ArrayView<const uint8_t> rtp_packet);
+  void ReceivedRTCPPacket(rtc::ArrayView<const uint8_t> rtcp_packet);
 
   // Retrieve highest speech output level in last 100 ms.  Note that
   // this isn't RMS but absolute raw audio level on int16_t sample unit.
   // Therefore, the return value will vary between 0 ~ 0xFFFF. This type of
   // value may be useful to be used for measuring active speaker gauge.
-  int GetSpeechOutputLevelFullRange() const;
+  int GetSpeechOutputLevelFullRange() const {
+    return output_audio_level_.LevelFullRange();
+  }
 
   // Returns network round trip time (RTT) measued by RTCP exchange with
   // remote media endpoint. RTT value -1 indicates that it's not initialized.
   int64_t GetRoundTripTime();
 
-  NetworkStatistics GetNetworkStatistics() const;
-  AudioDecodingCallStats GetDecodingStatistics() const;
+  NetworkStatistics GetNetworkStatistics() const {
+    NetworkStatistics stats;
+    acm_receiver_.GetNetworkStatistics(&stats);
+    return stats;
+  }
+  AudioDecodingCallStats GetDecodingStatistics() const {
+    AudioDecodingCallStats stats;
+    acm_receiver_.GetDecodingCallStatistics(&stats);
+    return stats;
+  }
 
   // Implementation of AudioMixer::Source interface.
   AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
       int sampling_rate,
       AudioFrame* audio_frame) override;
-  int Ssrc() const override;
-  int PreferredSampleRate() const override;
+  int Ssrc() const override {
+    return rtc::dchecked_cast<int>(remote_ssrc_.load());
+  }
+  int PreferredSampleRate() const override {
+    // If we haven't received any RTP packets from the remote endpoint, and
+    // thus last_packet_sample_rate_hz is not available, use NetEq's output
+    // sampling rate, as that is what is used for the audio output samples.
+    return std::max(acm_receiver_.last_packet_sample_rate_hz().value_or(0),
+                    acm_receiver_.last_output_sample_rate_hz());
+  }
 
  private:
-  // Indicate AudioIngress status as caller invokes Start/StopPlaying.
+  // Indicates AudioIngress status as the caller invokes StartPlay/StopPlay.
   // If not playing, incoming RTP data processing is skipped, thus
   // producing no data to output device.
   std::atomic<bool> playing_;
@@ -98,7 +120,7 @@
   std::atomic<int64_t> first_rtp_timestamp_;
 
   // Synchronizaton is handled internally by ReceiveStatistics.
-  const std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
+  ReceiveStatistics* const rtp_receive_statistics_;
 
   // Synchronizaton is handled internally by RtpRtcp.
   RtpRtcp* const rtp_rtcp_;
diff --git a/audio/voip/test/BUILD.gn b/audio/voip/test/BUILD.gn
index 0decdb2..39f100a 100644
--- a/audio/voip/test/BUILD.gn
+++ b/audio/voip/test/BUILD.gn
@@ -9,6 +9,42 @@
 import("../../../webrtc.gni")
 
 if (rtc_include_tests) {
+  rtc_library("voip_core_unittests") {
+    testonly = true
+    sources = [ "voip_core_unittest.cc" ]
+    deps = [
+      "..:voip_core",
+      "../../../api/audio_codecs:builtin_audio_decoder_factory",
+      "../../../api/audio_codecs:builtin_audio_encoder_factory",
+      "../../../api/task_queue:default_task_queue_factory",
+      "../../../modules/audio_device:mock_audio_device",
+      "../../../modules/audio_processing:mocks",
+      "../../../test:audio_codec_mocks",
+      "../../../test:mock_transport",
+      "../../../test:test_support",
+    ]
+  }
+
+  rtc_library("audio_channel_unittests") {
+    testonly = true
+    sources = [ "audio_channel_unittest.cc" ]
+    deps = [
+      "..:audio_channel",
+      "../../../api:transport_api",
+      "../../../api/audio_codecs:builtin_audio_decoder_factory",
+      "../../../api/audio_codecs:builtin_audio_encoder_factory",
+      "../../../api/task_queue:default_task_queue_factory",
+      "../../../modules/audio_mixer:audio_mixer_impl",
+      "../../../modules/audio_mixer:audio_mixer_test_utils",
+      "../../../modules/rtp_rtcp:rtp_rtcp_format",
+      "../../../modules/utility",
+      "../../../rtc_base:logging",
+      "../../../rtc_base:rtc_event",
+      "../../../test:mock_transport",
+      "../../../test:test_support",
+    ]
+  }
+
   rtc_library("audio_ingress_unittests") {
     testonly = true
     sources = [ "audio_ingress_unittest.cc" ]
diff --git a/audio/voip/test/audio_channel_unittest.cc b/audio/voip/test/audio_channel_unittest.cc
new file mode 100644
index 0000000..ce55782
--- /dev/null
+++ b/audio/voip/test/audio_channel_unittest.cc
@@ -0,0 +1,143 @@
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/voip/audio_channel.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "api/call/transport.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/audio_mixer/sine_wave_generator.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/utility/include/process_thread.h"
+#include "rtc_base/event.h"
+#include "rtc_base/logging.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_transport.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::Unused;
+
+constexpr uint64_t kStartTime = 123456789;
+constexpr uint32_t kLocalSsrc = 0xdeadc0de;
+constexpr int16_t kAudioLevel = 3004;  // used for sine wave level
+constexpr int kPcmuPayload = 0;
+
+class AudioChannelTest : public ::testing::Test {
+ public:
+  const SdpAudioFormat kPcmuFormat = {"pcmu", 8000, 1};
+
+  AudioChannelTest()
+      : fake_clock_(kStartTime), wave_generator_(1000.0, kAudioLevel) {
+    process_thread_ = ProcessThread::Create("ModuleProcessThread");
+    audio_mixer_ = AudioMixerImpl::Create();
+    task_queue_factory_ = CreateDefaultTaskQueueFactory();
+    encoder_factory_ = CreateBuiltinAudioEncoderFactory();
+    decoder_factory_ = CreateBuiltinAudioDecoderFactory();
+  }
+
+  void SetUp() override {
+    audio_channel_ = new rtc::RefCountedObject<AudioChannel>(
+        &transport_, kLocalSsrc, task_queue_factory_.get(),
+        process_thread_.get(), audio_mixer_.get(), decoder_factory_);
+
+    audio_channel_->SetEncoder(kPcmuPayload, kPcmuFormat,
+                               encoder_factory_->MakeAudioEncoder(
+                                   kPcmuPayload, kPcmuFormat, absl::nullopt));
+    audio_channel_->SetReceiveCodecs({{kPcmuPayload, kPcmuFormat}});
+    audio_channel_->StartSend();
+    audio_channel_->StartPlay();
+  }
+
+  void TearDown() override {
+    audio_channel_->StopSend();
+    audio_channel_->StopPlay();
+    audio_channel_ = nullptr;
+  }
+
+  std::unique_ptr<AudioFrame> GetAudioFrame(int order) {
+    auto frame = std::make_unique<AudioFrame>();
+    frame->sample_rate_hz_ = kPcmuFormat.clockrate_hz;
+    frame->samples_per_channel_ = kPcmuFormat.clockrate_hz / 100;  // 10 ms.
+    frame->num_channels_ = kPcmuFormat.num_channels;
+    frame->timestamp_ = frame->samples_per_channel_ * order;
+    wave_generator_.GenerateNextFrame(frame.get());
+    return frame;
+  }
+
+  SimulatedClock fake_clock_;
+  SineWaveGenerator wave_generator_;
+  NiceMock<MockTransport> transport_;
+  std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+  rtc::scoped_refptr<AudioMixer> audio_mixer_;
+  rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
+  rtc::scoped_refptr<AudioEncoderFactory> encoder_factory_;
+  std::unique_ptr<ProcessThread> process_thread_;
+  rtc::scoped_refptr<AudioChannel> audio_channel_;
+};
+
+// Validates RTP packet generation by feeding audio frames with a sine wave.
+// The resulting RTP packet is looped back into AudioChannel and decoded into
+// an audio frame, which should carry some signal to indicate its validity.
+TEST_F(AudioChannelTest, PlayRtpByLocalLoop) {
+  rtc::Event event;
+  auto loop_rtp = [&](const uint8_t* packet, size_t length, Unused) {
+    audio_channel_->ReceivedRTPPacket(
+        rtc::ArrayView<const uint8_t>(packet, length));
+    event.Set();
+    return true;
+  };
+  EXPECT_CALL(transport_, SendRtp).WillOnce(Invoke(loop_rtp));
+
+  auto audio_sender = audio_channel_->GetAudioSender();
+  audio_sender->SendAudioData(GetAudioFrame(0));
+  audio_sender->SendAudioData(GetAudioFrame(1));
+
+  event.Wait(/*ms=*/1000);
+
+  AudioFrame empty_frame, audio_frame;
+  empty_frame.Mute();
+  empty_frame.mutable_data();  // This will zero out the data.
+  audio_frame.CopyFrom(empty_frame);
+  audio_mixer_->Mix(/*number_of_channels*/ 1, &audio_frame);
+
+  // We now expect the audio frame to have picked up some signal.
+  EXPECT_NE(memcmp(empty_frame.data(), audio_frame.data(),
+                   AudioFrame::kMaxDataSizeBytes),
+            0);
+}
+
+// Validates that the assigned local SSRC ends up in the generated RTP packet.
+TEST_F(AudioChannelTest, VerifyLocalSsrcAsAssigned) {
+  RtpPacketReceived rtp;
+  rtc::Event event;
+  auto loop_rtp = [&](const uint8_t* packet, size_t length, Unused) {
+    rtp.Parse(packet, length);
+    event.Set();
+    return true;
+  };
+  EXPECT_CALL(transport_, SendRtp).WillOnce(Invoke(loop_rtp));
+
+  auto audio_sender = audio_channel_->GetAudioSender();
+  audio_sender->SendAudioData(GetAudioFrame(0));
+  audio_sender->SendAudioData(GetAudioFrame(1));
+
+  event.Wait(/*ms=*/1000);
+
+  EXPECT_EQ(rtp.Ssrc(), kLocalSsrc);
+}
+
+}  // namespace
+}  // namespace webrtc
diff --git a/audio/voip/test/audio_egress_unittest.cc b/audio/voip/test/audio_egress_unittest.cc
index a7e3d65..3391265 100644
--- a/audio/voip/test/audio_egress_unittest.cc
+++ b/audio/voip/test/audio_egress_unittest.cc
@@ -76,6 +76,7 @@
 
   // Make sure we have shut down rtp stack and reset egress for each test.
   void TearDown() override {
+    egress_->StopSend();
     rtp_rtcp_->SetSendingStatus(false);
     egress_.reset();
   }
@@ -99,10 +100,10 @@
   SimulatedClock fake_clock_;
   NiceMock<MockTransport> transport_;
   SineWaveGenerator wave_generator_;
-  std::unique_ptr<AudioEgress> egress_;
-  std::unique_ptr<TaskQueueFactory> task_queue_factory_;
   std::unique_ptr<RtpRtcp> rtp_rtcp_;
+  std::unique_ptr<TaskQueueFactory> task_queue_factory_;
   rtc::scoped_refptr<AudioEncoderFactory> encoder_factory_;
+  std::unique_ptr<AudioEgress> egress_;
 };
 
 TEST_F(AudioEgressTest, SendingStatusAfterStartAndStop) {
diff --git a/audio/voip/test/audio_ingress_unittest.cc b/audio/voip/test/audio_ingress_unittest.cc
index 752c06c..bedb82e 100644
--- a/audio/voip/test/audio_ingress_unittest.cc
+++ b/audio/voip/test/audio_ingress_unittest.cc
@@ -30,26 +30,26 @@
 
 constexpr int16_t kAudioLevel = 3004;  // Used for sine wave level.
 
-std::unique_ptr<RtpRtcp> CreateRtpStack(Clock* clock, Transport* transport) {
-  RtpRtcp::Configuration rtp_config;
-  rtp_config.clock = clock;
-  rtp_config.audio = true;
-  rtp_config.rtcp_report_interval_ms = 5000;
-  rtp_config.outgoing_transport = transport;
-  rtp_config.local_media_ssrc = 0xdeadc0de;
-  auto rtp_rtcp = RtpRtcp::Create(rtp_config);
-  rtp_rtcp->SetSendingMediaStatus(false);
-  rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound);
-  return rtp_rtcp;
-}
-
 class AudioIngressTest : public ::testing::Test {
  public:
   const SdpAudioFormat kPcmuFormat = {"pcmu", 8000, 1};
 
   AudioIngressTest()
       : fake_clock_(123456789), wave_generator_(1000.0, kAudioLevel) {
-    rtp_rtcp_ = CreateRtpStack(&fake_clock_, &transport_);
+    receive_statistics_ = ReceiveStatistics::Create(&fake_clock_);
+
+    RtpRtcp::Configuration rtp_config;
+    rtp_config.clock = &fake_clock_;
+    rtp_config.audio = true;
+    rtp_config.receive_statistics = receive_statistics_.get();
+    rtp_config.rtcp_report_interval_ms = 5000;
+    rtp_config.outgoing_transport = &transport_;
+    rtp_config.local_media_ssrc = 0xdeadc0de;
+    rtp_rtcp_ = RtpRtcp::Create(rtp_config);
+
+    rtp_rtcp_->SetSendingMediaStatus(false);
+    rtp_rtcp_->SetRTCPStatus(RtcpMode::kCompound);
+
     task_queue_factory_ = CreateDefaultTaskQueueFactory();
     encoder_factory_ = CreateBuiltinAudioEncoderFactory();
     decoder_factory_ = CreateBuiltinAudioDecoderFactory();
@@ -57,9 +57,9 @@
 
   void SetUp() override {
     constexpr int kPcmuPayload = 0;
-    ingress_ = std::make_unique<AudioIngress>(
-        rtp_rtcp_.get(), &fake_clock_, decoder_factory_,
-        ReceiveStatistics::Create(&fake_clock_));
+    ingress_ = std::make_unique<AudioIngress>(rtp_rtcp_.get(), &fake_clock_,
+                                              receive_statistics_.get(),
+                                              decoder_factory_);
     ingress_->SetReceiveCodecs({{kPcmuPayload, kPcmuFormat}});
 
     egress_ = std::make_unique<AudioEgress>(rtp_rtcp_.get(), &fake_clock_,
@@ -76,6 +76,8 @@
     rtp_rtcp_->SetSendingStatus(false);
     ingress_->StopPlay();
     egress_->StopSend();
+    egress_.reset();
+    ingress_.reset();
   }
 
   std::unique_ptr<AudioFrame> GetAudioFrame(int order) {
@@ -91,25 +93,25 @@
   SimulatedClock fake_clock_;
   SineWaveGenerator wave_generator_;
   NiceMock<MockTransport> transport_;
-  std::unique_ptr<AudioIngress> ingress_;
-  rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
-  // Members used to drive the input to ingress.
-  std::unique_ptr<AudioEgress> egress_;
-  std::unique_ptr<TaskQueueFactory> task_queue_factory_;
-  std::shared_ptr<RtpRtcp> rtp_rtcp_;
+  std::unique_ptr<ReceiveStatistics> receive_statistics_;
+  std::unique_ptr<RtpRtcp> rtp_rtcp_;
   rtc::scoped_refptr<AudioEncoderFactory> encoder_factory_;
+  rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
+  std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+  std::unique_ptr<AudioIngress> ingress_;
+  std::unique_ptr<AudioEgress> egress_;
 };
 
 TEST_F(AudioIngressTest, PlayingAfterStartAndStop) {
-  EXPECT_EQ(ingress_->Playing(), true);
+  EXPECT_EQ(ingress_->IsPlaying(), true);
   ingress_->StopPlay();
-  EXPECT_EQ(ingress_->Playing(), false);
+  EXPECT_EQ(ingress_->IsPlaying(), false);
 }
 
 TEST_F(AudioIngressTest, GetAudioFrameAfterRtpReceived) {
   rtc::Event event;
   auto handle_rtp = [&](const uint8_t* packet, size_t length, Unused) {
-    ingress_->ReceivedRTPPacket(packet, length);
+    ingress_->ReceivedRTPPacket(rtc::ArrayView<const uint8_t>(packet, length));
     event.Set();
     return true;
   };
@@ -137,7 +139,7 @@
   int rtp_count = 0;
   rtc::Event event;
   auto handle_rtp = [&](const uint8_t* packet, size_t length, Unused) {
-    ingress_->ReceivedRTPPacket(packet, length);
+    ingress_->ReceivedRTPPacket(rtc::ArrayView<const uint8_t>(packet, length));
     if (++rtp_count == kNumRtp) {
       event.Set();
     }
@@ -162,7 +164,7 @@
 TEST_F(AudioIngressTest, PreferredSampleRate) {
   rtc::Event event;
   auto handle_rtp = [&](const uint8_t* packet, size_t length, Unused) {
-    ingress_->ReceivedRTPPacket(packet, length);
+    ingress_->ReceivedRTPPacket(rtc::ArrayView<const uint8_t>(packet, length));
     event.Set();
     return true;
   };
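
A note on the API change above: AudioIngress now takes incoming packets as
rtc::ArrayView<const uint8_t> instead of a raw pointer/length pair. Below is a
minimal caller-side sketch for illustration only (not included in the change);
OnRtpFromNetwork is a hypothetical helper name.

  #include <cstddef>
  #include <cstdint>

  #include "api/array_view.h"
  #include "audio/voip/audio_ingress.h"

  // Hypothetical glue: wrap a raw network buffer into an ArrayView before
  // handing it to AudioIngress, mirroring the updated test callbacks above.
  void OnRtpFromNetwork(webrtc::AudioIngress& ingress,
                        const uint8_t* packet,
                        size_t length) {
    ingress.ReceivedRTPPacket(rtc::ArrayView<const uint8_t>(packet, length));
  }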
diff --git a/audio/voip/test/voip_core_unittest.cc b/audio/voip/test/voip_core_unittest.cc
new file mode 100644
index 0000000..c1969d6
--- /dev/null
+++ b/audio/voip/test/voip_core_unittest.cc
@@ -0,0 +1,100 @@
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/voip/voip_core.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "modules/audio_device/include/mock_audio_device.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "test/gtest.h"
+#include "test/mock_transport.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::NiceMock;
+using ::testing::Return;
+
+constexpr int kPcmuPayload = 0;
+
+class VoipCoreTest : public ::testing::Test {
+ public:
+  const SdpAudioFormat kPcmuFormat = {"pcmu", 8000, 1};
+
+  VoipCoreTest() { audio_device_ = test::MockAudioDeviceModule::CreateNice(); }
+
+  void SetUp() override {
+    auto encoder_factory = CreateBuiltinAudioEncoderFactory();
+    auto decoder_factory = CreateBuiltinAudioDecoderFactory();
+    rtc::scoped_refptr<AudioProcessing> audio_processing =
+        new rtc::RefCountedObject<test::MockAudioProcessing>();
+
+    voip_core_ = std::make_unique<VoipCore>();
+    voip_core_->Init(std::move(encoder_factory), std::move(decoder_factory),
+                     CreateDefaultTaskQueueFactory(), audio_device_,
+                     std::move(audio_processing));
+  }
+
+  std::unique_ptr<VoipCore> voip_core_;
+  NiceMock<MockTransport> transport_;
+  rtc::scoped_refptr<test::MockAudioDeviceModule> audio_device_;
+};
+
+// Validates the expected API call flow through VoipCore. Part of the
+// verification is done by checking expectations on the mock audio device.
+TEST_F(VoipCoreTest, BasicVoipCoreOperation) {
+  // Program the mock as non-operational and ready to start.
+  EXPECT_CALL(*audio_device_, Recording()).WillOnce(Return(false));
+  EXPECT_CALL(*audio_device_, Playing()).WillOnce(Return(false));
+  EXPECT_CALL(*audio_device_, InitRecording()).WillOnce(Return(0));
+  EXPECT_CALL(*audio_device_, InitPlayout()).WillOnce(Return(0));
+  EXPECT_CALL(*audio_device_, StartRecording()).WillOnce(Return(0));
+  EXPECT_CALL(*audio_device_, StartPlayout()).WillOnce(Return(0));
+
+  auto channel = voip_core_->CreateChannel(&transport_, 0xdeadc0de);
+  EXPECT_TRUE(channel);
+
+  voip_core_->SetSendCodec(*channel, kPcmuPayload, kPcmuFormat);
+  voip_core_->SetReceiveCodecs(*channel, {{kPcmuPayload, kPcmuFormat}});
+
+  EXPECT_TRUE(voip_core_->StartSend(*channel));
+  EXPECT_TRUE(voip_core_->StartPlayout(*channel));
+
+  // Program the mock as operational and ready to be stopped.
+  EXPECT_CALL(*audio_device_, Recording()).WillOnce(Return(true));
+  EXPECT_CALL(*audio_device_, Playing()).WillOnce(Return(true));
+  EXPECT_CALL(*audio_device_, StopRecording()).WillOnce(Return(0));
+  EXPECT_CALL(*audio_device_, StopPlayout()).WillOnce(Return(0));
+
+  EXPECT_TRUE(voip_core_->StopSend(*channel));
+  EXPECT_TRUE(voip_core_->StopPlayout(*channel));
+  voip_core_->ReleaseChannel(*channel);
+}
+
+TEST_F(VoipCoreTest, ExpectFailToUseReleasedChannelId) {
+  auto channel = voip_core_->CreateChannel(&transport_, 0xdeadc0de);
+  EXPECT_TRUE(channel);
+
+  // Release right after creation.
+  voip_core_->ReleaseChannel(*channel);
+
+  // Now try to use the released channel.
+
+  // These calls should be no-ops.
+  voip_core_->SetSendCodec(*channel, kPcmuPayload, kPcmuFormat);
+  voip_core_->SetReceiveCodecs(*channel, {{kPcmuPayload, kPcmuFormat}});
+
+  EXPECT_FALSE(voip_core_->StartSend(*channel));
+  EXPECT_FALSE(voip_core_->StartPlayout(*channel));
+}
+
+}  // namespace
+}  // namespace webrtc
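
For reference, a condensed sketch of the same channel life cycle outside the
test harness (illustration only, not part of the change). It assumes
|transport| and |adm| are caller-provided webrtc::Transport and
AudioDeviceModule instances; the mocks above stand in for them in the unit
test.

  #include <memory>

  #include "api/audio_codecs/builtin_audio_decoder_factory.h"
  #include "api/audio_codecs/builtin_audio_encoder_factory.h"
  #include "api/task_queue/default_task_queue_factory.h"
  #include "audio/voip/voip_core.h"

  void RunBasicCall(webrtc::Transport* transport,
                    rtc::scoped_refptr<webrtc::AudioDeviceModule> adm) {
    auto voip = std::make_unique<webrtc::VoipCore>();
    voip->Init(webrtc::CreateBuiltinAudioEncoderFactory(),
               webrtc::CreateBuiltinAudioDecoderFactory(),
               webrtc::CreateDefaultTaskQueueFactory(), adm,
               /*audio_processing=*/nullptr);

    auto channel = voip->CreateChannel(transport, /*local_ssrc=*/absl::nullopt);
    if (!channel) {
      return;
    }
    const webrtc::SdpAudioFormat pcmu("pcmu", 8000, 1);
    voip->SetSendCodec(*channel, /*payload_type=*/0, pcmu);
    voip->SetReceiveCodecs(*channel, {{0, pcmu}});
    voip->StartSend(*channel);
    voip->StartPlayout(*channel);
    // ... exchange RTP/RTCP through |transport| ...
    voip->StopSend(*channel);
    voip->StopPlayout(*channel);
    voip->ReleaseChannel(*channel);
  }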
diff --git a/audio/voip/voip_core.cc b/audio/voip/voip_core.cc
new file mode 100644
index 0000000..3275f02
--- /dev/null
+++ b/audio/voip/voip_core.cc
@@ -0,0 +1,348 @@
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/voip/voip_core.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#include "api/audio_codecs/audio_format.h"
+#include "rtc_base/critical_section.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+// On Windows, use the dedicated enum value to select the default audio device,
+// as defined in AudioDeviceModule::WindowsDeviceType.
+#if defined(WEBRTC_WIN)
+constexpr AudioDeviceModule::WindowsDeviceType kAudioDeviceId =
+    AudioDeviceModule::WindowsDeviceType::kDefaultCommunicationDevice;
+#else
+constexpr uint16_t kAudioDeviceId = 0;
+#endif  // defined(WEBRTC_WIN)
+
+// Upper bound on ChannelId values. This can be increased without any side
+// effect; it is only kept at this moderate value to keep logged channel ids
+// easy to read.
+static constexpr int kMaxChannelId = 100000;
+
+}  // namespace
+
+bool VoipCore::Init(rtc::scoped_refptr<AudioEncoderFactory> encoder_factory,
+                    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
+                    std::unique_ptr<TaskQueueFactory> task_queue_factory,
+                    rtc::scoped_refptr<AudioDeviceModule> audio_device_module,
+                    rtc::scoped_refptr<AudioProcessing> audio_processing) {
+  encoder_factory_ = std::move(encoder_factory);
+  decoder_factory_ = std::move(decoder_factory);
+  task_queue_factory_ = std::move(task_queue_factory);
+  audio_device_module_ = std::move(audio_device_module);
+
+  process_thread_ = ProcessThread::Create("ModuleProcessThread");
+  audio_mixer_ = AudioMixerImpl::Create();
+
+  if (audio_processing) {
+    audio_processing_ = std::move(audio_processing);
+    AudioProcessing::Config apm_config = audio_processing_->GetConfig();
+    apm_config.echo_canceller.enabled = true;
+    audio_processing_->ApplyConfig(apm_config);
+  }
+
+  // AudioTransportImpl depends on audio mixer and audio processing instances.
+  audio_transport_ = std::make_unique<AudioTransportImpl>(
+      audio_mixer_.get(), audio_processing_.get());
+
+  // Initialize ADM.
+  if (audio_device_module_->Init() != 0) {
+    RTC_LOG(LS_ERROR) << "Failed to initialize the ADM.";
+    return false;
+  }
+
+  // Note that failures to initialize the default recording/speaker devices are
+  // not considered fatal here. In certain cases the caller may not care whether
+  // the recording device works (e.g. a webinar where only the speaker is
+  // needed), and other available audio devices may still work.
+  // TODO(natim@webrtc.org): consider moving this part out of initialization.
+
+  // Initialize default speaker device.
+  if (audio_device_module_->SetPlayoutDevice(kAudioDeviceId) != 0) {
+    RTC_LOG(LS_WARNING) << "Unable to set playout device.";
+  }
+  if (audio_device_module_->InitSpeaker() != 0) {
+    RTC_LOG(LS_WARNING) << "Unable to access speaker.";
+  }
+
+  // Initialize default recording device.
+  if (audio_device_module_->SetRecordingDevice(kAudioDeviceId) != 0) {
+    RTC_LOG(LS_WARNING) << "Unable to set recording device.";
+  }
+  if (audio_device_module_->InitMicrophone() != 0) {
+    RTC_LOG(LS_WARNING) << "Unable to access microphone.";
+  }
+
+  // Set number of channels on speaker device.
+  bool available = false;
+  if (audio_device_module_->StereoPlayoutIsAvailable(&available) != 0) {
+    RTC_LOG(LS_WARNING) << "Unable to query stereo playout.";
+  }
+  if (audio_device_module_->SetStereoPlayout(available) != 0) {
+    RTC_LOG(LS_WARNING) << "Unable to set mono/stereo playout mode.";
+  }
+
+  // Set number of channels on recording device.
+  available = false;
+  if (audio_device_module_->StereoRecordingIsAvailable(&available) != 0) {
+    RTC_LOG(LS_WARNING) << "Unable to query stereo recording.";
+  }
+  if (audio_device_module_->SetStereoRecording(available) != 0) {
+    RTC_LOG(LS_WARNING) << "Unable to set stereo recording mode.";
+  }
+
+  if (audio_device_module_->RegisterAudioCallback(audio_transport_.get()) !=
+      0) {
+    RTC_LOG(LS_WARNING) << "Unable to register audio callback.";
+  }
+
+  return true;
+}
+
+absl::optional<ChannelId> VoipCore::CreateChannel(
+    Transport* transport,
+    absl::optional<uint32_t> local_ssrc) {
+  absl::optional<ChannelId> channel;
+
+  // Set the local SSRC to a random value if not provided by the caller.
+  if (!local_ssrc) {
+    Random random(rtc::TimeMicros());
+    local_ssrc = random.Rand<uint32_t>();
+  }
+
+  rtc::scoped_refptr<AudioChannel> audio_channel =
+      new rtc::RefCountedObject<AudioChannel>(
+          transport, local_ssrc.value(), task_queue_factory_.get(),
+          process_thread_.get(), audio_mixer_.get(), decoder_factory_);
+
+  {
+    rtc::CritScope lock(&lock_);
+
+    channel = static_cast<ChannelId>(next_channel_id_);
+    channels_[*channel] = audio_channel;
+    next_channel_id_++;
+    if (next_channel_id_ >= kMaxChannelId) {
+      next_channel_id_ = 0;
+    }
+  }
+
+  // Set the ChannelId in the audio channel for logging/debugging purposes.
+  audio_channel->SetId(*channel);
+
+  return channel;
+}
+
+void VoipCore::ReleaseChannel(ChannelId channel) {
+  // Destroy channel outside of the lock.
+  rtc::scoped_refptr<AudioChannel> audio_channel;
+  {
+    rtc::CritScope lock(&lock_);
+
+    auto iter = channels_.find(channel);
+    if (iter != channels_.end()) {
+      audio_channel = std::move(iter->second);
+      channels_.erase(iter);
+    }
+  }
+  if (!audio_channel) {
+    RTC_LOG(LS_WARNING) << "Channel " << channel << " not found";
+  }
+}
+
+rtc::scoped_refptr<AudioChannel> VoipCore::GetChannel(ChannelId channel) {
+  rtc::scoped_refptr<AudioChannel> audio_channel;
+  {
+    rtc::CritScope lock(&lock_);
+    auto iter = channels_.find(channel);
+    if (iter != channels_.end()) {
+      audio_channel = iter->second;
+    }
+  }
+  if (!audio_channel) {
+    RTC_LOG(LS_ERROR) << "Channel " << channel << " not found";
+  }
+  return audio_channel;
+}
+
+bool VoipCore::UpdateAudioTransportWithSenders() {
+  std::vector<AudioSender*> audio_senders;
+
+  // Gather the list of audio channels that are currently sending, along with
+  // the highest sampling rate and channel count, to configure the audio
+  // transport.
+  int max_sampling_rate = 8000;
+  size_t max_num_channels = 1;
+  {
+    rtc::CritScope lock(&lock_);
+    // Reserve up front to avoid reallocating the vector at run time.
+    audio_senders.reserve(channels_.size());
+    for (auto kv : channels_) {
+      rtc::scoped_refptr<AudioChannel>& channel = kv.second;
+      if (channel->IsSendingMedia()) {
+        auto encoder_format = channel->GetEncoderFormat();
+        if (!encoder_format) {
+          RTC_LOG(LS_ERROR)
+              << "channel " << channel->GetId() << " encoder is not set";
+          continue;
+        }
+        audio_senders.push_back(channel->GetAudioSender());
+        max_sampling_rate =
+            std::max(max_sampling_rate, encoder_format->clockrate_hz);
+        max_num_channels =
+            std::max(max_num_channels, encoder_format->num_channels);
+      }
+    }
+  }
+
+  audio_transport_->UpdateAudioSenders(audio_senders, max_sampling_rate,
+                                       max_num_channels);
+
+  // Depending on the availability of senders, turn ADM recording on or off.
+  if (!audio_senders.empty()) {
+    if (!audio_device_module_->Recording()) {
+      if (audio_device_module_->InitRecording() != 0) {
+        RTC_LOG(LS_ERROR) << "InitRecording failed";
+        return false;
+      }
+      if (audio_device_module_->StartRecording() != 0) {
+        RTC_LOG(LS_ERROR) << "StartRecording failed";
+        return false;
+      }
+    }
+  } else {
+    if (audio_device_module_->Recording() &&
+        audio_device_module_->StopRecording() != 0) {
+      RTC_LOG(LS_ERROR) << "StopRecording failed";
+      return false;
+    }
+  }
+  return true;
+}
+
+bool VoipCore::StartSend(ChannelId channel) {
+  auto audio_channel = GetChannel(channel);
+  if (!audio_channel) {
+    return false;
+  }
+
+  audio_channel->StartSend();
+
+  return UpdateAudioTransportWithSenders();
+}
+
+bool VoipCore::StopSend(ChannelId channel) {
+  auto audio_channel = GetChannel(channel);
+  if (!audio_channel) {
+    return false;
+  }
+
+  audio_channel->StopSend();
+
+  return UpdateAudioTransportWithSenders();
+}
+
+bool VoipCore::StartPlayout(ChannelId channel) {
+  auto audio_channel = GetChannel(channel);
+  if (!audio_channel) {
+    return false;
+  }
+
+  audio_channel->StartPlay();
+
+  if (!audio_device_module_->Playing()) {
+    if (audio_device_module_->InitPlayout() != 0) {
+      RTC_LOG(LS_ERROR) << "InitPlayout failed";
+      return false;
+    }
+    if (audio_device_module_->StartPlayout() != 0) {
+      RTC_LOG(LS_ERROR) << "StartPlayout failed";
+      return false;
+    }
+  }
+  return true;
+}
+
+bool VoipCore::StopPlayout(ChannelId channel) {
+  auto audio_channel = GetChannel(channel);
+  if (!audio_channel) {
+    return false;
+  }
+
+  audio_channel->StopPlay();
+
+  bool stop_device = true;
+  {
+    rtc::CritScope lock(&lock_);
+    for (auto kv : channels_) {
+      rtc::scoped_refptr<AudioChannel>& channel = kv.second;
+      if (channel->IsPlaying()) {
+        stop_device = false;
+        break;
+      }
+    }
+  }
+
+  if (stop_device && audio_device_module_->Playing()) {
+    if (audio_device_module_->StopPlayout() != 0) {
+      RTC_LOG(LS_ERROR) << "StopPlayout failed";
+      return false;
+    }
+  }
+  return true;
+}
+
+void VoipCore::ReceivedRTPPacket(ChannelId channel,
+                                 rtc::ArrayView<const uint8_t> rtp_packet) {
+  // Failure to locate channel is logged internally in GetChannel.
+  if (auto audio_channel = GetChannel(channel)) {
+    audio_channel->ReceivedRTPPacket(rtp_packet);
+  }
+}
+
+void VoipCore::ReceivedRTCPPacket(ChannelId channel,
+                                  rtc::ArrayView<const uint8_t> rtcp_packet) {
+  // Failure to locate channel is logged internally in GetChannel.
+  if (auto audio_channel = GetChannel(channel)) {
+    audio_channel->ReceivedRTCPPacket(rtcp_packet);
+  }
+}
+
+void VoipCore::SetSendCodec(ChannelId channel,
+                            int payload_type,
+                            const SdpAudioFormat& encoder_format) {
+  // Failure to locate channel is logged internally in GetChannel.
+  if (auto audio_channel = GetChannel(channel)) {
+    auto encoder = encoder_factory_->MakeAudioEncoder(
+        payload_type, encoder_format, absl::nullopt);
+    audio_channel->SetEncoder(payload_type, encoder_format, std::move(encoder));
+  }
+}
+
+void VoipCore::SetReceiveCodecs(
+    ChannelId channel,
+    const std::map<int, SdpAudioFormat>& decoder_specs) {
+  // Failure to locate channel is logged internally in GetChannel.
+  if (auto audio_channel = GetChannel(channel)) {
+    audio_channel->SetReceiveCodecs(decoder_specs);
+  }
+}
+
+}  // namespace webrtc
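
The Received*Packet methods above expect the caller to have already classified
an incoming packet as RTP or RTCP. A minimal routing sketch for illustration
only (not part of the change), using the usual RFC 5761 payload-type heuristic:

  #include "api/array_view.h"
  #include "audio/voip/voip_core.h"

  // Hypothetical routing helper: forwards a received packet to the given
  // VoipCore channel, picking the RTP or RTCP entry point based on the
  // payload-type range reserved for RTCP (RFC 5761).
  void ForwardPacket(webrtc::VoipCore& voip,
                     webrtc::ChannelId channel,
                     rtc::ArrayView<const uint8_t> packet) {
    const bool looks_like_rtcp = packet.size() >= 2 &&
                                 (packet[1] & 0x7F) >= 64 &&
                                 (packet[1] & 0x7F) < 96;
    if (looks_like_rtcp) {
      voip.ReceivedRTCPPacket(channel, packet);
    } else {
      voip.ReceivedRTPPacket(channel, packet);
    }
  }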
diff --git a/audio/voip/voip_core.h b/audio/voip/voip_core.h
new file mode 100644
index 0000000..08929d3
--- /dev/null
+++ b/audio/voip/voip_core.h
@@ -0,0 +1,139 @@
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_VOIP_VOIP_CORE_H_
+#define AUDIO_VOIP_VOIP_CORE_H_
+
+#include <map>
+#include <memory>
+#include <queue>
+#include <unordered_map>
+#include <vector>
+
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/audio_codecs/audio_encoder_factory.h"
+#include "api/scoped_refptr.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/voip/voip_base.h"
+#include "api/voip/voip_codec.h"
+#include "api/voip/voip_engine.h"
+#include "api/voip/voip_network.h"
+#include "audio/audio_transport_impl.h"
+#include "audio/voip/audio_channel.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/utility/include/process_thread.h"
+#include "rtc_base/critical_section.h"
+
+namespace webrtc {
+
+// VoipCore is the implementation of the VoIP APIs listed in the api/voip
+// directory. It manages a set of AudioChannel objects, each mapped to a
+// ChannelId (int). The ChannelId is the primary key used to locate the
+// specific AudioChannel on which a requested VoIP API should operate.
+//
+// This class receives the required audio components from the caller at
+// initialization and owns their life cycle to orchestrate the proper
+// destruction sequence.
+class VoipCore : public VoipEngine,
+                 public VoipBase,
+                 public VoipNetwork,
+                 public VoipCodec {
+ public:
+  ~VoipCore() override = default;
+
+  // Initializes the VoipCore components with the provided arguments.
+  // Returns false only when |audio_device_module| fails to initialize, which
+  // would presumably render further processing useless.
+  // TODO(natim@webrtc.org): Need to report audio device errors to user layer.
+  bool Init(rtc::scoped_refptr<AudioEncoderFactory> encoder_factory,
+            rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
+            std::unique_ptr<TaskQueueFactory> task_queue_factory,
+            rtc::scoped_refptr<AudioDeviceModule> audio_device_module,
+            rtc::scoped_refptr<AudioProcessing> audio_processing);
+
+  // Implements VoipEngine interfaces.
+  VoipBase& Base() override { return *this; }
+  VoipNetwork& Network() override { return *this; }
+  VoipCodec& Codec() override { return *this; }
+
+  // Implements VoipBase interfaces.
+  absl::optional<ChannelId> CreateChannel(
+      Transport* transport,
+      absl::optional<uint32_t> local_ssrc) override;
+  void ReleaseChannel(ChannelId channel) override;
+  bool StartSend(ChannelId channel) override;
+  bool StopSend(ChannelId channel) override;
+  bool StartPlayout(ChannelId channel) override;
+  bool StopPlayout(ChannelId channel) override;
+
+  // Implements VoipNetwork interfaces.
+  void ReceivedRTPPacket(ChannelId channel,
+                         rtc::ArrayView<const uint8_t> rtp_packet) override;
+  void ReceivedRTCPPacket(ChannelId channel,
+                          rtc::ArrayView<const uint8_t> rtcp_packet) override;
+
+  // Implements VoipCodec interfaces.
+  void SetSendCodec(ChannelId channel,
+                    int payload_type,
+                    const SdpAudioFormat& encoder_format) override;
+  void SetReceiveCodecs(
+      ChannelId channel,
+      const std::map<int, SdpAudioFormat>& decoder_specs) override;
+
+ private:
+  // Fetches the AudioChannel associated with the given |channel| id.
+  // Returns nullptr if not found.
+  rtc::scoped_refptr<AudioChannel> GetChannel(ChannelId channel);
+
+  // Updates AudioTransportImpl with the current set of actively sending
+  // AudioSenders (AudioEgress). This needs to be invoked whenever the caller
+  // invokes StartSend/StopSend. Returns false when the selected audio device
+  // fails to initialize, in which case no audio input samples can be expected.
+  bool UpdateAudioTransportWithSenders();
+
+  // Synchronization for these is handled internally.
+  rtc::scoped_refptr<AudioEncoderFactory> encoder_factory_;
+  rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
+  std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+
+  // Synchronization is handled internally by AudioProcessing.
+  // Must be placed before |audio_device_module_| for proper destruction.
+  rtc::scoped_refptr<AudioProcessing> audio_processing_;
+
+  // Synchronization is handled internally by AudioMixer.
+  // Must be placed before |audio_device_module_| for proper destruction.
+  rtc::scoped_refptr<AudioMixer> audio_mixer_;
+
+  // Synchronization is handled internally by AudioTransportImpl.
+  // Must be placed before |audio_device_module_| for proper destruction.
+  std::unique_ptr<AudioTransportImpl> audio_transport_;
+
+  // Synchronization is handled internally by AudioDeviceModule.
+  rtc::scoped_refptr<AudioDeviceModule> audio_device_module_;
+
+  // Synchronization is handled internally by ProcessThread.
+  // Must be placed before |channels_| for proper destruction.
+  std::unique_ptr<ProcessThread> process_thread_;
+
+  rtc::CriticalSection lock_;
+
+  // Tracks the next ChannelId to assign to a new AudioChannel.
+  int next_channel_id_ RTC_GUARDED_BY(lock_) = 0;
+
+  // Container to track currently active AudioChannel objects mapped by
+  // ChannelId.
+  std::unordered_map<ChannelId, rtc::scoped_refptr<AudioChannel>> channels_
+      RTC_GUARDED_BY(lock_);
+};
+
+}  // namespace webrtc
+
+#endif  // AUDIO_VOIP_VOIP_CORE_H_
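
Because VoipCore implements all four interfaces, a client holding only a
webrtc::VoipEngine reference reaches the same functionality through the
Base()/Network()/Codec() accessors. A brief illustrative sketch (not included
in the change), assuming a caller-provided webrtc::Transport:

  #include "api/voip/voip_base.h"
  #include "api/voip/voip_codec.h"
  #include "api/voip/voip_engine.h"

  // Sketch of the interface-facing view described in the class comment above.
  absl::optional<webrtc::ChannelId> SetUpChannel(webrtc::VoipEngine& engine,
                                                 webrtc::Transport* transport) {
    auto channel = engine.Base().CreateChannel(transport, absl::nullopt);
    if (channel) {
      const webrtc::SdpAudioFormat pcmu("pcmu", 8000, 1);
      engine.Codec().SetSendCodec(*channel, /*payload_type=*/0, pcmu);
      engine.Codec().SetReceiveCodecs(*channel, {{0, pcmu}});
    }
    return channel;
  }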