Revert "Reland "Reland "Delete old Android ADM."""

This reverts commit db30009304ab97a5fde02977ed1239aa249e2656.

Reason for revert: ... and it's out again :(
 
Original change's description:
> Reland "Reland "Delete old Android ADM.""
>
> This reverts commit 38a28603fd7b2eec46a362105b225dd6f08b4137.
>
> Reason for revert: Attempt to reland, now that WebRTC dependency cycle has been broken.
>
> Original change's description:
> > Revert "Reland "Delete old Android ADM.""
> >
> > This reverts commit 6e4d7e606c4327eaa9298193e22794fcb9b30218.
> >
> > Reason for revert: Still breaks downstream build (though in a different way this time)
> >
> > Original change's description:
> > > Reland "Delete old Android ADM."
> > >
> > > This is a reland of commit 4ec3e9c98873520b3171d40ab0426b2f05edbbd2
> > >
> > > Original change's description:
> > > > Delete old Android ADM.
> > > >
> > > > The plan to move the Android ADM code to the sdk directory has been
> > > > around for several years, but the old code has still not been deleted.
> > > >
> > > > Bug: webrtc:7452
> > > > Change-Id: I0f75c680f71f0b2ce614de6cbd9f124c2a59d453
> > > > Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/264620
> > > > Reviewed-by: Magnus Jedvert <magjed@webrtc.org>
> > > > Commit-Queue: Henrik Andreassson <henrika@webrtc.org>
> > > > Reviewed-by: Henrik Andreassson <henrika@webrtc.org>
> > > > Cr-Commit-Position: refs/heads/main@{#37174}
> > >
> > > Bug: webrtc:7452
> > > Change-Id: Icabad23e72c8258a854b7809a93811161517266c
> > > Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/265872
> > > Reviewed-by: Henrik Andreassson <henrika@webrtc.org>
> > > Commit-Queue: Björn Terelius <terelius@webrtc.org>
> > > Cr-Commit-Position: refs/heads/main@{#37236}
> >
> > Bug: webrtc:7452
> > Change-Id: Ide8fbd55fadd7aed9989053afff7c63c04f1320f
> > No-Presubmit: true
> > No-Tree-Checks: true
> > No-Try: true
> > Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/266023
> > Bot-Commit: rubber-stamper@appspot.gserviceaccount.com <rubber-stamper@appspot.gserviceaccount.com>
> > Commit-Queue: Björn Terelius <terelius@webrtc.org>
> > Owners-Override: Björn Terelius <terelius@webrtc.org>
> > Cr-Commit-Position: refs/heads/main@{#37242}
>
> Bug: webrtc:7452
> Change-Id: I6946d0fc28cf4c08387e451e6a07765f7410ce7c
> Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/266980
> Bot-Commit: rubber-stamper@appspot.gserviceaccount.com <rubber-stamper@appspot.gserviceaccount.com>
> Reviewed-by: Henrik Andreassson <henrika@webrtc.org>
> Commit-Queue: Björn Terelius <terelius@webrtc.org>
> Cr-Commit-Position: refs/heads/main@{#37356}

Bug: webrtc:7452
Change-Id: I1ef4004e89c8bea322bda0dc697a7ba45abeffcc
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/267067
Owners-Override: Björn Terelius <terelius@webrtc.org>
Bot-Commit: rubber-stamper@appspot.gserviceaccount.com <rubber-stamper@appspot.gserviceaccount.com>
Commit-Queue: Björn Terelius <terelius@webrtc.org>
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#37359}
diff --git a/examples/androidnativeapi/BUILD.gn b/examples/androidnativeapi/BUILD.gn
index f538149..680a16d 100644
--- a/examples/androidnativeapi/BUILD.gn
+++ b/examples/androidnativeapi/BUILD.gn
@@ -15,6 +15,7 @@
 
     deps = [
       ":resources",
+      "//modules/audio_device:audio_device_java",
       "//rtc_base:base_java",
       "//sdk/android:camera_java",
       "//sdk/android:surfaceviewrenderer_java",
diff --git a/examples/androidvoip/BUILD.gn b/examples/androidvoip/BUILD.gn
index b0ace2e..3120e06 100644
--- a/examples/androidvoip/BUILD.gn
+++ b/examples/androidvoip/BUILD.gn
@@ -24,6 +24,7 @@
 
     deps = [
       ":resources",
+      "//modules/audio_device:audio_device_java",
       "//rtc_base:base_java",
       "//sdk/android:base_java",
       "//sdk/android:java_audio_device_module_java",
diff --git a/modules/audio_device/BUILD.gn b/modules/audio_device/BUILD.gn
index 0624d62..b376955b 100644
--- a/modules/audio_device/BUILD.gn
+++ b/modules/audio_device/BUILD.gn
@@ -249,7 +249,39 @@
       "include/audio_device_data_observer.h",
     ]
     if (is_android) {
-      deps += [ "../../sdk/android:native_api_audio_device_module" ]
+      sources += [
+        "android/audio_common.h",
+        "android/audio_device_template.h",
+        "android/audio_manager.cc",
+        "android/audio_manager.h",
+        "android/audio_record_jni.cc",
+        "android/audio_record_jni.h",
+        "android/audio_track_jni.cc",
+        "android/audio_track_jni.h",
+        "android/build_info.cc",
+        "android/build_info.h",
+        "android/opensles_common.cc",
+        "android/opensles_common.h",
+        "android/opensles_player.cc",
+        "android/opensles_player.h",
+        "android/opensles_recorder.cc",
+        "android/opensles_recorder.h",
+      ]
+      libs = [
+        "log",
+        "OpenSLES",
+      ]
+      if (rtc_enable_android_aaudio) {
+        sources += [
+          "android/aaudio_player.cc",
+          "android/aaudio_player.h",
+          "android/aaudio_recorder.cc",
+          "android/aaudio_recorder.h",
+          "android/aaudio_wrapper.cc",
+          "android/aaudio_wrapper.h",
+        ]
+        libs += [ "aaudio" ]
+      }
 
       if (build_with_mozilla) {
         include_dirs += [
@@ -417,6 +449,12 @@
       ]
     }
     if (is_android) {
+      sources += [
+        "android/audio_device_unittest.cc",
+        "android/audio_manager_unittest.cc",
+        "android/ensure_initialized.cc",
+        "android/ensure_initialized.h",
+      ]
       deps += [
         "../../sdk/android:internal_jni",
         "../../sdk/android:libjingle_peerconnection_java",
@@ -429,3 +467,20 @@
     }
   }
 }
+
+if (!build_with_chromium && is_android) {
+  rtc_android_library("audio_device_java") {
+    sources = [
+      "android/java/src/org/webrtc/voiceengine/BuildInfo.java",
+      "android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java",
+      "android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java",
+      "android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java",
+      "android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java",
+      "android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java",
+    ]
+    deps = [
+      "../../rtc_base:base_java",
+      "//third_party/androidx:androidx_annotation_annotation_java",
+    ]
+  }
+}
diff --git a/modules/audio_device/DEPS b/modules/audio_device/DEPS
index b0571de..9cc627d 100644
--- a/modules/audio_device/DEPS
+++ b/modules/audio_device/DEPS
@@ -9,6 +9,5 @@
   ],
   "audio_device_impl\.cc": [
     "+sdk/objc",
-    "+sdk/android",
   ],
 }
diff --git a/modules/audio_device/android/aaudio_player.cc b/modules/audio_device/android/aaudio_player.cc
new file mode 100644
index 0000000..5257b2b
--- /dev/null
+++ b/modules/audio_device/android/aaudio_player.cc
@@ -0,0 +1,228 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/aaudio_player.h"
+
+#include <memory>
+
+#include "api/array_view.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+enum AudioDeviceMessageType : uint32_t {
+  kMessageOutputStreamDisconnected,
+};
+
+AAudioPlayer::AAudioPlayer(AudioManager* audio_manager)
+    : main_thread_(rtc::Thread::Current()),
+      aaudio_(audio_manager, AAUDIO_DIRECTION_OUTPUT, this) {
+  RTC_LOG(LS_INFO) << "ctor";
+  thread_checker_aaudio_.Detach();
+}
+
+AAudioPlayer::~AAudioPlayer() {
+  RTC_LOG(LS_INFO) << "dtor";
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  Terminate();
+  RTC_LOG(LS_INFO) << "#detected underruns: " << underrun_count_;
+}
+
+int AAudioPlayer::Init() {
+  RTC_LOG(LS_INFO) << "Init";
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  if (aaudio_.audio_parameters().channels() == 2) {
+    RTC_DLOG(LS_WARNING) << "Stereo mode is enabled";
+  }
+  return 0;
+}
+
+int AAudioPlayer::Terminate() {
+  RTC_LOG(LS_INFO) << "Terminate";
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  StopPlayout();
+  return 0;
+}
+
+int AAudioPlayer::InitPlayout() {
+  RTC_LOG(LS_INFO) << "InitPlayout";
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK(!playing_);
+  if (!aaudio_.Init()) {
+    return -1;
+  }
+  initialized_ = true;
+  return 0;
+}
+
+bool AAudioPlayer::PlayoutIsInitialized() const {
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  return initialized_;
+}
+
+int AAudioPlayer::StartPlayout() {
+  RTC_LOG(LS_INFO) << "StartPlayout";
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  RTC_DCHECK(!playing_);
+  if (!initialized_) {
+    RTC_DLOG(LS_WARNING)
+        << "Playout can not start since InitPlayout must succeed first";
+    return 0;
+  }
+  if (fine_audio_buffer_) {
+    fine_audio_buffer_->ResetPlayout();
+  }
+  if (!aaudio_.Start()) {
+    return -1;
+  }
+  underrun_count_ = aaudio_.xrun_count();
+  first_data_callback_ = true;
+  playing_ = true;
+  return 0;
+}
+
+int AAudioPlayer::StopPlayout() {
+  RTC_LOG(LS_INFO) << "StopPlayout";
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  if (!initialized_ || !playing_) {
+    return 0;
+  }
+  if (!aaudio_.Stop()) {
+    RTC_LOG(LS_ERROR) << "StopPlayout failed";
+    return -1;
+  }
+  thread_checker_aaudio_.Detach();
+  initialized_ = false;
+  playing_ = false;
+  return 0;
+}
+
+bool AAudioPlayer::Playing() const {
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  return playing_;
+}
+
+void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+  RTC_DLOG(LS_INFO) << "AttachAudioBuffer";
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  audio_device_buffer_ = audioBuffer;
+  const AudioParameters audio_parameters = aaudio_.audio_parameters();
+  audio_device_buffer_->SetPlayoutSampleRate(audio_parameters.sample_rate());
+  audio_device_buffer_->SetPlayoutChannels(audio_parameters.channels());
+  RTC_CHECK(audio_device_buffer_);
+  // Create a modified audio buffer class which allows us to ask for any number
+  // of samples (and not only multiples of 10ms) to match the optimal buffer
+  // size per callback used by AAudio.
+  fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
+}
+
+int AAudioPlayer::SpeakerVolumeIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+void AAudioPlayer::OnErrorCallback(aaudio_result_t error) {
+  RTC_LOG(LS_ERROR) << "OnErrorCallback: " << AAudio_convertResultToText(error);
+  // TODO(henrika): investigate if we can use a thread checker here. Initial
+  // tests show that this callback can sometimes be called on a unique thread
+  // but according to the documentation it should be on the same thread as the
+  // data callback.
+  // RTC_DCHECK_RUN_ON(&thread_checker_aaudio_);
+  if (aaudio_.stream_state() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+    // The stream is disconnected and any attempt to use it will return
+    // AAUDIO_ERROR_DISCONNECTED.
+    RTC_LOG(LS_WARNING) << "Output stream disconnected";
+    // AAudio documentation states: "You should not close or reopen the stream
+    // from the callback, use another thread instead". A message is therefore
+    // sent to the main thread to do the restart operation.
+    RTC_DCHECK(main_thread_);
+    main_thread_->Post(RTC_FROM_HERE, this, kMessageOutputStreamDisconnected);
+  }
+}
+
+aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,
+                                                           int32_t num_frames) {
+  RTC_DCHECK_RUN_ON(&thread_checker_aaudio_);
+  // Log device id in first data callback to ensure that a valid device is
+  // utilized.
+  if (first_data_callback_) {
+    RTC_LOG(LS_INFO) << "--- First output data callback: "
+                        "device id="
+                     << aaudio_.device_id();
+    first_data_callback_ = false;
+  }
+
+  // Check if the underrun count has increased. If it has, increase the buffer
+  // size by adding the size of a burst. It will reduce the risk of underruns
+  // at the expense of an increased latency.
+  // TODO(henrika): enable possibility to disable and/or tune the algorithm.
+  const int32_t underrun_count = aaudio_.xrun_count();
+  if (underrun_count > underrun_count_) {
+    RTC_LOG(LS_ERROR) << "Underrun detected: " << underrun_count;
+    underrun_count_ = underrun_count;
+    aaudio_.IncreaseOutputBufferSize();
+  }
+
+  // Estimate latency between writing an audio frame to the output stream and
+  // the time that same frame is played out on the output audio device.
+  latency_millis_ = aaudio_.EstimateLatencyMillis();
+  // TODO(henrika): use for development only.
+  if (aaudio_.frames_written() % (1000 * aaudio_.frames_per_burst()) == 0) {
+    RTC_DLOG(LS_INFO) << "output latency: " << latency_millis_
+                      << ", num_frames: " << num_frames;
+  }
+
+  // Read audio data from the WebRTC source using the FineAudioBuffer object
+  // and write that data into `audio_data` to be played out by AAudio.
+  // Prime output with zeros during a short initial phase to avoid distortion.
+  // TODO(henrika): do more work to figure out if the initial forced silence
+  // period is really needed.
+  if (aaudio_.frames_written() < 50 * aaudio_.frames_per_burst()) {
+    const size_t num_bytes =
+        sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
+    memset(audio_data, 0, num_bytes);
+  } else {
+    fine_audio_buffer_->GetPlayoutData(
+        rtc::MakeArrayView(static_cast<int16_t*>(audio_data),
+                           aaudio_.samples_per_frame() * num_frames),
+        static_cast<int>(latency_millis_ + 0.5));
+  }
+
+  // TODO(henrika): possibly add trace here to be included in systrace.
+  // See https://developer.android.com/studio/profile/systrace-commandline.html.
+  return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+void AAudioPlayer::OnMessage(rtc::Message* msg) {
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  switch (msg->message_id) {
+    case kMessageOutputStreamDisconnected:
+      HandleStreamDisconnected();
+      break;
+  }
+}
+
+void AAudioPlayer::HandleStreamDisconnected() {
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  RTC_DLOG(LS_INFO) << "HandleStreamDisconnected";
+  if (!initialized_ || !playing_) {
+    return;
+  }
+  // Perform a restart by first closing the disconnected stream and then
+  // starting a new stream, this time using the new (preferred) output device.
+  StopPlayout();
+  InitPlayout();
+  StartPlayout();
+}
+}  // namespace webrtc
diff --git a/modules/audio_device/android/aaudio_player.h b/modules/audio_device/android/aaudio_player.h
new file mode 100644
index 0000000..4bf3ee3
--- /dev/null
+++ b/modules/audio_device/android/aaudio_player.h
@@ -0,0 +1,147 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_PLAYER_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_PLAYER_H_
+
+#include <aaudio/AAudio.h>
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/aaudio_wrapper.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/message_handler.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+class AudioManager;
+
+// Implements low-latency 16-bit mono PCM audio output support for Android
+// using the C based AAudio API.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will DCHECK if any method is called on an invalid thread. Audio buffers
+// are requested on a dedicated high-priority thread owned by AAudio.
+//
+// The existing design forces the user to call InitPlayout() after StopPlayout()
+// to be able to call StartPlayout() again. This is in line with how the Java-
+// based implementation works.
+//
+// An audio stream can be disconnected, e.g. when an audio device is removed.
+// This implementation will restart the audio stream using the new preferred
+// device if such an event happens.
+//
+// Also supports automatic buffer-size adjustment based on underrun detections
+// where the internal AAudio buffer can be increased when needed. It will
+// reduce the risk of underruns (~glitches) at the expense of an increased
+// latency.
+class AAudioPlayer final : public AAudioObserverInterface,
+                           public rtc::MessageHandler {
+ public:
+  explicit AAudioPlayer(AudioManager* audio_manager);
+  ~AAudioPlayer();
+
+  int Init();
+  int Terminate();
+
+  int InitPlayout();
+  bool PlayoutIsInitialized() const;
+
+  int StartPlayout();
+  int StopPlayout();
+  bool Playing() const;
+
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+  // Not implemented in AAudio.
+  int SpeakerVolumeIsAvailable(bool& available);  // NOLINT
+  int SetSpeakerVolume(uint32_t volume) { return -1; }
+  int SpeakerVolume(uint32_t& volume) const { return -1; }        // NOLINT
+  int MaxSpeakerVolume(uint32_t& maxVolume) const { return -1; }  // NOLINT
+  int MinSpeakerVolume(uint32_t& minVolume) const { return -1; }  // NOLINT
+
+ protected:
+  // AAudioObserverInterface implementation.
+
+  // For an output stream, this function should render and write `num_frames`
+  // of data in the stream's current data format to the `audio_data` buffer.
+  // Called on a real-time thread owned by AAudio.
+  aaudio_data_callback_result_t OnDataCallback(void* audio_data,
+                                               int32_t num_frames) override;
+  // AAudio calls this function if any error occurs on a callback thread.
+  // Called on a real-time thread owned by AAudio.
+  void OnErrorCallback(aaudio_result_t error) override;
+
+  // rtc::MessageHandler used for restart messages from the error-callback
+  // thread to the main (creating) thread.
+  void OnMessage(rtc::Message* msg) override;
+
+ private:
+  // Closes the existing stream and starts a new stream.
+  void HandleStreamDisconnected();
+
+  // Ensures that methods are called from the same thread as this object is
+  // created on.
+  SequenceChecker main_thread_checker_;
+
+  // Stores thread ID in first call to AAudioPlayer::OnDataCallback from a
+  // real-time thread owned by AAudio. Detached during construction of this
+  // object.
+  SequenceChecker thread_checker_aaudio_;
+
+  // The thread on which this object was created.
+  rtc::Thread* main_thread_;
+
+  // Wraps all AAudio resources. Contains an output stream using the default
+  // output audio device. Can be accessed on both the main thread and the
+  // real-time thread owned by AAudio. See separate AAudio documentation about
+  // thread safety.
+  AAudioWrapper aaudio_;
+
+  // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
+  // in chunks of 10ms. It then allows for this data to be pulled in
+  // a finer or coarser granularity. I.e. interacting with this class instead
+  // of directly with the AudioDeviceBuffer one can ask for any number of
+  // audio data samples.
+  // Example: native buffer size can be 192 audio frames at 48kHz sample rate.
+  // WebRTC will provide 480 audio frames per 10ms but AAudio asks for 192
+  // in each callback (once every 4th ms). This class can then ask for 192 and
+  // the FineAudioBuffer will ask WebRTC for new data approximately only every
+  // second callback and also cache non-utilized audio.
+  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+  // Counts number of detected underrun events reported by AAudio.
+  int32_t underrun_count_ = 0;
+
+  // True only for the first data callback in each audio session.
+  bool first_data_callback_ = true;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class and set by AudioDeviceModule::Create().
+  AudioDeviceBuffer* audio_device_buffer_ RTC_GUARDED_BY(main_thread_checker_) =
+      nullptr;
+
+  bool initialized_ RTC_GUARDED_BY(main_thread_checker_) = false;
+  bool playing_ RTC_GUARDED_BY(main_thread_checker_) = false;
+
+  // Estimated latency between writing an audio frame to the output stream and
+  // the time that same frame is played out on the output audio device.
+  double latency_millis_ RTC_GUARDED_BY(thread_checker_aaudio_) = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_PLAYER_H_
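
A minimal sketch of the playout lifecycle implied by the header above, assuming
the AudioManager and AudioDeviceBuffer instances are provided by the enclosing
AudioDeviceModule (the helper function name is illustrative only):

    #include "modules/audio_device/android/aaudio_player.h"

    // Sketch: drive one playout session with AAudioPlayer.
    void RunPlayoutSession(webrtc::AudioManager* audio_manager,
                           webrtc::AudioDeviceBuffer* audio_device_buffer) {
      webrtc::AAudioPlayer player(audio_manager);
      player.Init();
      player.AttachAudioBuffer(audio_device_buffer);
      if (player.InitPlayout() == 0 && player.StartPlayout() == 0) {
        // Audio is now rendered on a real-time thread owned by AAudio.
        player.StopPlayout();
      }
      player.Terminate();
    }
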
diff --git a/modules/audio_device/android/aaudio_recorder.cc b/modules/audio_device/android/aaudio_recorder.cc
new file mode 100644
index 0000000..4757cf8
--- /dev/null
+++ b/modules/audio_device/android/aaudio_recorder.cc
@@ -0,0 +1,220 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/aaudio_recorder.h"
+
+#include <memory>
+
+#include "api/array_view.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+enum AudioDeviceMessageType : uint32_t {
+  kMessageInputStreamDisconnected,
+};
+
+AAudioRecorder::AAudioRecorder(AudioManager* audio_manager)
+    : main_thread_(rtc::Thread::Current()),
+      aaudio_(audio_manager, AAUDIO_DIRECTION_INPUT, this) {
+  RTC_LOG(LS_INFO) << "ctor";
+  thread_checker_aaudio_.Detach();
+}
+
+AAudioRecorder::~AAudioRecorder() {
+  RTC_LOG(LS_INFO) << "dtor";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  Terminate();
+  RTC_LOG(LS_INFO) << "detected owerflows: " << overflow_count_;
+}
+
+int AAudioRecorder::Init() {
+  RTC_LOG(LS_INFO) << "Init";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (aaudio_.audio_parameters().channels() == 2) {
+    RTC_DLOG(LS_WARNING) << "Stereo mode is enabled";
+  }
+  return 0;
+}
+
+int AAudioRecorder::Terminate() {
+  RTC_LOG(LS_INFO) << "Terminate";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  StopRecording();
+  return 0;
+}
+
+int AAudioRecorder::InitRecording() {
+  RTC_LOG(LS_INFO) << "InitRecording";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK(!recording_);
+  if (!aaudio_.Init()) {
+    return -1;
+  }
+  initialized_ = true;
+  return 0;
+}
+
+int AAudioRecorder::StartRecording() {
+  RTC_LOG(LS_INFO) << "StartRecording";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(initialized_);
+  RTC_DCHECK(!recording_);
+  if (fine_audio_buffer_) {
+    fine_audio_buffer_->ResetRecord();
+  }
+  if (!aaudio_.Start()) {
+    return -1;
+  }
+  overflow_count_ = aaudio_.xrun_count();
+  first_data_callback_ = true;
+  recording_ = true;
+  return 0;
+}
+
+int AAudioRecorder::StopRecording() {
+  RTC_LOG(LS_INFO) << "StopRecording";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (!initialized_ || !recording_) {
+    return 0;
+  }
+  if (!aaudio_.Stop()) {
+    return -1;
+  }
+  thread_checker_aaudio_.Detach();
+  initialized_ = false;
+  recording_ = false;
+  return 0;
+}
+
+void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+  RTC_LOG(LS_INFO) << "AttachAudioBuffer";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  audio_device_buffer_ = audioBuffer;
+  const AudioParameters audio_parameters = aaudio_.audio_parameters();
+  audio_device_buffer_->SetRecordingSampleRate(audio_parameters.sample_rate());
+  audio_device_buffer_->SetRecordingChannels(audio_parameters.channels());
+  RTC_CHECK(audio_device_buffer_);
+  // Create a modified audio buffer class which allows us to deliver any number
+  // of samples (and not only multiples of 10ms which WebRTC uses) to match the
+  // native AAudio buffer size.
+  fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
+}
+
+int AAudioRecorder::EnableBuiltInAEC(bool enable) {
+  RTC_LOG(LS_INFO) << "EnableBuiltInAEC: " << enable;
+  RTC_LOG(LS_ERROR) << "Not implemented";
+  return -1;
+}
+
+int AAudioRecorder::EnableBuiltInAGC(bool enable) {
+  RTC_LOG(LS_INFO) << "EnableBuiltInAGC: " << enable;
+  RTC_LOG(LS_ERROR) << "Not implemented";
+  return -1;
+}
+
+int AAudioRecorder::EnableBuiltInNS(bool enable) {
+  RTC_LOG(LS_INFO) << "EnableBuiltInNS: " << enable;
+  RTC_LOG(LS_ERROR) << "Not implemented";
+  return -1;
+}
+
+void AAudioRecorder::OnErrorCallback(aaudio_result_t error) {
+  RTC_LOG(LS_ERROR) << "OnErrorCallback: " << AAudio_convertResultToText(error);
+  // RTC_DCHECK(thread_checker_aaudio_.IsCurrent());
+  if (aaudio_.stream_state() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+    // The stream is disconnected and any attempt to use it will return
+    // AAUDIO_ERROR_DISCONNECTED.
+    RTC_LOG(LS_WARNING) << "Input stream disconnected => restart is required";
+    // AAudio documentation states: "You should not close or reopen the stream
+    // from the callback, use another thread instead". A message is therefore
+    // sent to the main thread to do the restart operation.
+    RTC_DCHECK(main_thread_);
+    main_thread_->Post(RTC_FROM_HERE, this, kMessageInputStreamDisconnected);
+  }
+}
+
+// Read and process `num_frames` of data from the `audio_data` buffer.
+// TODO(henrika): possibly add trace here to be included in systrace.
+// See https://developer.android.com/studio/profile/systrace-commandline.html.
+aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
+    void* audio_data,
+    int32_t num_frames) {
+  // TODO(henrika): figure out why we sometimes hit this one.
+  // RTC_DCHECK(thread_checker_aaudio_.IsCurrent());
+  // RTC_LOG(LS_INFO) << "OnDataCallback: " << num_frames;
+  // Drain the input buffer at first callback to ensure that it does not
+  // contain any old data. Will also ensure that the lowest possible latency
+  // is obtained.
+  if (first_data_callback_) {
+    RTC_LOG(LS_INFO) << "--- First input data callback: "
+                        "device id="
+                     << aaudio_.device_id();
+    aaudio_.ClearInputStream(audio_data, num_frames);
+    first_data_callback_ = false;
+  }
+  // Check if the overflow counter has increased and if so log a warning.
+  // TODO(henrika): possibly add UMA stat or capacity extension.
+  const int32_t overflow_count = aaudio_.xrun_count();
+  if (overflow_count > overflow_count_) {
+    RTC_LOG(LS_ERROR) << "Overflow detected: " << overflow_count;
+    overflow_count_ = overflow_count;
+  }
+  // Estimate the time between when an audio frame was recorded by the input
+  // device and when it can be read from the input stream.
+  latency_millis_ = aaudio_.EstimateLatencyMillis();
+  // TODO(henrika): use for development only.
+  if (aaudio_.frames_read() % (1000 * aaudio_.frames_per_burst()) == 0) {
+    RTC_DLOG(LS_INFO) << "input latency: " << latency_millis_
+                      << ", num_frames: " << num_frames;
+  }
+  // Copy recorded audio in `audio_data` to the WebRTC sink using the
+  // FineAudioBuffer object.
+  fine_audio_buffer_->DeliverRecordedData(
+      rtc::MakeArrayView(static_cast<const int16_t*>(audio_data),
+                         aaudio_.samples_per_frame() * num_frames),
+      static_cast<int>(latency_millis_ + 0.5));
+
+  return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+void AAudioRecorder::OnMessage(rtc::Message* msg) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  switch (msg->message_id) {
+    case kMessageInputStreamDisconnected:
+      HandleStreamDisconnected();
+      break;
+    default:
+      RTC_LOG(LS_ERROR) << "Invalid message id: " << msg->message_id;
+      break;
+  }
+}
+
+void AAudioRecorder::HandleStreamDisconnected() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_LOG(LS_INFO) << "HandleStreamDisconnected";
+  if (!initialized_ || !recording_) {
+    return;
+  }
+  // Perform a restart by first closing the disconnected stream and then
+  // starting a new stream, this time using the new (preferred) input device.
+  // TODO(henrika): resolve issue where one restart attempt leads to a long
+  // sequence of new calls to OnErrorCallback().
+  // See b/73148976 for details.
+  StopRecording();
+  InitRecording();
+  StartRecording();
+}
+}  // namespace webrtc
diff --git a/modules/audio_device/android/aaudio_recorder.h b/modules/audio_device/android/aaudio_recorder.h
new file mode 100644
index 0000000..d0ad6be
--- /dev/null
+++ b/modules/audio_device/android/aaudio_recorder.h
@@ -0,0 +1,129 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_RECORDER_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_RECORDER_H_
+
+#include <aaudio/AAudio.h>
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/aaudio_wrapper.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/message_handler.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+class AudioManager;
+
+// Implements low-latency 16-bit mono PCM audio input support for Android
+// using the C based AAudio API.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread. Audio buffers
+// are delivered on a dedicated high-priority thread owned by AAudio.
+//
+// The existing design forces the user to call InitRecording() after
+// StopRecording() to be able to call StartRecording() again. This is in line
+// with how the Java-based implementation works.
+//
+// TODO(henrika): add comments about device changes and adaptive buffer
+// management.
+class AAudioRecorder : public AAudioObserverInterface,
+                       public rtc::MessageHandler {
+ public:
+  explicit AAudioRecorder(AudioManager* audio_manager);
+  ~AAudioRecorder();
+
+  int Init();
+  int Terminate();
+
+  int InitRecording();
+  bool RecordingIsInitialized() const { return initialized_; }
+
+  int StartRecording();
+  int StopRecording();
+  bool Recording() const { return recording_; }
+
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+  double latency_millis() const { return latency_millis_; }
+
+  // TODO(henrika): add support using AAudio APIs when available.
+  int EnableBuiltInAEC(bool enable);
+  int EnableBuiltInAGC(bool enable);
+  int EnableBuiltInNS(bool enable);
+
+ protected:
+  // AAudioObserverInterface implementation.
+
+  // For an input stream, this function should read `num_frames` of recorded
+  // data, in the stream's current data format, from the `audio_data` buffer.
+  // Called on a real-time thread owned by AAudio.
+  aaudio_data_callback_result_t OnDataCallback(void* audio_data,
+                                               int32_t num_frames) override;
+
+  // AAudio calls this function if any error occurs on a callback thread.
+  // Called on a real-time thread owned by AAudio.
+  void OnErrorCallback(aaudio_result_t error) override;
+
+  // rtc::MessageHandler used for restart messages.
+  void OnMessage(rtc::Message* msg) override;
+
+ private:
+  // Closes the existing stream and starts a new stream.
+  void HandleStreamDisconnected();
+
+  // Ensures that methods are called from the same thread as this object is
+  // created on.
+  SequenceChecker thread_checker_;
+
+  // Stores thread ID in the first call to AAudioRecorder::OnDataCallback from
+  // real-time thread owned by AAudio. Detached during construction of this
+  // object.
+  SequenceChecker thread_checker_aaudio_;
+
+  // The thread on which this object was created.
+  rtc::Thread* main_thread_;
+
+  // Wraps all AAudio resources. Contains an input stream using the default
+  // input audio device.
+  AAudioWrapper aaudio_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class and set by AudioDeviceModule::Create().
+  AudioDeviceBuffer* audio_device_buffer_ = nullptr;
+
+  bool initialized_ = false;
+  bool recording_ = false;
+
+  // Consumes audio of native buffer size and feeds the WebRTC layer with 10ms
+  // chunks of audio.
+  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+  // Counts number of detected overflow events reported by AAudio.
+  int32_t overflow_count_ = 0;
+
+  // Estimated time between when an audio frame was recorded by the input
+  // device and when it can be read from the input stream.
+  double latency_millis_ = 0;
+
+  // True only for the first data callback in each audio session.
+  bool first_data_callback_ = true;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_RECORDER_H_
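
A corresponding sketch for the recording side, under the same assumption that
the AudioManager and AudioDeviceBuffer come from the enclosing
AudioDeviceModule (the helper function name is illustrative only):

    #include "modules/audio_device/android/aaudio_recorder.h"

    // Sketch: drive one recording session with AAudioRecorder.
    void RunRecordingSession(webrtc::AudioManager* audio_manager,
                             webrtc::AudioDeviceBuffer* audio_device_buffer) {
      webrtc::AAudioRecorder recorder(audio_manager);
      recorder.Init();
      recorder.AttachAudioBuffer(audio_device_buffer);
      if (recorder.InitRecording() == 0 && recorder.StartRecording() == 0) {
        // Recorded audio is delivered on a real-time thread owned by AAudio.
        recorder.StopRecording();
      }
      recorder.Terminate();
    }
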
diff --git a/modules/audio_device/android/aaudio_wrapper.cc b/modules/audio_device/android/aaudio_wrapper.cc
new file mode 100644
index 0000000..3d824b5
--- /dev/null
+++ b/modules/audio_device/android/aaudio_wrapper.cc
@@ -0,0 +1,499 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/aaudio_wrapper.h"
+
+#include "modules/audio_device/android/audio_manager.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/time_utils.h"
+
+#define LOG_ON_ERROR(op)                                                      \
+  do {                                                                        \
+    aaudio_result_t result = (op);                                            \
+    if (result != AAUDIO_OK) {                                                \
+      RTC_LOG(LS_ERROR) << #op << ": " << AAudio_convertResultToText(result); \
+    }                                                                         \
+  } while (0)
+
+#define RETURN_ON_ERROR(op, ...)                                              \
+  do {                                                                        \
+    aaudio_result_t result = (op);                                            \
+    if (result != AAUDIO_OK) {                                                \
+      RTC_LOG(LS_ERROR) << #op << ": " << AAudio_convertResultToText(result); \
+      return __VA_ARGS__;                                                     \
+    }                                                                         \
+  } while (0)
+
+namespace webrtc {
+
+namespace {
+
+const char* DirectionToString(aaudio_direction_t direction) {
+  switch (direction) {
+    case AAUDIO_DIRECTION_OUTPUT:
+      return "OUTPUT";
+    case AAUDIO_DIRECTION_INPUT:
+      return "INPUT";
+    default:
+      return "UNKNOWN";
+  }
+}
+
+const char* SharingModeToString(aaudio_sharing_mode_t mode) {
+  switch (mode) {
+    case AAUDIO_SHARING_MODE_EXCLUSIVE:
+      return "EXCLUSIVE";
+    case AAUDIO_SHARING_MODE_SHARED:
+      return "SHARED";
+    default:
+      return "UNKNOWN";
+  }
+}
+
+const char* PerformanceModeToString(aaudio_performance_mode_t mode) {
+  switch (mode) {
+    case AAUDIO_PERFORMANCE_MODE_NONE:
+      return "NONE";
+    case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
+      return "POWER_SAVING";
+    case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
+      return "LOW_LATENCY";
+    default:
+      return "UNKNOWN";
+  }
+}
+
+const char* FormatToString(int32_t id) {
+  switch (id) {
+    case AAUDIO_FORMAT_INVALID:
+      return "INVALID";
+    case AAUDIO_FORMAT_UNSPECIFIED:
+      return "UNSPECIFIED";
+    case AAUDIO_FORMAT_PCM_I16:
+      return "PCM_I16";
+    case AAUDIO_FORMAT_PCM_FLOAT:
+      return "FLOAT";
+    default:
+      return "UNKNOWN";
+  }
+}
+
+void ErrorCallback(AAudioStream* stream,
+                   void* user_data,
+                   aaudio_result_t error) {
+  RTC_DCHECK(user_data);
+  AAudioWrapper* aaudio_wrapper = reinterpret_cast<AAudioWrapper*>(user_data);
+  RTC_LOG(LS_WARNING) << "ErrorCallback: "
+                      << DirectionToString(aaudio_wrapper->direction());
+  RTC_DCHECK(aaudio_wrapper->observer());
+  aaudio_wrapper->observer()->OnErrorCallback(error);
+}
+
+aaudio_data_callback_result_t DataCallback(AAudioStream* stream,
+                                           void* user_data,
+                                           void* audio_data,
+                                           int32_t num_frames) {
+  RTC_DCHECK(user_data);
+  RTC_DCHECK(audio_data);
+  AAudioWrapper* aaudio_wrapper = reinterpret_cast<AAudioWrapper*>(user_data);
+  RTC_DCHECK(aaudio_wrapper->observer());
+  return aaudio_wrapper->observer()->OnDataCallback(audio_data, num_frames);
+}
+
+// Wraps the stream builder object to ensure that it is released properly when
+// the stream builder goes out of scope.
+class ScopedStreamBuilder {
+ public:
+  ScopedStreamBuilder() {
+    LOG_ON_ERROR(AAudio_createStreamBuilder(&builder_));
+    RTC_DCHECK(builder_);
+  }
+  ~ScopedStreamBuilder() {
+    if (builder_) {
+      LOG_ON_ERROR(AAudioStreamBuilder_delete(builder_));
+    }
+  }
+
+  AAudioStreamBuilder* get() const { return builder_; }
+
+ private:
+  AAudioStreamBuilder* builder_ = nullptr;
+};
+
+}  // namespace
+
+AAudioWrapper::AAudioWrapper(AudioManager* audio_manager,
+                             aaudio_direction_t direction,
+                             AAudioObserverInterface* observer)
+    : direction_(direction), observer_(observer) {
+  RTC_LOG(LS_INFO) << "ctor";
+  RTC_DCHECK(observer_);
+  direction_ == AAUDIO_DIRECTION_OUTPUT
+      ? audio_parameters_ = audio_manager->GetPlayoutAudioParameters()
+      : audio_parameters_ = audio_manager->GetRecordAudioParameters();
+  aaudio_thread_checker_.Detach();
+  RTC_LOG(LS_INFO) << audio_parameters_.ToString();
+}
+
+AAudioWrapper::~AAudioWrapper() {
+  RTC_LOG(LS_INFO) << "dtor";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(!stream_);
+}
+
+bool AAudioWrapper::Init() {
+  RTC_LOG(LS_INFO) << "Init";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  // Creates a stream builder which can be used to open an audio stream.
+  ScopedStreamBuilder builder;
+  // Configures the stream builder using audio parameters given at construction.
+  SetStreamConfiguration(builder.get());
+  // Opens a stream based on options in the stream builder.
+  if (!OpenStream(builder.get())) {
+    return false;
+  }
+  // Ensures that the opened stream could activate the requested settings.
+  if (!VerifyStreamConfiguration()) {
+    return false;
+  }
+  // Optimizes the buffer scheme for lowest possible latency and creates
+  // additional buffer logic to match the 10ms buffer size used in WebRTC.
+  if (!OptimizeBuffers()) {
+    return false;
+  }
+  LogStreamState();
+  return true;
+}
+
+bool AAudioWrapper::Start() {
+  RTC_LOG(LS_INFO) << "Start";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  // TODO(henrika): this state check might not be needed.
+  aaudio_stream_state_t current_state = AAudioStream_getState(stream_);
+  if (current_state != AAUDIO_STREAM_STATE_OPEN) {
+    RTC_LOG(LS_ERROR) << "Invalid state: "
+                      << AAudio_convertStreamStateToText(current_state);
+    return false;
+  }
+  // Asynchronous request for the stream to start.
+  RETURN_ON_ERROR(AAudioStream_requestStart(stream_), false);
+  LogStreamState();
+  return true;
+}
+
+bool AAudioWrapper::Stop() {
+  RTC_LOG(LS_INFO) << "Stop: " << DirectionToString(direction());
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  // Asynchronous request for the stream to stop.
+  RETURN_ON_ERROR(AAudioStream_requestStop(stream_), false);
+  CloseStream();
+  aaudio_thread_checker_.Detach();
+  return true;
+}
+
+double AAudioWrapper::EstimateLatencyMillis() const {
+  RTC_DCHECK(stream_);
+  double latency_millis = 0.0;
+  if (direction() == AAUDIO_DIRECTION_INPUT) {
+    // For input streams, the best guess we can do is to use the current burst
+    // size as the delay estimate.
+    latency_millis = static_cast<double>(frames_per_burst()) / sample_rate() *
+                     rtc::kNumMillisecsPerSec;
+  } else {
+    int64_t existing_frame_index;
+    int64_t existing_frame_presentation_time;
+    // Get the time at which a particular frame was presented to audio hardware.
+    aaudio_result_t result = AAudioStream_getTimestamp(
+        stream_, CLOCK_MONOTONIC, &existing_frame_index,
+        &existing_frame_presentation_time);
+    // Results are only valid when the stream is in AAUDIO_STREAM_STATE_STARTED.
+    if (result == AAUDIO_OK) {
+      // Get write index for next audio frame.
+      int64_t next_frame_index = frames_written();
+      // Number of frames between next frame and the existing frame.
+      int64_t frame_index_delta = next_frame_index - existing_frame_index;
+      // Assume the next frame will be written now.
+      int64_t next_frame_write_time = rtc::TimeNanos();
+      // Calculate time when next frame will be presented to the hardware taking
+      // sample rate into account.
+      int64_t frame_time_delta =
+          (frame_index_delta * rtc::kNumNanosecsPerSec) / sample_rate();
+      int64_t next_frame_presentation_time =
+          existing_frame_presentation_time + frame_time_delta;
+      // Derive a latency estimate given results above.
+      latency_millis = static_cast<double>(next_frame_presentation_time -
+                                           next_frame_write_time) /
+                       rtc::kNumNanosecsPerMillisec;
+    }
+  }
+  return latency_millis;
+}
+
+// Returns true if the output buffer size was increased by one burst, or false
+// if the buffer has reached its maximum capacity or the resize failed.
+bool AAudioWrapper::IncreaseOutputBufferSize() {
+  RTC_LOG(LS_INFO) << "IncreaseBufferSize";
+  RTC_DCHECK(stream_);
+  RTC_DCHECK(aaudio_thread_checker_.IsCurrent());
+  RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_OUTPUT);
+  aaudio_result_t buffer_size = AAudioStream_getBufferSizeInFrames(stream_);
+  // Try to increase size of buffer with one burst to reduce risk of underrun.
+  buffer_size += frames_per_burst();
+  // Verify that the new buffer size is not larger than max capacity.
+  // TODO(henrika): keep track of case when we reach the capacity limit.
+  const int32_t max_buffer_size = buffer_capacity_in_frames();
+  if (buffer_size > max_buffer_size) {
+    RTC_LOG(LS_ERROR) << "Required buffer size (" << buffer_size
+                      << ") is higher than max: " << max_buffer_size;
+    return false;
+  }
+  RTC_LOG(LS_INFO) << "Updating buffer size to: " << buffer_size
+                   << " (max=" << max_buffer_size << ")";
+  buffer_size = AAudioStream_setBufferSizeInFrames(stream_, buffer_size);
+  if (buffer_size < 0) {
+    RTC_LOG(LS_ERROR) << "Failed to change buffer size: "
+                      << AAudio_convertResultToText(buffer_size);
+    return false;
+  }
+  RTC_LOG(LS_INFO) << "Buffer size changed to: " << buffer_size;
+  return true;
+}
+
+void AAudioWrapper::ClearInputStream(void* audio_data, int32_t num_frames) {
+  RTC_LOG(LS_INFO) << "ClearInputStream";
+  RTC_DCHECK(stream_);
+  RTC_DCHECK(aaudio_thread_checker_.IsCurrent());
+  RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_INPUT);
+  aaudio_result_t cleared_frames = 0;
+  do {
+    cleared_frames = AAudioStream_read(stream_, audio_data, num_frames, 0);
+  } while (cleared_frames > 0);
+}
+
+AAudioObserverInterface* AAudioWrapper::observer() const {
+  return observer_;
+}
+
+AudioParameters AAudioWrapper::audio_parameters() const {
+  return audio_parameters_;
+}
+
+int32_t AAudioWrapper::samples_per_frame() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getSamplesPerFrame(stream_);
+}
+
+int32_t AAudioWrapper::buffer_size_in_frames() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getBufferSizeInFrames(stream_);
+}
+
+int32_t AAudioWrapper::buffer_capacity_in_frames() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getBufferCapacityInFrames(stream_);
+}
+
+int32_t AAudioWrapper::device_id() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getDeviceId(stream_);
+}
+
+int32_t AAudioWrapper::xrun_count() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getXRunCount(stream_);
+}
+
+int32_t AAudioWrapper::format() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getFormat(stream_);
+}
+
+int32_t AAudioWrapper::sample_rate() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getSampleRate(stream_);
+}
+
+int32_t AAudioWrapper::channel_count() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getChannelCount(stream_);
+}
+
+int32_t AAudioWrapper::frames_per_callback() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getFramesPerDataCallback(stream_);
+}
+
+aaudio_sharing_mode_t AAudioWrapper::sharing_mode() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getSharingMode(stream_);
+}
+
+aaudio_performance_mode_t AAudioWrapper::performance_mode() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getPerformanceMode(stream_);
+}
+
+aaudio_stream_state_t AAudioWrapper::stream_state() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getState(stream_);
+}
+
+int64_t AAudioWrapper::frames_written() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getFramesWritten(stream_);
+}
+
+int64_t AAudioWrapper::frames_read() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getFramesRead(stream_);
+}
+
+void AAudioWrapper::SetStreamConfiguration(AAudioStreamBuilder* builder) {
+  RTC_LOG(LS_INFO) << "SetStreamConfiguration";
+  RTC_DCHECK(builder);
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  // Request usage of default primary output/input device.
+  // TODO(henrika): verify that default device follows Java APIs.
+  // https://developer.android.com/reference/android/media/AudioDeviceInfo.html.
+  AAudioStreamBuilder_setDeviceId(builder, AAUDIO_UNSPECIFIED);
+  // Use preferred sample rate given by the audio parameters.
+  AAudioStreamBuilder_setSampleRate(builder, audio_parameters().sample_rate());
+  // Use preferred channel configuration given by the audio parameters.
+  AAudioStreamBuilder_setChannelCount(builder, audio_parameters().channels());
+  // Always use 16-bit PCM audio sample format.
+  AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_I16);
+  // TODO(henrika): investigate effect of using AAUDIO_SHARING_MODE_EXCLUSIVE.
+  // Shared mode is requested here; exclusive mode could give lower latency,
+  // but it is not guaranteed to be available on all devices.
+  AAudioStreamBuilder_setSharingMode(builder, AAUDIO_SHARING_MODE_SHARED);
+  // Use the direction that was given at construction.
+  AAudioStreamBuilder_setDirection(builder, direction_);
+  // TODO(henrika): investigate performance using different performance modes.
+  AAudioStreamBuilder_setPerformanceMode(builder,
+                                         AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+  // Given that WebRTC applications require low latency, our audio stream uses
+  // an asynchronous callback function to transfer data to and from the
+  // application. AAudio executes the callback in a higher-priority thread that
+  // has better performance.
+  AAudioStreamBuilder_setDataCallback(builder, DataCallback, this);
+  // Request that AAudio calls this function if any error occurs on a callback
+  // thread.
+  AAudioStreamBuilder_setErrorCallback(builder, ErrorCallback, this);
+}
+
+bool AAudioWrapper::OpenStream(AAudioStreamBuilder* builder) {
+  RTC_LOG(LS_INFO) << "OpenStream";
+  RTC_DCHECK(builder);
+  AAudioStream* stream = nullptr;
+  RETURN_ON_ERROR(AAudioStreamBuilder_openStream(builder, &stream), false);
+  stream_ = stream;
+  LogStreamConfiguration();
+  return true;
+}
+
+void AAudioWrapper::CloseStream() {
+  RTC_LOG(LS_INFO) << "CloseStream";
+  RTC_DCHECK(stream_);
+  LOG_ON_ERROR(AAudioStream_close(stream_));
+  stream_ = nullptr;
+}
+
+void AAudioWrapper::LogStreamConfiguration() {
+  RTC_DCHECK(stream_);
+  char ss_buf[1024];
+  rtc::SimpleStringBuilder ss(ss_buf);
+  ss << "Stream Configuration: ";
+  ss << "sample rate=" << sample_rate() << ", channels=" << channel_count();
+  ss << ", samples per frame=" << samples_per_frame();
+  ss << ", format=" << FormatToString(format());
+  ss << ", sharing mode=" << SharingModeToString(sharing_mode());
+  ss << ", performance mode=" << PerformanceModeToString(performance_mode());
+  ss << ", direction=" << DirectionToString(direction());
+  ss << ", device id=" << AAudioStream_getDeviceId(stream_);
+  ss << ", frames per callback=" << frames_per_callback();
+  RTC_LOG(LS_INFO) << ss.str();
+}
+
+void AAudioWrapper::LogStreamState() {
+  RTC_LOG(LS_INFO) << "AAudio stream state: "
+                   << AAudio_convertStreamStateToText(stream_state());
+}
+
+bool AAudioWrapper::VerifyStreamConfiguration() {
+  RTC_LOG(LS_INFO) << "VerifyStreamConfiguration";
+  RTC_DCHECK(stream_);
+  // TODO(henrika): should we verify device ID as well?
+  if (AAudioStream_getSampleRate(stream_) != audio_parameters().sample_rate()) {
+    RTC_LOG(LS_ERROR) << "Stream unable to use requested sample rate";
+    return false;
+  }
+  if (AAudioStream_getChannelCount(stream_) !=
+      static_cast<int32_t>(audio_parameters().channels())) {
+    RTC_LOG(LS_ERROR) << "Stream unable to use requested channel count";
+    return false;
+  }
+  if (AAudioStream_getFormat(stream_) != AAUDIO_FORMAT_PCM_I16) {
+    RTC_LOG(LS_ERROR) << "Stream unable to use requested format";
+    return false;
+  }
+  if (AAudioStream_getSharingMode(stream_) != AAUDIO_SHARING_MODE_SHARED) {
+    RTC_LOG(LS_ERROR) << "Stream unable to use requested sharing mode";
+    return false;
+  }
+  if (AAudioStream_getPerformanceMode(stream_) !=
+      AAUDIO_PERFORMANCE_MODE_LOW_LATENCY) {
+    RTC_LOG(LS_ERROR) << "Stream unable to use requested performance mode";
+    return false;
+  }
+  if (AAudioStream_getDirection(stream_) != direction()) {
+    RTC_LOG(LS_ERROR) << "Stream direction could not be set";
+    return false;
+  }
+  if (AAudioStream_getSamplesPerFrame(stream_) !=
+      static_cast<int32_t>(audio_parameters().channels())) {
+    RTC_LOG(LS_ERROR) << "Invalid number of samples per frame";
+    return false;
+  }
+  return true;
+}
+
+bool AAudioWrapper::OptimizeBuffers() {
+  RTC_LOG(LS_INFO) << "OptimizeBuffers";
+  RTC_DCHECK(stream_);
+  // Maximum number of frames that can be filled without blocking.
+  RTC_LOG(LS_INFO) << "max buffer capacity in frames: "
+                   << buffer_capacity_in_frames();
+  // Query the number of frames that the application should read or write at
+  // one time for optimal performance.
+  int32_t frames_per_burst = AAudioStream_getFramesPerBurst(stream_);
+  RTC_LOG(LS_INFO) << "frames per burst for optimal performance: "
+                   << frames_per_burst;
+  frames_per_burst_ = frames_per_burst;
+  if (direction() == AAUDIO_DIRECTION_INPUT) {
+    // There is no point in calling setBufferSizeInFrames() for input streams
+    // since it has no effect on the performance (latency in this case).
+    return true;
+  }
+  // Set buffer size to same as burst size to guarantee lowest possible latency.
+  // This size might change for output streams if underruns are detected and
+  // automatic buffer adjustment is enabled.
+  AAudioStream_setBufferSizeInFrames(stream_, frames_per_burst);
+  int32_t buffer_size = AAudioStream_getBufferSizeInFrames(stream_);
+  if (buffer_size != frames_per_burst) {
+    RTC_LOG(LS_ERROR) << "Failed to use optimal buffer burst size";
+    return false;
+  }
+  // Log the resulting buffer size, which now equals one burst.
+  RTC_LOG(LS_INFO) << "buffer burst size in frames: " << buffer_size;
+  return true;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_device/android/aaudio_wrapper.h b/modules/audio_device/android/aaudio_wrapper.h
new file mode 100644
index 0000000..1f925b9
--- /dev/null
+++ b/modules/audio_device/android/aaudio_wrapper.h
@@ -0,0 +1,127 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_WRAPPER_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_WRAPPER_H_
+
+#include <aaudio/AAudio.h>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+
+namespace webrtc {
+
+class AudioManager;
+
+// AAudio callback interface for audio transport to/from the AAudio stream.
+// The interface also contains an error callback method for notifications of
+// e.g. device changes.
+class AAudioObserverInterface {
+ public:
+  // Audio data will be passed in or out of this function depending on the
+  // direction of the audio stream. This callback function will be called on a
+  // real-time thread owned by AAudio.
+  virtual aaudio_data_callback_result_t OnDataCallback(void* audio_data,
+                                                       int32_t num_frames) = 0;
+  // AAudio will call this function if any error occurs on a callback thread.
+  // In response, this function could signal or launch another thread to reopen
+  // a stream on another device. Do not reopen the stream in this callback.
+  virtual void OnErrorCallback(aaudio_result_t error) = 0;
+
+ protected:
+  virtual ~AAudioObserverInterface() {}
+};
+
+// Utility class which wraps the C-based AAudio API into a more handy C++ class
+// where the underlying resources (AAudioStreamBuilder and AAudioStream) are
+// encapsulated. User must set the direction (in or out) at construction since
+// it defines the stream type and the direction of the data flow in the
+// AAudioObserverInterface.
+//
+// AAudio is a new Android C API introduced in the Android O (26) release.
+// It is designed for high-performance audio applications that require low
+// latency. Applications communicate with AAudio by reading and writing data
+// to streams.
+//
+// Each stream is attached to a single audio device, where each audio device
+// has a unique ID. The ID can be used to bind an audio stream to a specific
+// audio device but this implementation lets AAudio choose the default primary
+// device instead (device selection takes place in Java). A stream can only
+// move data in one direction. When a stream is opened, Android checks to
+// ensure that the audio device and stream direction agree.
+class AAudioWrapper {
+ public:
+  AAudioWrapper(AudioManager* audio_manager,
+                aaudio_direction_t direction,
+                AAudioObserverInterface* observer);
+  ~AAudioWrapper();
+
+  bool Init();
+  bool Start();
+  bool Stop();
+
+  // For output streams: estimates latency between writing an audio frame to
+  // the output stream and the time that same frame is played out on the output
+  // audio device.
+  // For input streams: estimates latency between reading an audio frame from
+  // the input stream and the time that same frame was recorded on the input
+  // audio device.
+  double EstimateLatencyMillis() const;
+
+  // Increases the internal buffer size for output streams by one burst size to
+  // reduce the risk of underruns. Can be used while a stream is active.
+  bool IncreaseOutputBufferSize();
+
+  // Drains the recording stream of any existing data by reading from it until
+  // it's empty. Can be used to clear out old data before starting a new audio
+  // session.
+  void ClearInputStream(void* audio_data, int32_t num_frames);
+
+  AAudioObserverInterface* observer() const;
+  AudioParameters audio_parameters() const;
+  int32_t samples_per_frame() const;
+  int32_t buffer_size_in_frames() const;
+  int32_t buffer_capacity_in_frames() const;
+  int32_t device_id() const;
+  int32_t xrun_count() const;
+  int32_t format() const;
+  int32_t sample_rate() const;
+  int32_t channel_count() const;
+  int32_t frames_per_callback() const;
+  aaudio_sharing_mode_t sharing_mode() const;
+  aaudio_performance_mode_t performance_mode() const;
+  aaudio_stream_state_t stream_state() const;
+  int64_t frames_written() const;
+  int64_t frames_read() const;
+  aaudio_direction_t direction() const { return direction_; }
+  AAudioStream* stream() const { return stream_; }
+  int32_t frames_per_burst() const { return frames_per_burst_; }
+
+ private:
+  void SetStreamConfiguration(AAudioStreamBuilder* builder);
+  bool OpenStream(AAudioStreamBuilder* builder);
+  void CloseStream();
+  void LogStreamConfiguration();
+  void LogStreamState();
+  bool VerifyStreamConfiguration();
+  bool OptimizeBuffers();
+
+  SequenceChecker thread_checker_;
+  SequenceChecker aaudio_thread_checker_;
+  AudioParameters audio_parameters_;
+  const aaudio_direction_t direction_;
+  AAudioObserverInterface* observer_ = nullptr;
+  AAudioStream* stream_ = nullptr;
+  int32_t frames_per_burst_ = 0;
+};
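+
+// Example (illustrative sketch only; the real call sites live in the AAudio
+// based player/recorder implementations): typical lifetime of a wrapper for
+// an output stream.
+//
+//   AAudioWrapper aaudio(audio_manager, AAUDIO_DIRECTION_OUTPUT, &observer);
+//   if (aaudio.Init() && aaudio.Start()) {
+//     // Audio now flows through observer.OnDataCallback() on a thread owned
+//     // by AAudio.
+//     RTC_LOG(LS_INFO) << "Output latency: " << aaudio.EstimateLatencyMillis();
+//   }
+//   aaudio.Stop();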
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_WRAPPER_H_
diff --git a/modules/audio_device/android/audio_common.h b/modules/audio_device/android/audio_common.h
new file mode 100644
index 0000000..81ea733
--- /dev/null
+++ b/modules/audio_device/android/audio_common.h
@@ -0,0 +1,28 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_COMMON_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_COMMON_H_
+
+namespace webrtc {
+
+const int kDefaultSampleRate = 44100;
+// Delay estimates for the two different supported modes. These values are based
+// on real-time round-trip delay estimates on a large set of devices and they
+// are lower bounds since the filter length is 128 ms, so the AEC works for
+// delays in the range [50, ~170] ms and [150, ~270] ms. Note that, in most
+// cases, the lowest delay estimate will not be utilized since devices that
+// support low-latency output audio often support HW AEC as well.
+const int kLowLatencyModeDelayEstimateInMilliseconds = 50;
+const int kHighLatencyModeDelayEstimateInMilliseconds = 150;
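+// As an illustration of how these constants are consumed: the tests in
+// audio_device_unittest.cc expect AudioManager::GetDelayEstimateInMilliseconds()
+// to return exactly these values, and AudioDeviceTemplate::PlayoutDelay()
+// reports half of that estimate, i.e. 150 / 2 = 75 ms for the high-latency
+// mode and 50 / 2 = 25 ms for the low-latency mode.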
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_COMMON_H_
diff --git a/modules/audio_device/android/audio_device_template.h b/modules/audio_device/android/audio_device_template.h
new file mode 100644
index 0000000..999c587
--- /dev/null
+++ b/modules/audio_device/android/audio_device_template.h
@@ -0,0 +1,435 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+// InputType/OutputType can be any class that implements the capturing/rendering
+// part of the AudioDeviceGeneric API.
+// Construction and destruction must be done on one and the same thread. Each
+// internal implementation of InputType and OutputType will RTC_DCHECK if that
+// is not the case. All implemented methods must also be called on the same
+// thread. See comments in each InputType/OutputType class for more info.
+// It is possible to call the two static methods (SetAndroidAudioDeviceObjects
+// and ClearAndroidAudioDeviceObjects) from a different thread but both will
+// RTC_CHECK that the calling thread is attached to a Java VM.
+
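+// Example instantiations (illustrative only; the concrete input/output types
+// and the layer-to-type mapping are defined elsewhere in this directory and
+// wired up by AudioDeviceModuleImpl):
+//
+//   AudioDeviceTemplate<AudioRecordJni, AudioTrackJni> java_adm(
+//       AudioDeviceModule::kAndroidJavaAudio, audio_manager);
+//   AudioDeviceTemplate<OpenSLESRecorder, OpenSLESPlayer> opensles_adm(
+//       AudioDeviceModule::kAndroidOpenSLESAudio, audio_manager);
+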
+template <class InputType, class OutputType>
+class AudioDeviceTemplate : public AudioDeviceGeneric {
+ public:
+  AudioDeviceTemplate(AudioDeviceModule::AudioLayer audio_layer,
+                      AudioManager* audio_manager)
+      : audio_layer_(audio_layer),
+        audio_manager_(audio_manager),
+        output_(audio_manager_),
+        input_(audio_manager_),
+        initialized_(false) {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_CHECK(audio_manager);
+    audio_manager_->SetActiveAudioLayer(audio_layer);
+  }
+
+  virtual ~AudioDeviceTemplate() { RTC_LOG(LS_INFO) << __FUNCTION__; }
+
+  int32_t ActiveAudioLayer(
+      AudioDeviceModule::AudioLayer& audioLayer) const override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    audioLayer = audio_layer_;
+    return 0;
+  }
+
+  InitStatus Init() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK(thread_checker_.IsCurrent());
+    RTC_DCHECK(!initialized_);
+    if (!audio_manager_->Init()) {
+      return InitStatus::OTHER_ERROR;
+    }
+    if (output_.Init() != 0) {
+      audio_manager_->Close();
+      return InitStatus::PLAYOUT_ERROR;
+    }
+    if (input_.Init() != 0) {
+      output_.Terminate();
+      audio_manager_->Close();
+      return InitStatus::RECORDING_ERROR;
+    }
+    initialized_ = true;
+    return InitStatus::OK;
+  }
+
+  int32_t Terminate() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK(thread_checker_.IsCurrent());
+    int32_t err = input_.Terminate();
+    err |= output_.Terminate();
+    err |= !audio_manager_->Close();
+    initialized_ = false;
+    RTC_DCHECK_EQ(err, 0);
+    return err;
+  }
+
+  bool Initialized() const override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    RTC_DCHECK(thread_checker_.IsCurrent());
+    return initialized_;
+  }
+
+  int16_t PlayoutDevices() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return 1;
+  }
+
+  int16_t RecordingDevices() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return 1;
+  }
+
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize]) override {
+    RTC_CHECK_NOTREACHED();
+  }
+
+  int32_t RecordingDeviceName(uint16_t index,
+                              char name[kAdmMaxDeviceNameSize],
+                              char guid[kAdmMaxGuidSize]) override {
+    RTC_CHECK_NOTREACHED();
+  }
+
+  int32_t SetPlayoutDevice(uint16_t index) override {
+    // OK to use but it has no effect currently since device selection is
+    // done using Android APIs instead.
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return 0;
+  }
+
+  int32_t SetPlayoutDevice(
+      AudioDeviceModule::WindowsDeviceType device) override {
+    RTC_CHECK_NOTREACHED();
+  }
+
+  int32_t SetRecordingDevice(uint16_t index) override {
+    // OK to use but it has no effect currently since device selection is
+    // done using Android APIs instead.
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return 0;
+  }
+
+  int32_t SetRecordingDevice(
+      AudioDeviceModule::WindowsDeviceType device) override {
+    RTC_CHECK_NOTREACHED();
+  }
+
+  int32_t PlayoutIsAvailable(bool& available) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    available = true;
+    return 0;
+  }
+
+  int32_t InitPlayout() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return output_.InitPlayout();
+  }
+
+  bool PlayoutIsInitialized() const override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return output_.PlayoutIsInitialized();
+  }
+
+  int32_t RecordingIsAvailable(bool& available) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    available = true;
+    return 0;
+  }
+
+  int32_t InitRecording() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return input_.InitRecording();
+  }
+
+  bool RecordingIsInitialized() const override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return input_.RecordingIsInitialized();
+  }
+
+  int32_t StartPlayout() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    if (!audio_manager_->IsCommunicationModeEnabled()) {
+      RTC_LOG(LS_WARNING)
+          << "The application should use MODE_IN_COMMUNICATION audio mode!";
+    }
+    return output_.StartPlayout();
+  }
+
+  int32_t StopPlayout() override {
+    // Avoid using the audio manager (JNI/Java cost) if playout was inactive.
+    if (!Playing())
+      return 0;
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    int32_t err = output_.StopPlayout();
+    return err;
+  }
+
+  bool Playing() const override {
+    RTC_LOG(LS_INFO) << __FUNCTION__;
+    return output_.Playing();
+  }
+
+  int32_t StartRecording() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    if (!audio_manager_->IsCommunicationModeEnabled()) {
+      RTC_LOG(LS_WARNING)
+          << "The application should use MODE_IN_COMMUNICATION audio mode!";
+    }
+    return input_.StartRecording();
+  }
+
+  int32_t StopRecording() override {
+    // Avoid using the audio manager (JNI/Java cost) if recording was inactive.
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    if (!Recording())
+      return 0;
+    int32_t err = input_.StopRecording();
+    return err;
+  }
+
+  bool Recording() const override { return input_.Recording(); }
+
+  int32_t InitSpeaker() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return 0;
+  }
+
+  bool SpeakerIsInitialized() const override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return true;
+  }
+
+  int32_t InitMicrophone() override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return 0;
+  }
+
+  bool MicrophoneIsInitialized() const override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return true;
+  }
+
+  int32_t SpeakerVolumeIsAvailable(bool& available) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return output_.SpeakerVolumeIsAvailable(available);
+  }
+
+  int32_t SetSpeakerVolume(uint32_t volume) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return output_.SetSpeakerVolume(volume);
+  }
+
+  int32_t SpeakerVolume(uint32_t& volume) const override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return output_.SpeakerVolume(volume);
+  }
+
+  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return output_.MaxSpeakerVolume(maxVolume);
+  }
+
+  int32_t MinSpeakerVolume(uint32_t& minVolume) const override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return output_.MinSpeakerVolume(minVolume);
+  }
+
+  int32_t MicrophoneVolumeIsAvailable(bool& available) override {
+    available = false;
+    return -1;
+  }
+
+  int32_t SetMicrophoneVolume(uint32_t volume) override {
+    RTC_CHECK_NOTREACHED();
+  }
+
+  int32_t MicrophoneVolume(uint32_t& volume) const override {
+    RTC_CHECK_NOTREACHED();
+    return -1;
+  }
+
+  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override {
+    RTC_CHECK_NOTREACHED();
+  }
+
+  int32_t MinMicrophoneVolume(uint32_t& minVolume) const override {
+    RTC_CHECK_NOTREACHED();
+  }
+
+  int32_t SpeakerMuteIsAvailable(bool& available) override {
+    RTC_CHECK_NOTREACHED();
+  }
+
+  int32_t SetSpeakerMute(bool enable) override { RTC_CHECK_NOTREACHED(); }
+
+  int32_t SpeakerMute(bool& enabled) const override { RTC_CHECK_NOTREACHED(); }
+
+  int32_t MicrophoneMuteIsAvailable(bool& available) override {
+    RTC_CHECK_NOTREACHED();
+  }
+
+  int32_t SetMicrophoneMute(bool enable) override { RTC_CHECK_NOTREACHED(); }
+
+  int32_t MicrophoneMute(bool& enabled) const override {
+    RTC_CHECK_NOTREACHED();
+  }
+
+  // Returns true if the audio manager has been configured to support stereo
+  // and false otherwise. Default is mono.
+  int32_t StereoPlayoutIsAvailable(bool& available) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    available = audio_manager_->IsStereoPlayoutSupported();
+    return 0;
+  }
+
+  int32_t SetStereoPlayout(bool enable) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    bool available = audio_manager_->IsStereoPlayoutSupported();
+    // Android does not support switching between mono and stereo on the fly.
+    // Instead, the native audio layer is configured via the audio manager to
+    // support either mono or stereo. Calling this method is allowed as long
+    // as the requested state matches the current configuration.
+    return (enable == available) ? 0 : -1;
+  }
+
+  int32_t StereoPlayout(bool& enabled) const override {
+    enabled = audio_manager_->IsStereoPlayoutSupported();
+    return 0;
+  }
+
+  int32_t StereoRecordingIsAvailable(bool& available) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    available = audio_manager_->IsStereoRecordSupported();
+    return 0;
+  }
+
+  int32_t SetStereoRecording(bool enable) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    bool available = audio_manager_->IsStereoRecordSupported();
+    // Android does not support switching between mono and stereo on the fly.
+    // Instead, the native audio layer is configured via the audio manager to
+    // support either mono or stereo. Calling this method is allowed as long
+    // as the requested state matches the current configuration.
+    return (enable == available) ? 0 : -1;
+  }
+
+  int32_t StereoRecording(bool& enabled) const override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    enabled = audio_manager_->IsStereoRecordSupported();
+    return 0;
+  }
+
+  int32_t PlayoutDelay(uint16_t& delay_ms) const override {
+    // The best guess we can make is to use half of the estimated total delay.
+    delay_ms = audio_manager_->GetDelayEstimateInMilliseconds() / 2;
+    RTC_DCHECK_GT(delay_ms, 0);
+    return 0;
+  }
+
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    output_.AttachAudioBuffer(audioBuffer);
+    input_.AttachAudioBuffer(audioBuffer);
+  }
+
+  // Returns true if the device both supports built in AEC and the device
+  // is not blacklisted.
+  // Currently, if OpenSL ES is used in both directions, this method will still
+  // report the correct value and it has the correct effect. As an example:
+  // a device supports built in AEC and this method returns true. Libjingle
+  // will then disable the WebRTC based AEC and that will work for all devices
+  // (mainly Nexus) even when OpenSL ES is used for input since our current
+  // implementation will enable built-in AEC by default also for OpenSL ES.
+  // The only "bad" thing that happens today is that when Libjingle calls
+  // OpenSLESRecorder::EnableBuiltInAEC() it will not have any real effect and
+  // a "Not Implemented" log will be filed. This non-perfect state will remain
+  // until I have added full support for audio effects based on OpenSL ES APIs.
+  bool BuiltInAECIsAvailable() const override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return audio_manager_->IsAcousticEchoCancelerSupported();
+  }
+
+  // TODO(henrika): add implementation for OpenSL ES based audio as well.
+  int32_t EnableBuiltInAEC(bool enable) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+    RTC_CHECK(BuiltInAECIsAvailable()) << "HW AEC is not available";
+    return input_.EnableBuiltInAEC(enable);
+  }
+
+  // Returns true if the device both supports built in AGC and the device
+  // is not blacklisted.
+  // TODO(henrika): add implementation for OpenSL ES based audio as well.
+  // In addition, see comments for BuiltInAECIsAvailable().
+  bool BuiltInAGCIsAvailable() const override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return audio_manager_->IsAutomaticGainControlSupported();
+  }
+
+  // TODO(henrika): add implementation for OpenSL ES based audio as well.
+  int32_t EnableBuiltInAGC(bool enable) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+    RTC_CHECK(BuiltInAGCIsAvailable()) << "HW AGC is not available";
+    return input_.EnableBuiltInAGC(enable);
+  }
+
+  // Returns true if the device both supports built in NS and the device
+  // is not blacklisted.
+  // TODO(henrika): add implementation for OpenSL ES based audio as well.
+  // In addition, see comments for BuiltInAECIsAvailable().
+  bool BuiltInNSIsAvailable() const override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__;
+    return audio_manager_->IsNoiseSuppressorSupported();
+  }
+
+  // TODO(henrika): add implementation for OpenSL ES based audio as well.
+  int32_t EnableBuiltInNS(bool enable) override {
+    RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")";
+    RTC_CHECK(BuiltInNSIsAvailable()) << "HW NS is not available";
+    return input_.EnableBuiltInNS(enable);
+  }
+
+ private:
+  SequenceChecker thread_checker_;
+
+  // Local copy of the audio layer set during construction of the
+  // AudioDeviceModuleImpl instance. Read only value.
+  const AudioDeviceModule::AudioLayer audio_layer_;
+
+  // Non-owning raw pointer to the AudioManager instance given to us at
+  // construction. The real object is owned by AudioDeviceModuleImpl and its
+  // lifetime is the same as that of the AudioDeviceModuleImpl, hence there
+  // is no risk of reading a NULL pointer at any time in this class.
+  AudioManager* const audio_manager_;
+
+  OutputType output_;
+
+  InputType input_;
+
+  bool initialized_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_
diff --git a/modules/audio_device/android/audio_device_unittest.cc b/modules/audio_device/android/audio_device_unittest.cc
new file mode 100644
index 0000000..4e607bc
--- /dev/null
+++ b/modules/audio_device/android/audio_device_unittest.cc
@@ -0,0 +1,1019 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/include/audio_device.h"
+
+#include <algorithm>
+#include <limits>
+#include <list>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "api/scoped_refptr.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/android/build_info.h"
+#include "modules/audio_device/android/ensure_initialized.h"
+#include "modules/audio_device/audio_device_impl.h"
+#include "modules/audio_device/include/mock_audio_transport.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/event.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/time_utils.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+using std::cout;
+using std::endl;
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Gt;
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::NotNull;
+using ::testing::Return;
+
+// #define ENABLE_DEBUG_PRINTF
+#ifdef ENABLE_DEBUG_PRINTF
+#define PRINTD(...) fprintf(stderr, __VA_ARGS__);
+#else
+#define PRINTD(...) ((void)0)
+#endif
+#define PRINT(...) fprintf(stderr, __VA_ARGS__);
+
+namespace webrtc {
+
+// Number of callbacks (input or output) the test waits for before we set
+// an event indicating that the test was OK.
+static const size_t kNumCallbacks = 10;
+// Max amount of time we wait for an event to be set while counting callbacks.
+static const int kTestTimeOutInMilliseconds = 10 * 1000;
+// Average number of audio callbacks per second assuming 10ms packet size.
+static const size_t kNumCallbacksPerSecond = 100;
+// Play out a test file during this time (unit is in seconds).
+static const int kFilePlayTimeInSec = 5;
+static const size_t kBitsPerSample = 16;
+static const size_t kBytesPerSample = kBitsPerSample / 8;
+// Run the full-duplex test during this time (unit is in seconds).
+// Note that first `kNumIgnoreFirstCallbacks` are ignored.
+static const int kFullDuplexTimeInSec = 5;
+// Wait for the callback sequence to stabilize by ignoring this number of
+// initial callbacks (avoids initial FIFO access).
+// Only used in the RunPlayoutAndRecordingInFullDuplex test.
+static const size_t kNumIgnoreFirstCallbacks = 50;
+// Sets the number of impulses per second in the latency test.
+static const int kImpulseFrequencyInHz = 1;
+// Length of round-trip latency measurements. Number of transmitted impulses
+// is kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1.
+static const int kMeasureLatencyTimeInSec = 11;
+// Utilized in round-trip latency measurements to avoid capturing noise samples.
+static const int kImpulseThreshold = 1000;
+static const char kTag[] = "[..........] ";
+
+enum TransportType {
+  kPlayout = 0x1,
+  kRecording = 0x2,
+};
+
+// Interface for processing the audio stream. Real implementations can e.g.
+// run audio in loopback, read audio from a file or perform latency
+// measurements.
+class AudioStreamInterface {
+ public:
+  virtual void Write(const void* source, size_t num_frames) = 0;
+  virtual void Read(void* destination, size_t num_frames) = 0;
+
+ protected:
+  virtual ~AudioStreamInterface() {}
+};
+
+// Reads audio samples from a PCM file where the file is stored in memory at
+// construction.
+class FileAudioStream : public AudioStreamInterface {
+ public:
+  FileAudioStream(size_t num_callbacks,
+                  const std::string& file_name,
+                  int sample_rate)
+      : file_size_in_bytes_(0), sample_rate_(sample_rate), file_pos_(0) {
+    file_size_in_bytes_ = test::GetFileSize(file_name);
+    sample_rate_ = sample_rate;
+    EXPECT_GE(file_size_in_callbacks(), num_callbacks)
+        << "Size of test file is not large enough to last during the test.";
+    const size_t num_16bit_samples =
+        test::GetFileSize(file_name) / kBytesPerSample;
+    file_.reset(new int16_t[num_16bit_samples]);
+    FILE* audio_file = fopen(file_name.c_str(), "rb");
+    EXPECT_NE(audio_file, nullptr);
+    size_t num_samples_read =
+        fread(file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
+    EXPECT_EQ(num_samples_read, num_16bit_samples);
+    fclose(audio_file);
+  }
+
+  // AudioStreamInterface::Write() is not implemented.
+  void Write(const void* source, size_t num_frames) override {}
+
+  // Read samples from file stored in memory (at construction) and copy
+  // `num_frames` (<=> 10ms) to the `destination` byte buffer.
+  void Read(void* destination, size_t num_frames) override {
+    memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
+           num_frames * sizeof(int16_t));
+    file_pos_ += num_frames;
+  }
+
+  int file_size_in_seconds() const {
+    return static_cast<int>(file_size_in_bytes_ /
+                            (kBytesPerSample * sample_rate_));
+  }
+  size_t file_size_in_callbacks() const {
+    return file_size_in_seconds() * kNumCallbacksPerSecond;
+  }
+
+ private:
+  size_t file_size_in_bytes_;
+  int sample_rate_;
+  std::unique_ptr<int16_t[]> file_;
+  size_t file_pos_;
+};
+
+// Simple first in first out (FIFO) class that wraps a list of 16-bit audio
+// buffers of fixed size and allows Write and Read operations. The idea is to
+// store recorded audio buffers (using Write) and then read (using Read) these
+// stored buffers with as short delay as possible when the audio layer needs
+// data to play out. The number of buffers in the FIFO will stabilize under
+// normal conditions since there will be a balance between Write and Read calls.
+// The container is a std::list and access is protected with a lock since both
+// sides (playout and recording) are driven by their own threads.
+class FifoAudioStream : public AudioStreamInterface {
+ public:
+  explicit FifoAudioStream(size_t frames_per_buffer)
+      : frames_per_buffer_(frames_per_buffer),
+        bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
+        fifo_(new AudioBufferList),
+        largest_size_(0),
+        total_written_elements_(0),
+        write_count_(0) {
+    EXPECT_NE(fifo_.get(), nullptr);
+  }
+
+  ~FifoAudioStream() { Flush(); }
+
+  // Allocate new memory, copy `num_frames` samples from `source` into memory
+  // and add a pointer to the memory location to the end of the list.
+  // Increases the size of the FIFO by one element.
+  void Write(const void* source, size_t num_frames) override {
+    ASSERT_EQ(num_frames, frames_per_buffer_);
+    PRINTD("+");
+    if (write_count_++ < kNumIgnoreFirstCallbacks) {
+      return;
+    }
+    int16_t* memory = new int16_t[frames_per_buffer_];
+    memcpy(static_cast<int16_t*>(&memory[0]), source, bytes_per_buffer_);
+    MutexLock lock(&lock_);
+    fifo_->push_back(memory);
+    const size_t size = fifo_->size();
+    if (size > largest_size_) {
+      largest_size_ = size;
+      PRINTD("(%zu)", largest_size_);
+    }
+    total_written_elements_ += size;
+  }
+
+  // Read pointer to data buffer from front of list, copy `num_frames` of stored
+  // data into `destination` and delete the utilized memory allocation.
+  // Decreases the size of the FIFO by one element.
+  void Read(void* destination, size_t num_frames) override {
+    ASSERT_EQ(num_frames, frames_per_buffer_);
+    PRINTD("-");
+    MutexLock lock(&lock_);
+    if (fifo_->empty()) {
+      memset(destination, 0, bytes_per_buffer_);
+    } else {
+      int16_t* memory = fifo_->front();
+      fifo_->pop_front();
+      memcpy(destination, static_cast<int16_t*>(&memory[0]), bytes_per_buffer_);
+      delete[] memory;
+    }
+  }
+
+  size_t size() const { return fifo_->size(); }
+
+  size_t largest_size() const { return largest_size_; }
+
+  size_t average_size() const {
+    return (total_written_elements_ == 0)
+               ? 0.0
+               : 0.5 + static_cast<float>(total_written_elements_) /
+                           (write_count_ - kNumIgnoreFirstCallbacks);
+  }
+
+ private:
+  void Flush() {
+    for (auto it = fifo_->begin(); it != fifo_->end(); ++it) {
+      delete[] *it;
+    }
+    fifo_->clear();
+  }
+
+  using AudioBufferList = std::list<int16_t*>;
+  Mutex lock_;
+  const size_t frames_per_buffer_;
+  const size_t bytes_per_buffer_;
+  std::unique_ptr<AudioBufferList> fifo_;
+  size_t largest_size_;
+  size_t total_written_elements_;
+  size_t write_count_;
+};
+
+// Inserts periodic impulses and measures the latency between the time of
+// transmission and time of receiving the same impulse.
+// Usage requires a special hardware called Audio Loopback Dongle.
+// See http://source.android.com/devices/audio/loopback.html for details.
+class LatencyMeasuringAudioStream : public AudioStreamInterface {
+ public:
+  explicit LatencyMeasuringAudioStream(size_t frames_per_buffer)
+      : frames_per_buffer_(frames_per_buffer),
+        bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
+        play_count_(0),
+        rec_count_(0),
+        pulse_time_(0) {}
+
+  // Insert periodic impulses in first two samples of `destination`.
+  void Read(void* destination, size_t num_frames) override {
+    ASSERT_EQ(num_frames, frames_per_buffer_);
+    if (play_count_ == 0) {
+      PRINT("[");
+    }
+    play_count_++;
+    memset(destination, 0, bytes_per_buffer_);
+    if (play_count_ % (kNumCallbacksPerSecond / kImpulseFrequencyInHz) == 0) {
+      if (pulse_time_ == 0) {
+        pulse_time_ = rtc::TimeMillis();
+      }
+      PRINT(".");
+      const int16_t impulse = std::numeric_limits<int16_t>::max();
+      int16_t* ptr16 = static_cast<int16_t*>(destination);
+      for (size_t i = 0; i < 2; ++i) {
+        ptr16[i] = impulse;
+      }
+    }
+  }
+
+  // Detect received impulses in `source`, derive time between transmission and
+  // detection and add the calculated delay to list of latencies.
+  void Write(const void* source, size_t num_frames) override {
+    ASSERT_EQ(num_frames, frames_per_buffer_);
+    rec_count_++;
+    if (pulse_time_ == 0) {
+      // Avoid detection of new impulse response until a new impulse has
+      // been transmitted (sets `pulse_time_` to value larger than zero).
+      return;
+    }
+    const int16_t* ptr16 = static_cast<const int16_t*>(source);
+    std::vector<int16_t> vec(ptr16, ptr16 + num_frames);
+    // Find max value in the audio buffer.
+    int max = *std::max_element(vec.begin(), vec.end());
+    // Find index (element position in vector) of the max element.
+    int index_of_max =
+        std::distance(vec.begin(), std::find(vec.begin(), vec.end(), max));
+    if (max > kImpulseThreshold) {
+      PRINTD("(%d,%d)", max, index_of_max);
+      int64_t now_time = rtc::TimeMillis();
+      int extra_delay = IndexToMilliseconds(static_cast<double>(index_of_max));
+      PRINTD("[%d]", static_cast<int>(now_time - pulse_time_));
+      PRINTD("[%d]", extra_delay);
+      // Total latency is the difference between transmit time and detection
+      // time plus the extra delay within the buffer in which we detected the
+      // received impulse. It is transmitted at sample 0 but can be received
+      // at sample N where N > 0. The term `extra_delay` accounts for N and it
+      // is a value between 0 and 10ms.
+      latencies_.push_back(now_time - pulse_time_ + extra_delay);
+      pulse_time_ = 0;
+    } else {
+      PRINTD("-");
+    }
+  }
+
+  size_t num_latency_values() const { return latencies_.size(); }
+
+  int min_latency() const {
+    if (latencies_.empty())
+      return 0;
+    return *std::min_element(latencies_.begin(), latencies_.end());
+  }
+
+  int max_latency() const {
+    if (latencies_.empty())
+      return 0;
+    return *std::max_element(latencies_.begin(), latencies_.end());
+  }
+
+  int average_latency() const {
+    if (latencies_.empty())
+      return 0;
+    return 0.5 + static_cast<double>(
+                     std::accumulate(latencies_.begin(), latencies_.end(), 0)) /
+                     latencies_.size();
+  }
+
+  void PrintResults() const {
+    PRINT("] ");
+    for (auto it = latencies_.begin(); it != latencies_.end(); ++it) {
+      PRINT("%d ", *it);
+    }
+    PRINT("\n");
+    PRINT("%s[min, max, avg]=[%d, %d, %d] ms\n", kTag, min_latency(),
+          max_latency(), average_latency());
+  }
+
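+  // Worked example (illustrative): with 10 ms buffers at 48 kHz,
+  // `frames_per_buffer_` is 480, so an impulse detected at index 240 maps to
+  // static_cast<int>(10.0 * (240 / 480.0) + 0.5) = 5 ms of extra in-buffer
+  // delay.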
+  int IndexToMilliseconds(double index) const {
+    return static_cast<int>(10.0 * (index / frames_per_buffer_) + 0.5);
+  }
+
+ private:
+  const size_t frames_per_buffer_;
+  const size_t bytes_per_buffer_;
+  size_t play_count_;
+  size_t rec_count_;
+  int64_t pulse_time_;
+  std::vector<int> latencies_;
+};
+
+// Mocks the AudioTransport object and proxies actions for the two callbacks
+// (RecordedDataIsAvailable and NeedMorePlayData) to different implementations
+// of AudioStreamInterface.
+class MockAudioTransportAndroid : public test::MockAudioTransport {
+ public:
+  explicit MockAudioTransportAndroid(int type)
+      : num_callbacks_(0),
+        type_(type),
+        play_count_(0),
+        rec_count_(0),
+        audio_stream_(nullptr) {}
+
+  virtual ~MockAudioTransportAndroid() {}
+
+  // Set default actions of the mock object. We are delegating to fake
+  // implementations (of AudioStreamInterface) here.
+  void HandleCallbacks(rtc::Event* test_is_done,
+                       AudioStreamInterface* audio_stream,
+                       int num_callbacks) {
+    test_is_done_ = test_is_done;
+    audio_stream_ = audio_stream;
+    num_callbacks_ = num_callbacks;
+    if (play_mode()) {
+      ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
+          .WillByDefault(
+              Invoke(this, &MockAudioTransportAndroid::RealNeedMorePlayData));
+    }
+    if (rec_mode()) {
+      ON_CALL(*this, RecordedDataIsAvailable(_, _, _, _, _, _, _, _, _, _))
+          .WillByDefault(Invoke(
+              this, &MockAudioTransportAndroid::RealRecordedDataIsAvailable));
+    }
+  }
+
+  int32_t RealRecordedDataIsAvailable(const void* audioSamples,
+                                      const size_t nSamples,
+                                      const size_t nBytesPerSample,
+                                      const size_t nChannels,
+                                      const uint32_t samplesPerSec,
+                                      const uint32_t totalDelayMS,
+                                      const int32_t clockDrift,
+                                      const uint32_t currentMicLevel,
+                                      const bool keyPressed,
+                                      uint32_t& newMicLevel) {  // NOLINT
+    EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
+    rec_count_++;
+    // Process the recorded audio stream if an AudioStreamInterface
+    // implementation exists.
+    if (audio_stream_) {
+      audio_stream_->Write(audioSamples, nSamples);
+    }
+    if (ReceivedEnoughCallbacks()) {
+      test_is_done_->Set();
+    }
+    return 0;
+  }
+
+  int32_t RealNeedMorePlayData(const size_t nSamples,
+                               const size_t nBytesPerSample,
+                               const size_t nChannels,
+                               const uint32_t samplesPerSec,
+                               void* audioSamples,
+                               size_t& nSamplesOut,  // NOLINT
+                               int64_t* elapsed_time_ms,
+                               int64_t* ntp_time_ms) {
+    EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
+    play_count_++;
+    nSamplesOut = nSamples;
+    // Read (possibly processed) audio stream samples to be played out if an
+    // AudioStreamInterface implementation exists.
+    if (audio_stream_) {
+      audio_stream_->Read(audioSamples, nSamples);
+    }
+    if (ReceivedEnoughCallbacks()) {
+      test_is_done_->Set();
+    }
+    return 0;
+  }
+
+  bool ReceivedEnoughCallbacks() {
+    bool recording_done = false;
+    if (rec_mode())
+      recording_done = rec_count_ >= num_callbacks_;
+    else
+      recording_done = true;
+
+    bool playout_done = false;
+    if (play_mode())
+      playout_done = play_count_ >= num_callbacks_;
+    else
+      playout_done = true;
+
+    return recording_done && playout_done;
+  }
+
+  bool play_mode() const { return type_ & kPlayout; }
+  bool rec_mode() const { return type_ & kRecording; }
+
+ private:
+  rtc::Event* test_is_done_;
+  size_t num_callbacks_;
+  int type_;
+  size_t play_count_;
+  size_t rec_count_;
+  AudioStreamInterface* audio_stream_;
+  std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream_;
+};
+
+// AudioDeviceTest test fixture.
+class AudioDeviceTest : public ::testing::Test {
+ protected:
+  AudioDeviceTest() : task_queue_factory_(CreateDefaultTaskQueueFactory()) {
+    // One-time initialization of JVM and application context. Ensures that we
+    // can do calls between C++ and Java. Initializes both Java and OpenSL ES
+    // implementations.
+    webrtc::audiodevicemodule::EnsureInitialized();
+    // Creates an audio device using a default audio layer.
+    audio_device_ = CreateAudioDevice(AudioDeviceModule::kPlatformDefaultAudio);
+    EXPECT_NE(audio_device_.get(), nullptr);
+    EXPECT_EQ(0, audio_device_->Init());
+    playout_parameters_ = audio_manager()->GetPlayoutAudioParameters();
+    record_parameters_ = audio_manager()->GetRecordAudioParameters();
+    build_info_.reset(new BuildInfo());
+  }
+  virtual ~AudioDeviceTest() { EXPECT_EQ(0, audio_device_->Terminate()); }
+
+  int playout_sample_rate() const { return playout_parameters_.sample_rate(); }
+  int record_sample_rate() const { return record_parameters_.sample_rate(); }
+  size_t playout_channels() const { return playout_parameters_.channels(); }
+  size_t record_channels() const { return record_parameters_.channels(); }
+  size_t playout_frames_per_10ms_buffer() const {
+    return playout_parameters_.frames_per_10ms_buffer();
+  }
+  size_t record_frames_per_10ms_buffer() const {
+    return record_parameters_.frames_per_10ms_buffer();
+  }
+
+  int total_delay_ms() const {
+    return audio_manager()->GetDelayEstimateInMilliseconds();
+  }
+
+  rtc::scoped_refptr<AudioDeviceModule> audio_device() const {
+    return audio_device_;
+  }
+
+  AudioDeviceModuleImpl* audio_device_impl() const {
+    return static_cast<AudioDeviceModuleImpl*>(audio_device_.get());
+  }
+
+  AudioManager* audio_manager() const {
+    return audio_device_impl()->GetAndroidAudioManagerForTest();
+  }
+
+  AudioManager* GetAudioManager(AudioDeviceModule* adm) const {
+    return static_cast<AudioDeviceModuleImpl*>(adm)
+        ->GetAndroidAudioManagerForTest();
+  }
+
+  AudioDeviceBuffer* audio_device_buffer() const {
+    return audio_device_impl()->GetAudioDeviceBuffer();
+  }
+
+  rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice(
+      AudioDeviceModule::AudioLayer audio_layer) {
+    rtc::scoped_refptr<AudioDeviceModule> module(
+        AudioDeviceModule::Create(audio_layer, task_queue_factory_.get()));
+    return module;
+  }
+
+  // Returns file name relative to the resource root given a sample rate.
+  std::string GetFileName(int sample_rate) {
+    EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100);
+    char fname[64];
+    snprintf(fname, sizeof(fname), "audio_device/audio_short%d",
+             sample_rate / 1000);
+    std::string file_name(webrtc::test::ResourcePath(fname, "pcm"));
+    EXPECT_TRUE(test::FileExists(file_name));
+#ifdef ENABLE_PRINTF
+    PRINT("file name: %s\n", file_name.c_str());
+    const size_t bytes = test::GetFileSize(file_name);
+    PRINT("file size: %zu [bytes]\n", bytes);
+    PRINT("file size: %zu [samples]\n", bytes / kBytesPerSample);
+    const int seconds =
+        static_cast<int>(bytes / (sample_rate * kBytesPerSample));
+    PRINT("file size: %d [secs]\n", seconds);
+    PRINT("file size: %zu [callbacks]\n", seconds * kNumCallbacksPerSecond);
+#endif
+    return file_name;
+  }
+
+  AudioDeviceModule::AudioLayer GetActiveAudioLayer() const {
+    AudioDeviceModule::AudioLayer audio_layer;
+    EXPECT_EQ(0, audio_device()->ActiveAudioLayer(&audio_layer));
+    return audio_layer;
+  }
+
+  int TestDelayOnAudioLayer(
+      const AudioDeviceModule::AudioLayer& layer_to_test) {
+    rtc::scoped_refptr<AudioDeviceModule> audio_device;
+    audio_device = CreateAudioDevice(layer_to_test);
+    EXPECT_NE(audio_device.get(), nullptr);
+    AudioManager* audio_manager = GetAudioManager(audio_device.get());
+    EXPECT_NE(audio_manager, nullptr);
+    return audio_manager->GetDelayEstimateInMilliseconds();
+  }
+
+  AudioDeviceModule::AudioLayer TestActiveAudioLayer(
+      const AudioDeviceModule::AudioLayer& layer_to_test) {
+    rtc::scoped_refptr<AudioDeviceModule> audio_device;
+    audio_device = CreateAudioDevice(layer_to_test);
+    EXPECT_NE(audio_device.get(), nullptr);
+    AudioDeviceModule::AudioLayer active;
+    EXPECT_EQ(0, audio_device->ActiveAudioLayer(&active));
+    return active;
+  }
+
+  bool DisableTestForThisDevice(const std::string& model) {
+    return (build_info_->GetDeviceModel() == model);
+  }
+
+  // Volume control is currently only supported for the Java output audio layer.
+  // For OpenSL ES, the internal stream volume is always on max level and there
+  // is no need for this test to set it to max.
+  bool AudioLayerSupportsVolumeControl() const {
+    return GetActiveAudioLayer() == AudioDeviceModule::kAndroidJavaAudio;
+  }
+
+  void SetMaxPlayoutVolume() {
+    if (!AudioLayerSupportsVolumeControl())
+      return;
+    uint32_t max_volume;
+    EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
+    EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
+  }
+
+  void DisableBuiltInAECIfAvailable() {
+    if (audio_device()->BuiltInAECIsAvailable()) {
+      EXPECT_EQ(0, audio_device()->EnableBuiltInAEC(false));
+    }
+  }
+
+  void StartPlayout() {
+    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+    EXPECT_FALSE(audio_device()->Playing());
+    EXPECT_EQ(0, audio_device()->InitPlayout());
+    EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
+    EXPECT_EQ(0, audio_device()->StartPlayout());
+    EXPECT_TRUE(audio_device()->Playing());
+  }
+
+  void StopPlayout() {
+    EXPECT_EQ(0, audio_device()->StopPlayout());
+    EXPECT_FALSE(audio_device()->Playing());
+    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+  }
+
+  void StartRecording() {
+    EXPECT_FALSE(audio_device()->RecordingIsInitialized());
+    EXPECT_FALSE(audio_device()->Recording());
+    EXPECT_EQ(0, audio_device()->InitRecording());
+    EXPECT_TRUE(audio_device()->RecordingIsInitialized());
+    EXPECT_EQ(0, audio_device()->StartRecording());
+    EXPECT_TRUE(audio_device()->Recording());
+  }
+
+  void StopRecording() {
+    EXPECT_EQ(0, audio_device()->StopRecording());
+    EXPECT_FALSE(audio_device()->Recording());
+  }
+
+  int GetMaxSpeakerVolume() const {
+    uint32_t max_volume(0);
+    EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
+    return max_volume;
+  }
+
+  int GetMinSpeakerVolume() const {
+    uint32_t min_volume(0);
+    EXPECT_EQ(0, audio_device()->MinSpeakerVolume(&min_volume));
+    return min_volume;
+  }
+
+  int GetSpeakerVolume() const {
+    uint32_t volume(0);
+    EXPECT_EQ(0, audio_device()->SpeakerVolume(&volume));
+    return volume;
+  }
+
+  rtc::Event test_is_done_;
+  std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+  rtc::scoped_refptr<AudioDeviceModule> audio_device_;
+  AudioParameters playout_parameters_;
+  AudioParameters record_parameters_;
+  std::unique_ptr<BuildInfo> build_info_;
+};
+
+TEST_F(AudioDeviceTest, ConstructDestruct) {
+  // Using the test fixture to create and destruct the audio device module.
+}
+
+// We always ask for a default audio layer when the ADM is constructed. But the
+// ADM will then internally select the most suitable combination of audio
+// layers for input and output, based on whether low-latency output and/or
+// input audio in combination with OpenSL ES is supported. This test ensures
+// that the
+// correct selection is done.
+TEST_F(AudioDeviceTest, VerifyDefaultAudioLayer) {
+  const AudioDeviceModule::AudioLayer audio_layer = GetActiveAudioLayer();
+  bool low_latency_output = audio_manager()->IsLowLatencyPlayoutSupported();
+  bool low_latency_input = audio_manager()->IsLowLatencyRecordSupported();
+  bool aaudio = audio_manager()->IsAAudioSupported();
+  AudioDeviceModule::AudioLayer expected_audio_layer;
+  if (aaudio) {
+    expected_audio_layer = AudioDeviceModule::kAndroidAAudioAudio;
+  } else if (low_latency_output && low_latency_input) {
+    expected_audio_layer = AudioDeviceModule::kAndroidOpenSLESAudio;
+  } else if (low_latency_output && !low_latency_input) {
+    expected_audio_layer =
+        AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
+  } else {
+    expected_audio_layer = AudioDeviceModule::kAndroidJavaAudio;
+  }
+  EXPECT_EQ(expected_audio_layer, audio_layer);
+}
+
+// Verify that it is possible to explicitly create the supported types of
+// ADMs. These tests override the default selection of the native audio layer
+// by ignoring whether the device supports low-latency output.
+TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForCombinedJavaOpenSLCombo) {
+  AudioDeviceModule::AudioLayer expected_layer =
+      AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
+  AudioDeviceModule::AudioLayer active_layer =
+      TestActiveAudioLayer(expected_layer);
+  EXPECT_EQ(expected_layer, active_layer);
+}
+
+TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForJavaInBothDirections) {
+  AudioDeviceModule::AudioLayer expected_layer =
+      AudioDeviceModule::kAndroidJavaAudio;
+  AudioDeviceModule::AudioLayer active_layer =
+      TestActiveAudioLayer(expected_layer);
+  EXPECT_EQ(expected_layer, active_layer);
+}
+
+TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForOpenSLInBothDirections) {
+  AudioDeviceModule::AudioLayer expected_layer =
+      AudioDeviceModule::kAndroidOpenSLESAudio;
+  AudioDeviceModule::AudioLayer active_layer =
+      TestActiveAudioLayer(expected_layer);
+  EXPECT_EQ(expected_layer, active_layer);
+}
+
+// TODO(bugs.webrtc.org/8914)
+#if !defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
+  DISABLED_CorrectAudioLayerIsUsedForAAudioInBothDirections
+#else
+#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
+  CorrectAudioLayerIsUsedForAAudioInBothDirections
+#endif
+TEST_F(AudioDeviceTest,
+       MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections) {
+  AudioDeviceModule::AudioLayer expected_layer =
+      AudioDeviceModule::kAndroidAAudioAudio;
+  AudioDeviceModule::AudioLayer active_layer =
+      TestActiveAudioLayer(expected_layer);
+  EXPECT_EQ(expected_layer, active_layer);
+}
+
+// TODO(bugs.webrtc.org/8914)
+#if !defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
+  DISABLED_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
+#else
+#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
+  CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
+#endif
+TEST_F(AudioDeviceTest,
+       MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo) {
+  AudioDeviceModule::AudioLayer expected_layer =
+      AudioDeviceModule::kAndroidJavaInputAndAAudioOutputAudio;
+  AudioDeviceModule::AudioLayer active_layer =
+      TestActiveAudioLayer(expected_layer);
+  EXPECT_EQ(expected_layer, active_layer);
+}
+
+// The Android ADM supports two different delay reporting modes. One for the
+// low-latency output path (in combination with OpenSL ES), and one for the
+// high-latency output path (Java backends in both directions). These two tests
+// verify that the audio manager reports the correct delay estimate given the
+// selected audio layer. Note that this delay estimate will only be utilized
+// if the HW AEC is disabled.
+TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForHighLatencyOutputPath) {
+  EXPECT_EQ(kHighLatencyModeDelayEstimateInMilliseconds,
+            TestDelayOnAudioLayer(AudioDeviceModule::kAndroidJavaAudio));
+}
+
+TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForLowLatencyOutputPath) {
+  EXPECT_EQ(kLowLatencyModeDelayEstimateInMilliseconds,
+            TestDelayOnAudioLayer(
+                AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio));
+}
+
+// Ensure that the ADM internal audio device buffer is configured to use the
+// correct set of parameters.
+TEST_F(AudioDeviceTest, VerifyAudioDeviceBufferParameters) {
+  EXPECT_EQ(playout_parameters_.sample_rate(),
+            static_cast<int>(audio_device_buffer()->PlayoutSampleRate()));
+  EXPECT_EQ(record_parameters_.sample_rate(),
+            static_cast<int>(audio_device_buffer()->RecordingSampleRate()));
+  EXPECT_EQ(playout_parameters_.channels(),
+            audio_device_buffer()->PlayoutChannels());
+  EXPECT_EQ(record_parameters_.channels(),
+            audio_device_buffer()->RecordingChannels());
+}
+
+TEST_F(AudioDeviceTest, InitTerminate) {
+  // Initialization is part of the test fixture.
+  EXPECT_TRUE(audio_device()->Initialized());
+  EXPECT_EQ(0, audio_device()->Terminate());
+  EXPECT_FALSE(audio_device()->Initialized());
+}
+
+TEST_F(AudioDeviceTest, Devices) {
+  // Device enumeration is not supported. Verify fixed values only.
+  EXPECT_EQ(1, audio_device()->PlayoutDevices());
+  EXPECT_EQ(1, audio_device()->RecordingDevices());
+}
+
+TEST_F(AudioDeviceTest, SpeakerVolumeShouldBeAvailable) {
+  // The OpenSL ES output audio path does not support volume control.
+  if (!AudioLayerSupportsVolumeControl())
+    return;
+  bool available;
+  EXPECT_EQ(0, audio_device()->SpeakerVolumeIsAvailable(&available));
+  EXPECT_TRUE(available);
+}
+
+TEST_F(AudioDeviceTest, MaxSpeakerVolumeIsPositive) {
+  // The OpenSL ES output audio path does not support volume control.
+  if (!AudioLayerSupportsVolumeControl())
+    return;
+  StartPlayout();
+  EXPECT_GT(GetMaxSpeakerVolume(), 0);
+  StopPlayout();
+}
+
+TEST_F(AudioDeviceTest, MinSpeakerVolumeIsZero) {
+  // The OpenSL ES output audio path does not support volume control.
+  if (!AudioLayerSupportsVolumeControl())
+    return;
+  EXPECT_EQ(GetMinSpeakerVolume(), 0);
+}
+
+TEST_F(AudioDeviceTest, DefaultSpeakerVolumeIsWithinMinMax) {
+  // The OpenSL ES output audio path does not support volume control.
+  if (!AudioLayerSupportsVolumeControl())
+    return;
+  const int default_volume = GetSpeakerVolume();
+  EXPECT_GE(default_volume, GetMinSpeakerVolume());
+  EXPECT_LE(default_volume, GetMaxSpeakerVolume());
+}
+
+TEST_F(AudioDeviceTest, SetSpeakerVolumeActuallySetsVolume) {
+  // The OpenSL ES output audio path does not support volume control.
+  if (!AudioLayerSupportsVolumeControl())
+    return;
+  const int default_volume = GetSpeakerVolume();
+  const int max_volume = GetMaxSpeakerVolume();
+  EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
+  int new_volume = GetSpeakerVolume();
+  EXPECT_EQ(new_volume, max_volume);
+  EXPECT_EQ(0, audio_device()->SetSpeakerVolume(default_volume));
+}
+
+// Tests that playout can be initiated, started and stopped. No audio callback
+// is registered in this test.
+TEST_F(AudioDeviceTest, StartStopPlayout) {
+  StartPlayout();
+  StopPlayout();
+  StartPlayout();
+  StopPlayout();
+}
+
+// Tests that recording can be initiated, started and stopped. No audio callback
+// is registered in this test.
+TEST_F(AudioDeviceTest, StartStopRecording) {
+  StartRecording();
+  StopRecording();
+  StartRecording();
+  StopRecording();
+}
+
+// Verify that calling StopPlayout() will leave us in an uninitialized state
+// which will require a new call to InitPlayout(). This test does not call
+// StartPlayout() while being uninitialized since doing so will hit an
+// RTC_DCHECK and death tests are not supported on Android.
+TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
+  EXPECT_EQ(0, audio_device()->InitPlayout());
+  EXPECT_EQ(0, audio_device()->StartPlayout());
+  EXPECT_EQ(0, audio_device()->StopPlayout());
+  EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+}
+
+// Verify that calling StopRecording() will leave us in an uninitialized state
+// which will require a new call to InitRecording(). This test does not call
+// StartRecording() while being uninitialized since doing so will hit an
+// RTC_DCHECK and death tests are not supported on Android.
+TEST_F(AudioDeviceTest, StopRecordingRequiresInitToRestart) {
+  EXPECT_EQ(0, audio_device()->InitRecording());
+  EXPECT_EQ(0, audio_device()->StartRecording());
+  EXPECT_EQ(0, audio_device()->StopRecording());
+  EXPECT_FALSE(audio_device()->RecordingIsInitialized());
+}
+
+// Start playout and verify that the native audio layer starts asking for real
+// audio samples to play out using the NeedMorePlayData callback.
+TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
+  MockAudioTransportAndroid mock(kPlayout);
+  mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
+  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
+                                     kBytesPerSample, playout_channels(),
+                                     playout_sample_rate(), NotNull(), _, _, _))
+      .Times(AtLeast(kNumCallbacks));
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartPlayout();
+  test_is_done_.Wait(kTestTimeOutInMilliseconds);
+  StopPlayout();
+}
+
+// Start recording and verify that the native audio layer starts feeding real
+// audio samples via the RecordedDataIsAvailable callback.
+// TODO(henrika): investigate if it is possible to perform a sanity check of
+// delay estimates as well (argument #6).
+TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
+  MockAudioTransportAndroid mock(kRecording);
+  mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
+  EXPECT_CALL(
+      mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
+                                    kBytesPerSample, record_channels(),
+                                    record_sample_rate(), _, 0, 0, false, _, _))
+      .Times(AtLeast(kNumCallbacks));
+
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartRecording();
+  test_is_done_.Wait(kTestTimeOutInMilliseconds);
+  StopRecording();
+}
+
+// Start playout and recording (full-duplex audio) and verify that audio is
+// active in both directions.
+TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
+  MockAudioTransportAndroid mock(kPlayout | kRecording);
+  mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
+  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
+                                     kBytesPerSample, playout_channels(),
+                                     playout_sample_rate(), NotNull(), _, _, _))
+      .Times(AtLeast(kNumCallbacks));
+  EXPECT_CALL(
+      mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
+                                    kBytesPerSample, record_channels(),
+                                    record_sample_rate(), _, 0, 0, false, _, _))
+      .Times(AtLeast(kNumCallbacks));
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartPlayout();
+  StartRecording();
+  test_is_done_.Wait(kTestTimeOutInMilliseconds);
+  StopRecording();
+  StopPlayout();
+}
+
+// Start playout and read audio from an external PCM file when the audio layer
+// asks for data to play out. Real audio is played out in this test but it does
+// not contain any explicit verification that the audio quality is perfect.
+TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
+  // TODO(henrika): extend test when mono output is supported.
+  EXPECT_EQ(1u, playout_channels());
+  NiceMock<MockAudioTransportAndroid> mock(kPlayout);
+  const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
+  std::string file_name = GetFileName(playout_sample_rate());
+  std::unique_ptr<FileAudioStream> file_audio_stream(
+      new FileAudioStream(num_callbacks, file_name, playout_sample_rate()));
+  mock.HandleCallbacks(&test_is_done_, file_audio_stream.get(), num_callbacks);
+  // SetMaxPlayoutVolume();
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartPlayout();
+  test_is_done_.Wait(kTestTimeOutInMilliseconds);
+  StopPlayout();
+}
+
+// Start playout and recording and store recorded data in an intermediate FIFO
+// buffer from which the playout side then reads its samples in the same order
+// as they were stored. Under ideal circumstances, a callback sequence would
+// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
+// means 'packet played'. Under such conditions, the FIFO would only contain
+// one packet on average. However, under more realistic conditions, the size
+// of the FIFO will vary more due to an unbalance between the two sides.
+// This test tries to verify that the device maintains a balanced callback
+// sequence by running in loopback for `kFullDuplexTimeInSec` seconds while
+// measuring the size (max and average) of the FIFO. The size of the FIFO is
+// increased by the recording side and decreased by the playout side.
+// TODO(henrika): tune the final test parameters after running tests on several
+// different devices.
+// Disabling this test on bots since it is difficult to come up with a robust
+// test condition that works as intended on all devices. The main issue is
+// that, when swarming is used, an initial latency can build up when the two
+// sides start at different times. Hence, the test can fail even if audio works
+// as intended. Keeping the test so it can be enabled manually.
+// http://bugs.webrtc.org/7744
+TEST_F(AudioDeviceTest, DISABLED_RunPlayoutAndRecordingInFullDuplex) {
+  EXPECT_EQ(record_channels(), playout_channels());
+  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
+  NiceMock<MockAudioTransportAndroid> mock(kPlayout | kRecording);
+  std::unique_ptr<FifoAudioStream> fifo_audio_stream(
+      new FifoAudioStream(playout_frames_per_10ms_buffer()));
+  mock.HandleCallbacks(&test_is_done_, fifo_audio_stream.get(),
+                       kFullDuplexTimeInSec * kNumCallbacksPerSecond);
+  SetMaxPlayoutVolume();
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartRecording();
+  StartPlayout();
+  test_is_done_.Wait(
+      std::max(kTestTimeOutInMilliseconds, 1000 * kFullDuplexTimeInSec));
+  StopPlayout();
+  StopRecording();
+
+  // These thresholds are set rather high to accommodate hardware differences
+  // across devices, so this test can be used in swarming.
+  // See http://bugs.webrtc.org/6464
+  EXPECT_LE(fifo_audio_stream->average_size(), 60u);
+  EXPECT_LE(fifo_audio_stream->largest_size(), 70u);
+}
+
+// Measures loopback latency and reports the min, max and average values for
+// a full duplex audio session.
+// The latency is measured like so:
+// - Insert impulses periodically on the output side.
+// - Detect the impulses on the input side.
+// - Measure the time difference between the transmit time and receive time.
+// - Store time differences in a vector and calculate min, max and average.
+// This test requires special hardware called an Audio Loopback Dongle.
+// See http://source.android.com/devices/audio/loopback.html for details.
+TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
+  EXPECT_EQ(record_channels(), playout_channels());
+  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
+  NiceMock<MockAudioTransportAndroid> mock(kPlayout | kRecording);
+  std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
+      new LatencyMeasuringAudioStream(playout_frames_per_10ms_buffer()));
+  mock.HandleCallbacks(&test_is_done_, latency_audio_stream.get(),
+                       kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  SetMaxPlayoutVolume();
+  DisableBuiltInAECIfAvailable();
+  StartRecording();
+  StartPlayout();
+  test_is_done_.Wait(
+      std::max(kTestTimeOutInMilliseconds, 1000 * kMeasureLatencyTimeInSec));
+  StopPlayout();
+  StopRecording();
+  // Verify that the correct number of transmitted impulses are detected.
+  EXPECT_EQ(latency_audio_stream->num_latency_values(),
+            static_cast<size_t>(
+                kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1));
+  latency_audio_stream->PrintResults();
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_device/android/audio_manager.cc b/modules/audio_device/android/audio_manager.cc
new file mode 100644
index 0000000..0b55496
--- /dev/null
+++ b/modules/audio_device/android/audio_manager.cc
@@ -0,0 +1,318 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/audio_manager.h"
+
+#include <utility>
+
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/utility/include/helpers_android.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+
+namespace webrtc {
+
+// AudioManager::JavaAudioManager implementation
+AudioManager::JavaAudioManager::JavaAudioManager(
+    NativeRegistration* native_reg,
+    std::unique_ptr<GlobalRef> audio_manager)
+    : audio_manager_(std::move(audio_manager)),
+      init_(native_reg->GetMethodId("init", "()Z")),
+      dispose_(native_reg->GetMethodId("dispose", "()V")),
+      is_communication_mode_enabled_(
+          native_reg->GetMethodId("isCommunicationModeEnabled", "()Z")),
+      is_device_blacklisted_for_open_sles_usage_(
+          native_reg->GetMethodId("isDeviceBlacklistedForOpenSLESUsage",
+                                  "()Z")) {
+  RTC_LOG(LS_INFO) << "JavaAudioManager::ctor";
+}
+
+AudioManager::JavaAudioManager::~JavaAudioManager() {
+  RTC_LOG(LS_INFO) << "JavaAudioManager::~dtor";
+}
+
+bool AudioManager::JavaAudioManager::Init() {
+  return audio_manager_->CallBooleanMethod(init_);
+}
+
+void AudioManager::JavaAudioManager::Close() {
+  audio_manager_->CallVoidMethod(dispose_);
+}
+
+bool AudioManager::JavaAudioManager::IsCommunicationModeEnabled() {
+  return audio_manager_->CallBooleanMethod(is_communication_mode_enabled_);
+}
+
+bool AudioManager::JavaAudioManager::IsDeviceBlacklistedForOpenSLESUsage() {
+  return audio_manager_->CallBooleanMethod(
+      is_device_blacklisted_for_open_sles_usage_);
+}
+
+// AudioManager implementation
+AudioManager::AudioManager()
+    : j_environment_(JVM::GetInstance()->environment()),
+      audio_layer_(AudioDeviceModule::kPlatformDefaultAudio),
+      initialized_(false),
+      hardware_aec_(false),
+      hardware_agc_(false),
+      hardware_ns_(false),
+      low_latency_playout_(false),
+      low_latency_record_(false),
+      delay_estimate_in_milliseconds_(0) {
+  RTC_LOG(LS_INFO) << "ctor";
+  RTC_CHECK(j_environment_);
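+  // Register the native callback below with the Java class and create the
+  // Java peer object, passing a pointer to this instance so that the Java
+  // side can call back into it via nativeCacheAudioParameters().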
+  JNINativeMethod native_methods[] = {
+      {"nativeCacheAudioParameters", "(IIIZZZZZZZIIJ)V",
+       reinterpret_cast<void*>(&webrtc::AudioManager::CacheAudioParameters)}};
+  j_native_registration_ = j_environment_->RegisterNatives(
+      "org/webrtc/voiceengine/WebRtcAudioManager", native_methods,
+      arraysize(native_methods));
+  j_audio_manager_.reset(
+      new JavaAudioManager(j_native_registration_.get(),
+                           j_native_registration_->NewObject(
+                               "<init>", "(J)V", PointerTojlong(this))));
+}
+
+AudioManager::~AudioManager() {
+  RTC_LOG(LS_INFO) << "dtor";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  Close();
+}
+
+void AudioManager::SetActiveAudioLayer(
+    AudioDeviceModule::AudioLayer audio_layer) {
+  RTC_LOG(LS_INFO) << "SetActiveAudioLayer: " << audio_layer;
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(!initialized_);
+  // Store the currently utilized audio layer.
+  audio_layer_ = audio_layer;
+  // The delay estimate can take one of two fixed values depending on whether
+  // the device supports low-latency output. However, it is also possible that
+  // the user explicitly selects the high-latency audio path, hence we use the
+  // selected `audio_layer` here to set the delay estimate.
+  delay_estimate_in_milliseconds_ =
+      (audio_layer == AudioDeviceModule::kAndroidJavaAudio)
+          ? kHighLatencyModeDelayEstimateInMilliseconds
+          : kLowLatencyModeDelayEstimateInMilliseconds;
+  RTC_LOG(LS_INFO) << "delay_estimate_in_milliseconds: "
+                   << delay_estimate_in_milliseconds_;
+}
+
+SLObjectItf AudioManager::GetOpenSLEngine() {
+  RTC_LOG(LS_INFO) << "GetOpenSLEngine";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  // Only allow usage of OpenSL ES if such an audio layer has been specified.
+  if (audio_layer_ != AudioDeviceModule::kAndroidOpenSLESAudio &&
+      audio_layer_ !=
+          AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio) {
+    RTC_LOG(LS_INFO)
+        << "Unable to create OpenSL engine for the current audio layer: "
+        << audio_layer_;
+    return nullptr;
+  }
+  // OpenSL ES for Android only supports a single engine per application.
+  // If one has already been created, return the existing object instead of
+  // creating a new one.
+  if (engine_object_.Get() != nullptr) {
+    RTC_LOG(LS_WARNING)
+        << "The OpenSL ES engine object has already been created";
+    return engine_object_.Get();
+  }
+  // Create the engine object in thread safe mode.
+  const SLEngineOption option[] = {
+      {SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE)}};
+  SLresult result =
+      slCreateEngine(engine_object_.Receive(), 1, option, 0, NULL, NULL);
+  if (result != SL_RESULT_SUCCESS) {
+    RTC_LOG(LS_ERROR) << "slCreateEngine() failed: "
+                      << GetSLErrorString(result);
+    engine_object_.Reset();
+    return nullptr;
+  }
+  // Realize the SL Engine in synchronous mode.
+  result = engine_object_->Realize(engine_object_.Get(), SL_BOOLEAN_FALSE);
+  if (result != SL_RESULT_SUCCESS) {
+    RTC_LOG(LS_ERROR) << "Realize() failed: " << GetSLErrorString(result);
+    engine_object_.Reset();
+    return nullptr;
+  }
+  // Finally return the SLObjectItf interface of the engine object.
+  return engine_object_.Get();
+}
+
+bool AudioManager::Init() {
+  RTC_LOG(LS_INFO) << "Init";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK_NE(audio_layer_, AudioDeviceModule::kPlatformDefaultAudio);
+  if (!j_audio_manager_->Init()) {
+    RTC_LOG(LS_ERROR) << "Init() failed";
+    return false;
+  }
+  initialized_ = true;
+  return true;
+}
+
+bool AudioManager::Close() {
+  RTC_LOG(LS_INFO) << "Close";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (!initialized_)
+    return true;
+  j_audio_manager_->Close();
+  initialized_ = false;
+  return true;
+}
+
+bool AudioManager::IsCommunicationModeEnabled() const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return j_audio_manager_->IsCommunicationModeEnabled();
+}
+
+bool AudioManager::IsAcousticEchoCancelerSupported() const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return hardware_aec_;
+}
+
+bool AudioManager::IsAutomaticGainControlSupported() const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return hardware_agc_;
+}
+
+bool AudioManager::IsNoiseSuppressorSupported() const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return hardware_ns_;
+}
+
+bool AudioManager::IsLowLatencyPlayoutSupported() const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  // Some devices are blacklisted for usage of OpenSL ES even if they report
+  // that low-latency playout is supported. See b/21485703 for details.
+  return j_audio_manager_->IsDeviceBlacklistedForOpenSLESUsage()
+             ? false
+             : low_latency_playout_;
+}
+
+bool AudioManager::IsLowLatencyRecordSupported() const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return low_latency_record_;
+}
+
+bool AudioManager::IsProAudioSupported() const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  // TODO(henrika): for now, return the state independently of whether OpenSL
+  // ES is blacklisted or not. We could use the same approach as in
+  // IsLowLatencyPlayoutSupported() but I can't see the need for it yet.
+  return pro_audio_;
+}
+
+// TODO(henrika): improve comments...
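+// Returns the a_audio_ flag cached from the Java side when AAudio support is
+// compiled in (WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO); otherwise always
+// returns false.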
+bool AudioManager::IsAAudioSupported() const {
+#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+  return a_audio_;
+#else
+  return false;
+#endif
+}
+
+bool AudioManager::IsStereoPlayoutSupported() const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return (playout_parameters_.channels() == 2);
+}
+
+bool AudioManager::IsStereoRecordSupported() const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return (record_parameters_.channels() == 2);
+}
+
+int AudioManager::GetDelayEstimateInMilliseconds() const {
+  return delay_estimate_in_milliseconds_;
+}
+
+JNI_FUNCTION_ALIGN
+void JNICALL AudioManager::CacheAudioParameters(JNIEnv* env,
+                                                jobject obj,
+                                                jint sample_rate,
+                                                jint output_channels,
+                                                jint input_channels,
+                                                jboolean hardware_aec,
+                                                jboolean hardware_agc,
+                                                jboolean hardware_ns,
+                                                jboolean low_latency_output,
+                                                jboolean low_latency_input,
+                                                jboolean pro_audio,
+                                                jboolean a_audio,
+                                                jint output_buffer_size,
+                                                jint input_buffer_size,
+                                                jlong native_audio_manager) {
+  webrtc::AudioManager* this_object =
+      reinterpret_cast<webrtc::AudioManager*>(native_audio_manager);
+  this_object->OnCacheAudioParameters(
+      env, sample_rate, output_channels, input_channels, hardware_aec,
+      hardware_agc, hardware_ns, low_latency_output, low_latency_input,
+      pro_audio, a_audio, output_buffer_size, input_buffer_size);
+}
+
+void AudioManager::OnCacheAudioParameters(JNIEnv* env,
+                                          jint sample_rate,
+                                          jint output_channels,
+                                          jint input_channels,
+                                          jboolean hardware_aec,
+                                          jboolean hardware_agc,
+                                          jboolean hardware_ns,
+                                          jboolean low_latency_output,
+                                          jboolean low_latency_input,
+                                          jboolean pro_audio,
+                                          jboolean a_audio,
+                                          jint output_buffer_size,
+                                          jint input_buffer_size) {
+  RTC_LOG(LS_INFO)
+      << "OnCacheAudioParameters: "
+         "hardware_aec: "
+      << static_cast<bool>(hardware_aec)
+      << ", hardware_agc: " << static_cast<bool>(hardware_agc)
+      << ", hardware_ns: " << static_cast<bool>(hardware_ns)
+      << ", low_latency_output: " << static_cast<bool>(low_latency_output)
+      << ", low_latency_input: " << static_cast<bool>(low_latency_input)
+      << ", pro_audio: " << static_cast<bool>(pro_audio)
+      << ", a_audio: " << static_cast<bool>(a_audio)
+      << ", sample_rate: " << static_cast<int>(sample_rate)
+      << ", output_channels: " << static_cast<int>(output_channels)
+      << ", input_channels: " << static_cast<int>(input_channels)
+      << ", output_buffer_size: " << static_cast<int>(output_buffer_size)
+      << ", input_buffer_size: " << static_cast<int>(input_buffer_size);
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  hardware_aec_ = hardware_aec;
+  hardware_agc_ = hardware_agc;
+  hardware_ns_ = hardware_ns;
+  low_latency_playout_ = low_latency_output;
+  low_latency_record_ = low_latency_input;
+  pro_audio_ = pro_audio;
+  a_audio_ = a_audio;
+  playout_parameters_.reset(sample_rate, static_cast<size_t>(output_channels),
+                            static_cast<size_t>(output_buffer_size));
+  record_parameters_.reset(sample_rate, static_cast<size_t>(input_channels),
+                           static_cast<size_t>(input_buffer_size));
+}
+
+const AudioParameters& AudioManager::GetPlayoutAudioParameters() {
+  RTC_CHECK(playout_parameters_.is_valid());
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return playout_parameters_;
+}
+
+const AudioParameters& AudioManager::GetRecordAudioParameters() {
+  RTC_CHECK(record_parameters_.is_valid());
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return record_parameters_;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_device/android/audio_manager.h b/modules/audio_device/android/audio_manager.h
new file mode 100644
index 0000000..900fc78
--- /dev/null
+++ b/modules/audio_device/android/audio_manager.h
@@ -0,0 +1,225 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
+
+#include <SLES/OpenSLES.h>
+#include <jni.h>
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/audio_device/android/opensles_common.h"
+#include "modules/audio_device/audio_device_config.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/utility/include/helpers_android.h"
+#include "modules/utility/include/jvm_android.h"
+
+namespace webrtc {
+
+// Implements support for functions in the WebRTC audio stack for Android that
+// rely on the AudioManager in android.media. It also populates an
+// AudioParameters structure with native audio parameters detected at
+// construction. This class does not make any audio-related modifications
+// unless Init() is called. Caching the audio parameters makes no changes; it
+// only reads data from the Java side.
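+//
+// A minimal usage sketch (mirroring the sequence used by the unit tests; all
+// calls are made on the thread that created the object):
+//
+//   AudioManager audio_manager;
+//   audio_manager.SetActiveAudioLayer(AudioDeviceModule::kAndroidJavaAudio);
+//   audio_manager.Init();
+//   const AudioParameters& out = audio_manager.GetPlayoutAudioParameters();
+//   const AudioParameters& in = audio_manager.GetRecordAudioParameters();
+//   audio_manager.Close();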
+class AudioManager {
+ public:
+  // Wraps the Java specific parts of the AudioManager into one helper class.
+  // Stores method IDs for all supported methods at construction and then
+  // allows calls like JavaAudioManager::Close() while hiding the Java/JNI
+  // parts that are associated with this call.
+  class JavaAudioManager {
+   public:
+    JavaAudioManager(NativeRegistration* native_registration,
+                     std::unique_ptr<GlobalRef> audio_manager);
+    ~JavaAudioManager();
+
+    bool Init();
+    void Close();
+    bool IsCommunicationModeEnabled();
+    bool IsDeviceBlacklistedForOpenSLESUsage();
+
+   private:
+    std::unique_ptr<GlobalRef> audio_manager_;
+    jmethodID init_;
+    jmethodID dispose_;
+    jmethodID is_communication_mode_enabled_;
+    jmethodID is_device_blacklisted_for_open_sles_usage_;
+  };
+
+  AudioManager();
+  ~AudioManager();
+
+  // Sets the currently active audio layer combination. Must be called before
+  // Init().
+  void SetActiveAudioLayer(AudioDeviceModule::AudioLayer audio_layer);
+
+  // Creates and realizes the main (global) OpenSL engine object and returns
+  // a reference to it. The engine object is only created at the first call
+  // since OpenSL ES for Android only supports a single engine per application.
+  // Subsequent calls return the already created engine. The SL engine object
+  // is destroyed when the AudioManager object is deleted. This means that the
+  // engine object will be the first OpenSL ES object to be created and the
+  // last to be destroyed.
+  // Note that NULL will be returned unless the audio layer is specified as
+  // AudioDeviceModule::kAndroidOpenSLESAudio or
+  // AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio.
+  SLObjectItf GetOpenSLEngine();
+
+  // Initializes the audio manager and stores the current audio mode.
+  bool Init();
+  // Revert any setting done by Init().
+  bool Close();
+
+  // Returns true if current audio mode is AudioManager.MODE_IN_COMMUNICATION.
+  bool IsCommunicationModeEnabled() const;
+
+  // Native audio parameters stored during construction.
+  const AudioParameters& GetPlayoutAudioParameters();
+  const AudioParameters& GetRecordAudioParameters();
+
+  // Returns true if the device supports built-in audio effects for AEC, AGC
+  // and NS. Some devices can also be blacklisted for use in combination with
+  // platform effects, and these devices will return false.
+  // Can currently only be used in combination with a Java based audio backend
+  // for the recording side (i.e. using the android.media.AudioRecord API).
+  bool IsAcousticEchoCancelerSupported() const;
+  bool IsAutomaticGainControlSupported() const;
+  bool IsNoiseSuppressorSupported() const;
+
+  // Returns true if the device supports the low-latency audio paths in
+  // combination with OpenSL ES.
+  bool IsLowLatencyPlayoutSupported() const;
+  bool IsLowLatencyRecordSupported() const;
+
+  // Returns true if the device supports (and has been configured for) stereo.
+  // Call the Java API WebRtcAudioManager.setStereoOutput/Input() with true as
+  // parameter to enable stereo. Default is mono in both directions and the
+  // setting is set once and for all when the audio manager object is created.
+  // TODO(henrika): stereo is not supported in combination with OpenSL ES.
+  bool IsStereoPlayoutSupported() const;
+  bool IsStereoRecordSupported() const;
+
+  // Returns true if the device supports pro-audio features in combination with
+  // OpenSL ES.
+  bool IsProAudioSupported() const;
+
+  // Returns true if the device supports AAudio.
+  bool IsAAudioSupported() const;
+
+  // Returns the estimated total delay of this device in milliseconds.
+  // The value is set once at construction and never changes after that.
+  // Possible values are webrtc::kLowLatencyModeDelayEstimateInMilliseconds and
+  // webrtc::kHighLatencyModeDelayEstimateInMilliseconds.
+  int GetDelayEstimateInMilliseconds() const;
+
+ private:
+  // Called from Java side so we can cache the native audio parameters.
+  // This method will be called by the WebRtcAudioManager constructor, i.e.
+  // on the same thread that this object is created on.
+  static void JNICALL CacheAudioParameters(JNIEnv* env,
+                                           jobject obj,
+                                           jint sample_rate,
+                                           jint output_channels,
+                                           jint input_channels,
+                                           jboolean hardware_aec,
+                                           jboolean hardware_agc,
+                                           jboolean hardware_ns,
+                                           jboolean low_latency_output,
+                                           jboolean low_latency_input,
+                                           jboolean pro_audio,
+                                           jboolean a_audio,
+                                           jint output_buffer_size,
+                                           jint input_buffer_size,
+                                           jlong native_audio_manager);
+  void OnCacheAudioParameters(JNIEnv* env,
+                              jint sample_rate,
+                              jint output_channels,
+                              jint input_channels,
+                              jboolean hardware_aec,
+                              jboolean hardware_agc,
+                              jboolean hardware_ns,
+                              jboolean low_latency_output,
+                              jboolean low_latency_input,
+                              jboolean pro_audio,
+                              jboolean a_audio,
+                              jint output_buffer_size,
+                              jint input_buffer_size);
+
+  // Stores thread ID in the constructor.
+  // We can then use RTC_DCHECK_RUN_ON(&thread_checker_) to ensure that
+  // other methods are called from the same thread.
+  SequenceChecker thread_checker_;
+
+  // Calls JavaVM::AttachCurrentThread() if this thread is not attached at
+  // construction.
+  // Also ensures that DetachCurrentThread() is called at destruction.
+  JvmThreadConnector attach_thread_if_needed_;
+
+  // Wraps the JNI interface pointer and methods associated with it.
+  std::unique_ptr<JNIEnvironment> j_environment_;
+
+  // Contains factory method for creating the Java object.
+  std::unique_ptr<NativeRegistration> j_native_registration_;
+
+  // Wraps the Java specific parts of the AudioManager.
+  std::unique_ptr<AudioManager::JavaAudioManager> j_audio_manager_;
+
+  // Contains the selected audio layer specified by the AudioLayer enumerator
+  // in the AudioDeviceModule class.
+  AudioDeviceModule::AudioLayer audio_layer_;
+
+  // This object is the global entry point of the OpenSL ES API.
+  // After creating the engine object, the application can obtain this object's
+  // SLEngineItf interface. This interface contains creation methods for all
+  // the other object types in the API. None of these interfaces are realized
+  // by this class. It only provides access to the global engine object.
+  webrtc::ScopedSLObjectItf engine_object_;
+
+  // Set to true by Init() and false by Close().
+  bool initialized_;
+
+  // True if device supports hardware (or built-in) AEC.
+  bool hardware_aec_;
+  // True if device supports hardware (or built-in) AGC.
+  bool hardware_agc_;
+  // True if device supports hardware (or built-in) NS.
+  bool hardware_ns_;
+
+  // True if device supports the low-latency OpenSL ES audio path for output.
+  bool low_latency_playout_;
+
+  // True if device supports the low-latency OpenSL ES audio path for input.
+  bool low_latency_record_;
+
+  // True if device supports the low-latency OpenSL ES pro-audio path.
+  bool pro_audio_;
+
+  // True if device supports the low-latency AAudio audio path.
+  bool a_audio_;
+
+  // The delay estimate can take one of two fixed values depending on whether
+  // the device supports low-latency output.
+  int delay_estimate_in_milliseconds_;
+
+  // Contains native parameters (e.g. sample rate, channel configuration).
+  // Set at construction in OnCacheAudioParameters() which is called from
+  // Java on the same thread as this object is created on.
+  AudioParameters playout_parameters_;
+  AudioParameters record_parameters_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
diff --git a/modules/audio_device/android/audio_manager_unittest.cc b/modules/audio_device/android/audio_manager_unittest.cc
new file mode 100644
index 0000000..093eddd
--- /dev/null
+++ b/modules/audio_device/android/audio_manager_unittest.cc
@@ -0,0 +1,239 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/audio_manager.h"
+
+#include <SLES/OpenSLES_Android.h>
+
+#include "modules/audio_device/android/build_info.h"
+#include "modules/audio_device/android/ensure_initialized.h"
+#include "rtc_base/arraysize.h"
+#include "test/gtest.h"
+
+#define PRINT(...) fprintf(stderr, __VA_ARGS__);
+
+namespace webrtc {
+
+static const char kTag[] = "  ";
+
+class AudioManagerTest : public ::testing::Test {
+ protected:
+  AudioManagerTest() {
+    // One-time initialization of JVM and application context. Ensures that we
+    // can do calls between C++ and Java.
+    webrtc::audiodevicemodule::EnsureInitialized();
+    audio_manager_.reset(new AudioManager());
+    SetActiveAudioLayer();
+    playout_parameters_ = audio_manager()->GetPlayoutAudioParameters();
+    record_parameters_ = audio_manager()->GetRecordAudioParameters();
+  }
+
+  AudioManager* audio_manager() const { return audio_manager_.get(); }
+
+  // A valid audio layer must always be set before calling Init(), hence we
+  // might as well make it a part of the test fixture.
+  void SetActiveAudioLayer() {
+    EXPECT_EQ(0, audio_manager()->GetDelayEstimateInMilliseconds());
+    audio_manager()->SetActiveAudioLayer(AudioDeviceModule::kAndroidJavaAudio);
+    EXPECT_NE(0, audio_manager()->GetDelayEstimateInMilliseconds());
+  }
+
+  // One way to ensure that the engine object is valid is to obtain the
+  // SL Engine interface since it exposes creation methods for all the OpenSL
+  // ES object types and is only supported on the engine object. This method
+  // also verifies that the engine interface supports at least one interface.
+  // Note that the test below is not a full test of the SLEngineItf object but
+  // only a simple sanity test to check that the global engine object is OK.
+  void ValidateSLEngine(SLObjectItf engine_object) {
+    EXPECT_NE(nullptr, engine_object);
+    // Get the SL Engine interface which is exposed by the engine object.
+    SLEngineItf engine;
+    SLresult result =
+        (*engine_object)->GetInterface(engine_object, SL_IID_ENGINE, &engine);
+    EXPECT_EQ(result, SL_RESULT_SUCCESS) << "GetInterface() on engine failed";
+    // Ensure that the SL Engine interface exposes at least one interface.
+    SLuint32 object_id = SL_OBJECTID_ENGINE;
+    SLuint32 num_supported_interfaces = 0;
+    result = (*engine)->QueryNumSupportedInterfaces(engine, object_id,
+                                                    &num_supported_interfaces);
+    EXPECT_EQ(result, SL_RESULT_SUCCESS)
+        << "QueryNumSupportedInterfaces() failed";
+    EXPECT_GE(num_supported_interfaces, 1u);
+  }
+
+  std::unique_ptr<AudioManager> audio_manager_;
+  AudioParameters playout_parameters_;
+  AudioParameters record_parameters_;
+};
+
+TEST_F(AudioManagerTest, ConstructDestruct) {}
+
+// It should not be possible to create an OpenSL engine object if Java based
+// audio is requested in both directions.
+TEST_F(AudioManagerTest, GetOpenSLEngineShouldFailForJavaAudioLayer) {
+  audio_manager()->SetActiveAudioLayer(AudioDeviceModule::kAndroidJavaAudio);
+  SLObjectItf engine_object = audio_manager()->GetOpenSLEngine();
+  EXPECT_EQ(nullptr, engine_object);
+}
+
+// It should be possible to create an OpenSL engine object if OpenSL ES based
+// audio is requested in any direction.
+TEST_F(AudioManagerTest, GetOpenSLEngineShouldSucceedForOpenSLESAudioLayer) {
+  // List of supported audio layers that use OpenSL ES audio.
+  const AudioDeviceModule::AudioLayer opensles_audio[] = {
+      AudioDeviceModule::kAndroidOpenSLESAudio,
+      AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio};
+  // Verify that the global (singleton) OpenSL Engine can be acquired for all
+  // audio layers that use OpenSL ES. Note that the engine is only created once.
+  for (const AudioDeviceModule::AudioLayer audio_layer : opensles_audio) {
+    audio_manager()->SetActiveAudioLayer(audio_layer);
+    SLObjectItf engine_object = audio_manager()->GetOpenSLEngine();
+    EXPECT_NE(nullptr, engine_object);
+    // Perform a simple sanity check of the created engine object.
+    ValidateSLEngine(engine_object);
+  }
+}
+
+TEST_F(AudioManagerTest, InitClose) {
+  EXPECT_TRUE(audio_manager()->Init());
+  EXPECT_TRUE(audio_manager()->Close());
+}
+
+TEST_F(AudioManagerTest, IsAcousticEchoCancelerSupported) {
+  PRINT("%sAcoustic Echo Canceler support: %s\n", kTag,
+        audio_manager()->IsAcousticEchoCancelerSupported() ? "Yes" : "No");
+}
+
+TEST_F(AudioManagerTest, IsAutomaticGainControlSupported) {
+  EXPECT_FALSE(audio_manager()->IsAutomaticGainControlSupported());
+}
+
+TEST_F(AudioManagerTest, IsNoiseSuppressorSupported) {
+  PRINT("%sNoise Suppressor support: %s\n", kTag,
+        audio_manager()->IsNoiseSuppressorSupported() ? "Yes" : "No");
+}
+
+TEST_F(AudioManagerTest, IsLowLatencyPlayoutSupported) {
+  PRINT("%sLow latency output support: %s\n", kTag,
+        audio_manager()->IsLowLatencyPlayoutSupported() ? "Yes" : "No");
+}
+
+TEST_F(AudioManagerTest, IsLowLatencyRecordSupported) {
+  PRINT("%sLow latency input support: %s\n", kTag,
+        audio_manager()->IsLowLatencyRecordSupported() ? "Yes" : "No");
+}
+
+TEST_F(AudioManagerTest, IsProAudioSupported) {
+  PRINT("%sPro audio support: %s\n", kTag,
+        audio_manager()->IsProAudioSupported() ? "Yes" : "No");
+}
+
+// Verify that playout side is configured for mono by default.
+TEST_F(AudioManagerTest, IsStereoPlayoutSupported) {
+  EXPECT_FALSE(audio_manager()->IsStereoPlayoutSupported());
+}
+
+// Verify that recording side is configured for mono by default.
+TEST_F(AudioManagerTest, IsStereoRecordSupported) {
+  EXPECT_FALSE(audio_manager()->IsStereoRecordSupported());
+}
+
+TEST_F(AudioManagerTest, ShowAudioParameterInfo) {
+  const bool low_latency_out = audio_manager()->IsLowLatencyPlayoutSupported();
+  const bool low_latency_in = audio_manager()->IsLowLatencyRecordSupported();
+  PRINT("PLAYOUT:\n");
+  PRINT("%saudio layer: %s\n", kTag,
+        low_latency_out ? "Low latency OpenSL" : "Java/JNI based AudioTrack");
+  PRINT("%ssample rate: %d Hz\n", kTag, playout_parameters_.sample_rate());
+  PRINT("%schannels: %zu\n", kTag, playout_parameters_.channels());
+  PRINT("%sframes per buffer: %zu <=> %.2f ms\n", kTag,
+        playout_parameters_.frames_per_buffer(),
+        playout_parameters_.GetBufferSizeInMilliseconds());
+  PRINT("RECORD: \n");
+  PRINT("%saudio layer: %s\n", kTag,
+        low_latency_in ? "Low latency OpenSL" : "Java/JNI based AudioRecord");
+  PRINT("%ssample rate: %d Hz\n", kTag, record_parameters_.sample_rate());
+  PRINT("%schannels: %zu\n", kTag, record_parameters_.channels());
+  PRINT("%sframes per buffer: %zu <=> %.2f ms\n", kTag,
+        record_parameters_.frames_per_buffer(),
+        record_parameters_.GetBufferSizeInMilliseconds());
+}
+
+// The audio device module only supports the same sample rate in both
+// directions.
+// In addition, in full-duplex low-latency mode (OpenSL ES), both input and
+// output must use the same native buffer size to allow for usage of the fast
+// audio track in Android.
+TEST_F(AudioManagerTest, VerifyAudioParameters) {
+  const bool low_latency_out = audio_manager()->IsLowLatencyPlayoutSupported();
+  const bool low_latency_in = audio_manager()->IsLowLatencyRecordSupported();
+  EXPECT_EQ(playout_parameters_.sample_rate(),
+            record_parameters_.sample_rate());
+  if (low_latency_out && low_latency_in) {
+    EXPECT_EQ(playout_parameters_.frames_per_buffer(),
+              record_parameters_.frames_per_buffer());
+  }
+}
+
+// Add device-specific information to the test for logging purposes.
+TEST_F(AudioManagerTest, ShowDeviceInfo) {
+  BuildInfo build_info;
+  PRINT("%smodel: %s\n", kTag, build_info.GetDeviceModel().c_str());
+  PRINT("%sbrand: %s\n", kTag, build_info.GetBrand().c_str());
+  PRINT("%smanufacturer: %s\n", kTag,
+        build_info.GetDeviceManufacturer().c_str());
+}
+
+// Add Android build information to the test for logging purposes.
+TEST_F(AudioManagerTest, ShowBuildInfo) {
+  BuildInfo build_info;
+  PRINT("%sbuild release: %s\n", kTag, build_info.GetBuildRelease().c_str());
+  PRINT("%sbuild id: %s\n", kTag, build_info.GetAndroidBuildId().c_str());
+  PRINT("%sbuild type: %s\n", kTag, build_info.GetBuildType().c_str());
+  PRINT("%sSDK version: %d\n", kTag, build_info.GetSdkVersion());
+}
+
+// Basic test of the AudioParameters class using default construction where
+// all members are set to zero.
+TEST_F(AudioManagerTest, AudioParametersWithDefaultConstruction) {
+  AudioParameters params;
+  EXPECT_FALSE(params.is_valid());
+  EXPECT_EQ(0, params.sample_rate());
+  EXPECT_EQ(0U, params.channels());
+  EXPECT_EQ(0U, params.frames_per_buffer());
+  EXPECT_EQ(0U, params.frames_per_10ms_buffer());
+  EXPECT_EQ(0U, params.GetBytesPerFrame());
+  EXPECT_EQ(0U, params.GetBytesPerBuffer());
+  EXPECT_EQ(0U, params.GetBytesPer10msBuffer());
+  EXPECT_EQ(0.0f, params.GetBufferSizeInMilliseconds());
+}
+
+// Basic test of the AudioParameters class using non default construction.
+TEST_F(AudioManagerTest, AudioParametersWithNonDefaultConstruction) {
+  const int kSampleRate = 48000;
+  const size_t kChannels = 1;
+  const size_t kFramesPerBuffer = 480;
+  const size_t kFramesPer10msBuffer = 480;
+  const size_t kBytesPerFrame = 2;
+  const float kBufferSizeInMs = 10.0f;
+  AudioParameters params(kSampleRate, kChannels, kFramesPerBuffer);
+  EXPECT_TRUE(params.is_valid());
+  EXPECT_EQ(kSampleRate, params.sample_rate());
+  EXPECT_EQ(kChannels, params.channels());
+  EXPECT_EQ(kFramesPerBuffer, params.frames_per_buffer());
+  EXPECT_EQ(static_cast<size_t>(kSampleRate / 100),
+            params.frames_per_10ms_buffer());
+  EXPECT_EQ(kBytesPerFrame, params.GetBytesPerFrame());
+  EXPECT_EQ(kBytesPerFrame * kFramesPerBuffer, params.GetBytesPerBuffer());
+  EXPECT_EQ(kBytesPerFrame * kFramesPer10msBuffer,
+            params.GetBytesPer10msBuffer());
+  EXPECT_EQ(kBufferSizeInMs, params.GetBufferSizeInMilliseconds());
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_device/android/audio_record_jni.cc b/modules/audio_device/android/audio_record_jni.cc
new file mode 100644
index 0000000..919eabb
--- /dev/null
+++ b/modules/audio_device/android/audio_record_jni.cc
@@ -0,0 +1,280 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/audio_record_jni.h"
+
+#include <string>
+#include <utility>
+
+#include "modules/audio_device/android/audio_common.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/time_utils.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+// Scoped class which logs its lifetime as a UMA statistic. It generates a
+// histogram which measures the time it takes for a method/scope to execute.
+class ScopedHistogramTimer {
+ public:
+  explicit ScopedHistogramTimer(const std::string& name)
+      : histogram_name_(name), start_time_ms_(rtc::TimeMillis()) {}
+  ~ScopedHistogramTimer() {
+    const int64_t life_time_ms = rtc::TimeSince(start_time_ms_);
+    RTC_HISTOGRAM_COUNTS_1000(histogram_name_, life_time_ms);
+    RTC_LOG(LS_INFO) << histogram_name_ << ": " << life_time_ms;
+  }
+
+ private:
+  const std::string histogram_name_;
+  int64_t start_time_ms_;
+};
+}  // namespace
+
+// AudioRecordJni::JavaAudioRecord implementation.
+AudioRecordJni::JavaAudioRecord::JavaAudioRecord(
+    NativeRegistration* native_reg,
+    std::unique_ptr<GlobalRef> audio_record)
+    : audio_record_(std::move(audio_record)),
+      init_recording_(native_reg->GetMethodId("initRecording", "(II)I")),
+      start_recording_(native_reg->GetMethodId("startRecording", "()Z")),
+      stop_recording_(native_reg->GetMethodId("stopRecording", "()Z")),
+      enable_built_in_aec_(native_reg->GetMethodId("enableBuiltInAEC", "(Z)Z")),
+      enable_built_in_ns_(native_reg->GetMethodId("enableBuiltInNS", "(Z)Z")) {}
+
+AudioRecordJni::JavaAudioRecord::~JavaAudioRecord() {}
+
+int AudioRecordJni::JavaAudioRecord::InitRecording(int sample_rate,
+                                                   size_t channels) {
+  return audio_record_->CallIntMethod(init_recording_,
+                                      static_cast<jint>(sample_rate),
+                                      static_cast<jint>(channels));
+}
+
+bool AudioRecordJni::JavaAudioRecord::StartRecording() {
+  return audio_record_->CallBooleanMethod(start_recording_);
+}
+
+bool AudioRecordJni::JavaAudioRecord::StopRecording() {
+  return audio_record_->CallBooleanMethod(stop_recording_);
+}
+
+bool AudioRecordJni::JavaAudioRecord::EnableBuiltInAEC(bool enable) {
+  return audio_record_->CallBooleanMethod(enable_built_in_aec_,
+                                          static_cast<jboolean>(enable));
+}
+
+bool AudioRecordJni::JavaAudioRecord::EnableBuiltInNS(bool enable) {
+  return audio_record_->CallBooleanMethod(enable_built_in_ns_,
+                                          static_cast<jboolean>(enable));
+}
+
+// AudioRecordJni implementation.
+AudioRecordJni::AudioRecordJni(AudioManager* audio_manager)
+    : j_environment_(JVM::GetInstance()->environment()),
+      audio_manager_(audio_manager),
+      audio_parameters_(audio_manager->GetRecordAudioParameters()),
+      total_delay_in_milliseconds_(0),
+      direct_buffer_address_(nullptr),
+      direct_buffer_capacity_in_bytes_(0),
+      frames_per_buffer_(0),
+      initialized_(false),
+      recording_(false),
+      audio_device_buffer_(nullptr) {
+  RTC_LOG(LS_INFO) << "ctor";
+  RTC_DCHECK(audio_parameters_.is_valid());
+  RTC_CHECK(j_environment_);
+  JNINativeMethod native_methods[] = {
+      {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
+       reinterpret_cast<void*>(
+           &webrtc::AudioRecordJni::CacheDirectBufferAddress)},
+      {"nativeDataIsRecorded", "(IJ)V",
+       reinterpret_cast<void*>(&webrtc::AudioRecordJni::DataIsRecorded)}};
+  j_native_registration_ = j_environment_->RegisterNatives(
+      "org/webrtc/voiceengine/WebRtcAudioRecord", native_methods,
+      arraysize(native_methods));
+  j_audio_record_.reset(
+      new JavaAudioRecord(j_native_registration_.get(),
+                          j_native_registration_->NewObject(
+                              "<init>", "(J)V", PointerTojlong(this))));
+  // Detach from this thread since we want to use the checker to verify calls
+  // from the Java based audio thread.
+  thread_checker_java_.Detach();
+}
+
+AudioRecordJni::~AudioRecordJni() {
+  RTC_LOG(LS_INFO) << "dtor";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  Terminate();
+}
+
+int32_t AudioRecordJni::Init() {
+  RTC_LOG(LS_INFO) << "Init";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return 0;
+}
+
+int32_t AudioRecordJni::Terminate() {
+  RTC_LOG(LS_INFO) << "Terminate";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  StopRecording();
+  return 0;
+}
+
+int32_t AudioRecordJni::InitRecording() {
+  RTC_LOG(LS_INFO) << "InitRecording";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK(!recording_);
+  ScopedHistogramTimer timer("WebRTC.Audio.InitRecordingDurationMs");
+  int frames_per_buffer = j_audio_record_->InitRecording(
+      audio_parameters_.sample_rate(), audio_parameters_.channels());
+  if (frames_per_buffer < 0) {
+    direct_buffer_address_ = nullptr;
+    RTC_LOG(LS_ERROR) << "InitRecording failed";
+    return -1;
+  }
+  frames_per_buffer_ = static_cast<size_t>(frames_per_buffer);
+  RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_;
+  const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
+  RTC_CHECK_EQ(direct_buffer_capacity_in_bytes_,
+               frames_per_buffer_ * bytes_per_frame);
+  RTC_CHECK_EQ(frames_per_buffer_, audio_parameters_.frames_per_10ms_buffer());
+  initialized_ = true;
+  return 0;
+}
+
+int32_t AudioRecordJni::StartRecording() {
+  RTC_LOG(LS_INFO) << "StartRecording";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(!recording_);
+  if (!initialized_) {
+    RTC_DLOG(LS_WARNING)
+        << "Recording can not start since InitRecording must succeed first";
+    return 0;
+  }
+  ScopedHistogramTimer timer("WebRTC.Audio.StartRecordingDurationMs");
+  if (!j_audio_record_->StartRecording()) {
+    RTC_LOG(LS_ERROR) << "StartRecording failed";
+    return -1;
+  }
+  recording_ = true;
+  return 0;
+}
+
+int32_t AudioRecordJni::StopRecording() {
+  RTC_LOG(LS_INFO) << "StopRecording";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (!initialized_ || !recording_) {
+    return 0;
+  }
+  if (!j_audio_record_->StopRecording()) {
+    RTC_LOG(LS_ERROR) << "StopRecording failed";
+    return -1;
+  }
+  // If we don't detach here, we will hit an RTC_DCHECK in OnDataIsRecorded()
+  // next time StartRecording() is called since it will create a new Java
+  // thread.
+  thread_checker_java_.Detach();
+  initialized_ = false;
+  recording_ = false;
+  direct_buffer_address_ = nullptr;
+  return 0;
+}
+
+void AudioRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+  RTC_LOG(LS_INFO) << "AttachAudioBuffer";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  audio_device_buffer_ = audioBuffer;
+  const int sample_rate_hz = audio_parameters_.sample_rate();
+  RTC_LOG(LS_INFO) << "SetRecordingSampleRate(" << sample_rate_hz << ")";
+  audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz);
+  const size_t channels = audio_parameters_.channels();
+  RTC_LOG(LS_INFO) << "SetRecordingChannels(" << channels << ")";
+  audio_device_buffer_->SetRecordingChannels(channels);
+  total_delay_in_milliseconds_ =
+      audio_manager_->GetDelayEstimateInMilliseconds();
+  RTC_DCHECK_GT(total_delay_in_milliseconds_, 0);
+  RTC_LOG(LS_INFO) << "total_delay_in_milliseconds: "
+                   << total_delay_in_milliseconds_;
+}
+
+int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) {
+  RTC_LOG(LS_INFO) << "EnableBuiltInAEC(" << enable << ")";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return j_audio_record_->EnableBuiltInAEC(enable) ? 0 : -1;
+}
+
+int32_t AudioRecordJni::EnableBuiltInAGC(bool enable) {
+  // TODO(henrika): possibly remove when no longer used by any client.
+  RTC_CHECK_NOTREACHED();
+}
+
+int32_t AudioRecordJni::EnableBuiltInNS(bool enable) {
+  RTC_LOG(LS_INFO) << "EnableBuiltInNS(" << enable << ")";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return j_audio_record_->EnableBuiltInNS(enable) ? 0 : -1;
+}
+
+JNI_FUNCTION_ALIGN
+void JNICALL AudioRecordJni::CacheDirectBufferAddress(JNIEnv* env,
+                                                      jobject obj,
+                                                      jobject byte_buffer,
+                                                      jlong nativeAudioRecord) {
+  webrtc::AudioRecordJni* this_object =
+      reinterpret_cast<webrtc::AudioRecordJni*>(nativeAudioRecord);
+  this_object->OnCacheDirectBufferAddress(env, byte_buffer);
+}
+
+void AudioRecordJni::OnCacheDirectBufferAddress(JNIEnv* env,
+                                                jobject byte_buffer) {
+  RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(!direct_buffer_address_);
+  direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
+  jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
+  RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity;
+  direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
+}
+
+JNI_FUNCTION_ALIGN
+void JNICALL AudioRecordJni::DataIsRecorded(JNIEnv* env,
+                                            jobject obj,
+                                            jint length,
+                                            jlong nativeAudioRecord) {
+  webrtc::AudioRecordJni* this_object =
+      reinterpret_cast<webrtc::AudioRecordJni*>(nativeAudioRecord);
+  this_object->OnDataIsRecorded(length);
+}
+
+// This method is called on a high-priority thread from Java. The name of
+// the thread is 'AudioRecordThread'.
+void AudioRecordJni::OnDataIsRecorded(int length) {
+  RTC_DCHECK(thread_checker_java_.IsCurrent());
+  if (!audio_device_buffer_) {
+    RTC_LOG(LS_ERROR) << "AttachAudioBuffer has not been called";
+    return;
+  }
+  audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
+                                          frames_per_buffer_);
+  // We provide one (combined) fixed delay estimate for the APM and use the
+  // `playDelayMs` parameter only. Components like the AEC only see the sum
+  // of `playDelayMs` and `recDelayMs`, hence the distribution does not matter.
+  audio_device_buffer_->SetVQEData(total_delay_in_milliseconds_, 0);
+  if (audio_device_buffer_->DeliverRecordedData() == -1) {
+    RTC_LOG(LS_INFO) << "AudioDeviceBuffer::DeliverRecordedData failed";
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_device/android/audio_record_jni.h b/modules/audio_device/android/audio_record_jni.h
new file mode 100644
index 0000000..66a6a89
--- /dev/null
+++ b/modules/audio_device/android/audio_record_jni.h
@@ -0,0 +1,168 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_RECORD_JNI_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_RECORD_JNI_H_
+
+#include <jni.h>
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/utility/include/helpers_android.h"
+#include "modules/utility/include/jvm_android.h"
+
+namespace webrtc {
+
+// Implements 16-bit mono PCM audio input support for Android using the Java
+// AudioRecord interface. Most of the work is done by its Java counterpart in
+// WebRtcAudioRecord.java. This class is created and lives on a thread in
+// C++-land, but recorded audio buffers are delivered on a high-priority
+// thread managed by the Java class.
+//
+// The Java class makes use of AudioEffect features (mainly AEC) which are
+// first available in Jelly Bean. If it is instantiated running against earlier
+// SDKs, the AEC provided by the APM in WebRTC must be used and enabled
+// separately instead.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread.
+//
+// This class uses JvmThreadConnector to attach to a Java VM if needed
+// and detach when the object goes out of scope. Additional thread checking
+// guarantees that no other (possibly non attached) thread is used.
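+//
+// A possible call sequence (sketch only; the exact ordering of
+// AttachAudioBuffer() relative to InitRecording() is an assumption):
+//
+//   AudioRecordJni record(audio_manager);
+//   record.Init();
+//   record.AttachAudioBuffer(audio_device_buffer);
+//   record.InitRecording();
+//   record.StartRecording();
+//   ...
+//   record.StopRecording();
+//   record.Terminate();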
+class AudioRecordJni {
+ public:
+  // Wraps the Java specific parts of the AudioRecordJni into one helper class.
+  class JavaAudioRecord {
+   public:
+    JavaAudioRecord(NativeRegistration* native_registration,
+                    std::unique_ptr<GlobalRef> audio_track);
+    ~JavaAudioRecord();
+
+    int InitRecording(int sample_rate, size_t channels);
+    bool StartRecording();
+    bool StopRecording();
+    bool EnableBuiltInAEC(bool enable);
+    bool EnableBuiltInNS(bool enable);
+
+   private:
+    std::unique_ptr<GlobalRef> audio_record_;
+    jmethodID init_recording_;
+    jmethodID start_recording_;
+    jmethodID stop_recording_;
+    jmethodID enable_built_in_aec_;
+    jmethodID enable_built_in_ns_;
+  };
+
+  explicit AudioRecordJni(AudioManager* audio_manager);
+  ~AudioRecordJni();
+
+  int32_t Init();
+  int32_t Terminate();
+
+  int32_t InitRecording();
+  bool RecordingIsInitialized() const { return initialized_; }
+
+  int32_t StartRecording();
+  int32_t StopRecording();
+  bool Recording() const { return recording_; }
+
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+  int32_t EnableBuiltInAEC(bool enable);
+  int32_t EnableBuiltInAGC(bool enable);
+  int32_t EnableBuiltInNS(bool enable);
+
+ private:
+  // Called from Java side so we can cache the address of the Java-managed
+  // `byte_buffer` in `direct_buffer_address_`. The size of the buffer
+  // is also stored in `direct_buffer_capacity_in_bytes_`.
+  // This method will be called by the WebRtcAudioRecord constructor, i.e.,
+  // on the same thread that this object is created on.
+  static void JNICALL CacheDirectBufferAddress(JNIEnv* env,
+                                               jobject obj,
+                                               jobject byte_buffer,
+                                               jlong nativeAudioRecord);
+  void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);
+
+  // Called periodically by the Java based WebRtcAudioRecord object when
+  // recording has started. Each call indicates that there are `length` new
+  // bytes recorded in the memory area `direct_buffer_address_` and it is
+  // now time to send these to the consumer.
+  // This method is called on a high-priority thread from Java. The name of
+  // the thread is 'AudioRecordThread'.
+  static void JNICALL DataIsRecorded(JNIEnv* env,
+                                     jobject obj,
+                                     jint length,
+                                     jlong nativeAudioRecord);
+  void OnDataIsRecorded(int length);
+
+  // Stores thread ID in constructor.
+  SequenceChecker thread_checker_;
+
+  // Stores thread ID in first call to OnDataIsRecorded() from high-priority
+  // thread in Java. Detached during construction of this object.
+  SequenceChecker thread_checker_java_;
+
+  // Calls JavaVM::AttachCurrentThread() if this thread is not attached at
+  // construction.
+  // Also ensures that DetachCurrentThread() is called at destruction.
+  JvmThreadConnector attach_thread_if_needed_;
+
+  // Wraps the JNI interface pointer and methods associated with it.
+  std::unique_ptr<JNIEnvironment> j_environment_;
+
+  // Contains factory method for creating the Java object.
+  std::unique_ptr<NativeRegistration> j_native_registration_;
+
+  // Wraps the Java specific parts of the AudioRecordJni class.
+  std::unique_ptr<AudioRecordJni::JavaAudioRecord> j_audio_record_;
+
+  // Raw pointer to the audio manager.
+  const AudioManager* audio_manager_;
+
+  // Contains audio parameters provided to this class at construction by the
+  // AudioManager.
+  const AudioParameters audio_parameters_;
+
+  // Delay estimate of the total round-trip delay (input + output).
+  // Fixed value set once in AttachAudioBuffer() and it can take one out of two
+  // possible values. See audio_common.h for details.
+  int total_delay_in_milliseconds_;
+
+  // Cached copy of address to direct audio buffer owned by `j_audio_record_`.
+  void* direct_buffer_address_;
+
+  // Number of bytes in the direct audio buffer owned by `j_audio_record_`.
+  size_t direct_buffer_capacity_in_bytes_;
+
+  // Number of audio frames per audio buffer. Each audio frame corresponds to
+  // one sample of PCM mono data at 16 bits per sample. Hence, each audio
+  // frame contains 2 bytes (given that the Java layer only supports mono).
+  // Example: 480 for 48000 Hz or 441 for 44100 Hz.
+  size_t frames_per_buffer_;
+
+  bool initialized_;
+
+  bool recording_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
+  AudioDeviceBuffer* audio_device_buffer_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_RECORD_JNI_H_
diff --git a/modules/audio_device/android/audio_track_jni.cc b/modules/audio_device/android/audio_track_jni.cc
new file mode 100644
index 0000000..5afa1ec
--- /dev/null
+++ b/modules/audio_device/android/audio_track_jni.cc
@@ -0,0 +1,296 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/audio_track_jni.h"
+
+#include <utility>
+
+#include "modules/audio_device/android/audio_manager.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "system_wrappers/include/field_trial.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+// AudioTrackJni::JavaAudioTrack implementation.
+AudioTrackJni::JavaAudioTrack::JavaAudioTrack(
+    NativeRegistration* native_reg,
+    std::unique_ptr<GlobalRef> audio_track)
+    : audio_track_(std::move(audio_track)),
+      init_playout_(native_reg->GetMethodId("initPlayout", "(IID)I")),
+      start_playout_(native_reg->GetMethodId("startPlayout", "()Z")),
+      stop_playout_(native_reg->GetMethodId("stopPlayout", "()Z")),
+      set_stream_volume_(native_reg->GetMethodId("setStreamVolume", "(I)Z")),
+      get_stream_max_volume_(
+          native_reg->GetMethodId("getStreamMaxVolume", "()I")),
+      get_stream_volume_(native_reg->GetMethodId("getStreamVolume", "()I")),
+      get_buffer_size_in_frames_(
+          native_reg->GetMethodId("getBufferSizeInFrames", "()I")) {}
+
+AudioTrackJni::JavaAudioTrack::~JavaAudioTrack() {}
+
+bool AudioTrackJni::JavaAudioTrack::InitPlayout(int sample_rate, int channels) {
+  double buffer_size_factor =
+      strtod(webrtc::field_trial::FindFullName(
+                 "WebRTC-AudioDevicePlayoutBufferSizeFactor")
+                 .c_str(),
+             nullptr);
+  if (buffer_size_factor == 0)
+    buffer_size_factor = 1.0;
+  int requested_buffer_size_bytes = audio_track_->CallIntMethod(
+      init_playout_, sample_rate, channels, buffer_size_factor);
+  // Update UMA histograms for both the requested and actual buffer size.
+  if (requested_buffer_size_bytes >= 0) {
+    // To avoid division by zero, we assume the sample rate is 48k if an invalid
+    // value is found.
+    sample_rate = sample_rate <= 0 ? 48000 : sample_rate;
+    // This calculation assumes that audio is mono.
+    const int requested_buffer_size_ms =
+        (requested_buffer_size_bytes * 1000) / (2 * sample_rate);
+    RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AndroidNativeRequestedAudioBufferSizeMs",
+                         requested_buffer_size_ms, 0, 1000, 100);
+    int actual_buffer_size_frames =
+        audio_track_->CallIntMethod(get_buffer_size_in_frames_);
+    if (actual_buffer_size_frames >= 0) {
+      const int actual_buffer_size_ms =
+          actual_buffer_size_frames * 1000 / sample_rate;
+      RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AndroidNativeAudioBufferSizeMs",
+                           actual_buffer_size_ms, 0, 1000, 100);
+    }
+    return true;
+  }
+  return false;
+}
+
+bool AudioTrackJni::JavaAudioTrack::StartPlayout() {
+  return audio_track_->CallBooleanMethod(start_playout_);
+}
+
+bool AudioTrackJni::JavaAudioTrack::StopPlayout() {
+  return audio_track_->CallBooleanMethod(stop_playout_);
+}
+
+bool AudioTrackJni::JavaAudioTrack::SetStreamVolume(int volume) {
+  return audio_track_->CallBooleanMethod(set_stream_volume_, volume);
+}
+
+int AudioTrackJni::JavaAudioTrack::GetStreamMaxVolume() {
+  return audio_track_->CallIntMethod(get_stream_max_volume_);
+}
+
+int AudioTrackJni::JavaAudioTrack::GetStreamVolume() {
+  return audio_track_->CallIntMethod(get_stream_volume_);
+}
+
+// TODO(henrika): possibly extend usage of AudioManager and add it as member.
+AudioTrackJni::AudioTrackJni(AudioManager* audio_manager)
+    : j_environment_(JVM::GetInstance()->environment()),
+      audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
+      direct_buffer_address_(nullptr),
+      direct_buffer_capacity_in_bytes_(0),
+      frames_per_buffer_(0),
+      initialized_(false),
+      playing_(false),
+      audio_device_buffer_(nullptr) {
+  RTC_LOG(LS_INFO) << "ctor";
+  RTC_DCHECK(audio_parameters_.is_valid());
+  RTC_CHECK(j_environment_);
+  JNINativeMethod native_methods[] = {
+      {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
+       reinterpret_cast<void*>(
+           &webrtc::AudioTrackJni::CacheDirectBufferAddress)},
+      {"nativeGetPlayoutData", "(IJ)V",
+       reinterpret_cast<void*>(&webrtc::AudioTrackJni::GetPlayoutData)}};
+  j_native_registration_ = j_environment_->RegisterNatives(
+      "org/webrtc/voiceengine/WebRtcAudioTrack", native_methods,
+      arraysize(native_methods));
+  j_audio_track_.reset(
+      new JavaAudioTrack(j_native_registration_.get(),
+                         j_native_registration_->NewObject(
+                             "<init>", "(J)V", PointerTojlong(this))));
+  // Detach from this thread since we want to use the checker to verify calls
+  // from the Java based audio thread.
+  thread_checker_java_.Detach();
+}
+
+AudioTrackJni::~AudioTrackJni() {
+  RTC_LOG(LS_INFO) << "dtor";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  Terminate();
+}
+
+int32_t AudioTrackJni::Init() {
+  RTC_LOG(LS_INFO) << "Init";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return 0;
+}
+
+int32_t AudioTrackJni::Terminate() {
+  RTC_LOG(LS_INFO) << "Terminate";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  StopPlayout();
+  return 0;
+}
+
+int32_t AudioTrackJni::InitPlayout() {
+  RTC_LOG(LS_INFO) << "InitPlayout";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK(!playing_);
+  if (!j_audio_track_->InitPlayout(audio_parameters_.sample_rate(),
+                                   audio_parameters_.channels())) {
+    RTC_LOG(LS_ERROR) << "InitPlayout failed";
+    return -1;
+  }
+  initialized_ = true;
+  return 0;
+}
+
+int32_t AudioTrackJni::StartPlayout() {
+  RTC_LOG(LS_INFO) << "StartPlayout";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(!playing_);
+  if (!initialized_) {
+    RTC_DLOG(LS_WARNING)
+        << "Playout can not start since InitPlayout must succeed first";
+    return 0;
+  }
+  if (!j_audio_track_->StartPlayout()) {
+    RTC_LOG(LS_ERROR) << "StartPlayout failed";
+    return -1;
+  }
+  playing_ = true;
+  return 0;
+}
+
+int32_t AudioTrackJni::StopPlayout() {
+  RTC_LOG(LS_INFO) << "StopPlayout";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (!initialized_ || !playing_) {
+    return 0;
+  }
+  if (!j_audio_track_->StopPlayout()) {
+    RTC_LOG(LS_ERROR) << "StopPlayout failed";
+    return -1;
+  }
+  // If we don't detach here, we will hit a RTC_DCHECK in OnGetPlayoutData()
+  // next time StartPlayout() is called since it will create a new Java
+  // thread.
+  thread_checker_java_.Detach();
+  initialized_ = false;
+  playing_ = false;
+  direct_buffer_address_ = nullptr;
+  return 0;
+}
+
+int AudioTrackJni::SpeakerVolumeIsAvailable(bool& available) {
+  available = true;
+  return 0;
+}
+
+int AudioTrackJni::SetSpeakerVolume(uint32_t volume) {
+  RTC_LOG(LS_INFO) << "SetSpeakerVolume(" << volume << ")";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  return j_audio_track_->SetStreamVolume(volume) ? 0 : -1;
+}
+
+int AudioTrackJni::MaxSpeakerVolume(uint32_t& max_volume) const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  max_volume = j_audio_track_->GetStreamMaxVolume();
+  return 0;
+}
+
+int AudioTrackJni::MinSpeakerVolume(uint32_t& min_volume) const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  min_volume = 0;
+  return 0;
+}
+
+int AudioTrackJni::SpeakerVolume(uint32_t& volume) const {
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  volume = j_audio_track_->GetStreamVolume();
+  RTC_LOG(LS_INFO) << "SpeakerVolume: " << volume;
+  return 0;
+}
+
+// TODO(henrika): possibly add stereo support.
+void AudioTrackJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+  RTC_LOG(LS_INFO) << "AttachAudioBuffer";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  audio_device_buffer_ = audioBuffer;
+  const int sample_rate_hz = audio_parameters_.sample_rate();
+  RTC_LOG(LS_INFO) << "SetPlayoutSampleRate(" << sample_rate_hz << ")";
+  audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
+  const size_t channels = audio_parameters_.channels();
+  RTC_LOG(LS_INFO) << "SetPlayoutChannels(" << channels << ")";
+  audio_device_buffer_->SetPlayoutChannels(channels);
+}
+
+JNI_FUNCTION_ALIGN
+void JNICALL AudioTrackJni::CacheDirectBufferAddress(JNIEnv* env,
+                                                     jobject obj,
+                                                     jobject byte_buffer,
+                                                     jlong nativeAudioTrack) {
+  webrtc::AudioTrackJni* this_object =
+      reinterpret_cast<webrtc::AudioTrackJni*>(nativeAudioTrack);
+  this_object->OnCacheDirectBufferAddress(env, byte_buffer);
+}
+
+void AudioTrackJni::OnCacheDirectBufferAddress(JNIEnv* env,
+                                               jobject byte_buffer) {
+  RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress";
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(!direct_buffer_address_);
+  direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
+  jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
+  RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity;
+  direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
+  const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
+  frames_per_buffer_ = direct_buffer_capacity_in_bytes_ / bytes_per_frame;
+  RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_;
+}
+
+JNI_FUNCTION_ALIGN
+void JNICALL AudioTrackJni::GetPlayoutData(JNIEnv* env,
+                                           jobject obj,
+                                           jint length,
+                                           jlong nativeAudioTrack) {
+  webrtc::AudioTrackJni* this_object =
+      reinterpret_cast<webrtc::AudioTrackJni*>(nativeAudioTrack);
+  this_object->OnGetPlayoutData(static_cast<size_t>(length));
+}
+
+// This method is called on a high-priority thread from Java. The name of
+// the thread is 'AudioTrackThread'.
+void AudioTrackJni::OnGetPlayoutData(size_t length) {
+  RTC_DCHECK(thread_checker_java_.IsCurrent());
+  const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t);
+  RTC_DCHECK_EQ(frames_per_buffer_, length / bytes_per_frame);
+  if (!audio_device_buffer_) {
+    RTC_LOG(LS_ERROR) << "AttachAudioBuffer has not been called";
+    return;
+  }
+  // Pull decoded data (in 16-bit PCM format) from jitter buffer.
+  int samples = audio_device_buffer_->RequestPlayoutData(frames_per_buffer_);
+  if (samples <= 0) {
+    RTC_LOG(LS_ERROR) << "AudioDeviceBuffer::RequestPlayoutData failed";
+    return;
+  }
+  RTC_DCHECK_EQ(samples, frames_per_buffer_);
+  // Copy decoded data into common byte buffer to ensure that it can be
+  // written to the Java based audio track.
+  samples = audio_device_buffer_->GetPlayoutData(direct_buffer_address_);
+  RTC_DCHECK_EQ(length, bytes_per_frame * samples);
+}
+
+}  // namespace webrtc
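For illustration, a minimal sketch of the arithmetic behind the UMA histograms in
JavaAudioTrack::InitPlayout() above, assuming mono 16-bit PCM and hypothetical values:

    // Sketch only: convert an AudioTrack buffer size in bytes to milliseconds,
    // assuming mono 16-bit PCM (2 bytes per frame) as the native code does.
    int sampleRate = 48000;        // assumed sample rate
    int bufferSizeInBytes = 7680;  // hypothetical value reported by initPlayout()
    int bufferSizeInMs = (bufferSizeInBytes * 1000) / (2 * sampleRate);  // -> 80 ms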
diff --git a/modules/audio_device/android/audio_track_jni.h b/modules/audio_device/android/audio_track_jni.h
new file mode 100644
index 0000000..7eb6908
--- /dev/null
+++ b/modules/audio_device/android/audio_track_jni.h
@@ -0,0 +1,161 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_TRACK_JNI_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_TRACK_JNI_H_
+
+#include <jni.h>
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/utility/include/helpers_android.h"
+#include "modules/utility/include/jvm_android.h"
+
+namespace webrtc {
+
+// Implements 16-bit mono PCM audio output support for Android using the Java
+// AudioTrack interface. Most of the work is done by its Java counterpart in
+// WebRtcAudioTrack.java. This class is created and lives on a thread in
+// C++-land, but decoded audio buffers are requested on a high-priority
+// thread managed by the Java class.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread.
+//
+// This class uses JvmThreadConnector to attach to a Java VM if needed
+// and detach when the object goes out of scope. Additional thread checking
+// guarantees that no other (possibly non attached) thread is used.
+class AudioTrackJni {
+ public:
+  // Wraps the Java specific parts of the AudioTrackJni into one helper class.
+  class JavaAudioTrack {
+   public:
+    JavaAudioTrack(NativeRegistration* native_registration,
+                   std::unique_ptr<GlobalRef> audio_track);
+    ~JavaAudioTrack();
+
+    bool InitPlayout(int sample_rate, int channels);
+    bool StartPlayout();
+    bool StopPlayout();
+    bool SetStreamVolume(int volume);
+    int GetStreamMaxVolume();
+    int GetStreamVolume();
+
+   private:
+    std::unique_ptr<GlobalRef> audio_track_;
+    jmethodID init_playout_;
+    jmethodID start_playout_;
+    jmethodID stop_playout_;
+    jmethodID set_stream_volume_;
+    jmethodID get_stream_max_volume_;
+    jmethodID get_stream_volume_;
+    jmethodID get_buffer_size_in_frames_;
+  };
+
+  explicit AudioTrackJni(AudioManager* audio_manager);
+  ~AudioTrackJni();
+
+  int32_t Init();
+  int32_t Terminate();
+
+  int32_t InitPlayout();
+  bool PlayoutIsInitialized() const { return initialized_; }
+
+  int32_t StartPlayout();
+  int32_t StopPlayout();
+  bool Playing() const { return playing_; }
+
+  int SpeakerVolumeIsAvailable(bool& available);
+  int SetSpeakerVolume(uint32_t volume);
+  int SpeakerVolume(uint32_t& volume) const;
+  int MaxSpeakerVolume(uint32_t& max_volume) const;
+  int MinSpeakerVolume(uint32_t& min_volume) const;
+
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ private:
+  // Called from Java side so we can cache the address of the Java-managed
+  // `byte_buffer` in `direct_buffer_address_`. The size of the buffer
+  // is also stored in `direct_buffer_capacity_in_bytes_`.
+  // Called on the same thread as the creating thread.
+  static void JNICALL CacheDirectBufferAddress(JNIEnv* env,
+                                               jobject obj,
+                                               jobject byte_buffer,
+                                               jlong nativeAudioTrack);
+  void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);
+
+  // Called periodically by the Java based WebRtcAudioTrack object when
+  // playout has started. Each call indicates that `length` new bytes should
+  // be written to the memory area `direct_buffer_address_` for playout.
+  // This method is called on a high-priority thread from Java. The name of
+  // the thread is 'AudioTrackThread'.
+  static void JNICALL GetPlayoutData(JNIEnv* env,
+                                     jobject obj,
+                                     jint length,
+                                     jlong nativeAudioTrack);
+  void OnGetPlayoutData(size_t length);
+
+  // Stores thread ID in constructor.
+  SequenceChecker thread_checker_;
+
+  // Stores thread ID in first call to OnGetPlayoutData() from high-priority
+  // thread in Java. Detached during construction of this object.
+  SequenceChecker thread_checker_java_;
+
+  // Calls JavaVM::AttachCurrentThread() if this thread is not attached at
+  // construction.
+  // Also ensures that DetachCurrentThread() is called at destruction.
+  JvmThreadConnector attach_thread_if_needed_;
+
+  // Wraps the JNI interface pointer and methods associated with it.
+  std::unique_ptr<JNIEnvironment> j_environment_;
+
+  // Contains factory method for creating the Java object.
+  std::unique_ptr<NativeRegistration> j_native_registration_;
+
+  // Wraps the Java specific parts of the AudioTrackJni class.
+  std::unique_ptr<AudioTrackJni::JavaAudioTrack> j_audio_track_;
+
+  // Contains audio parameters provided to this class at construction by the
+  // AudioManager.
+  const AudioParameters audio_parameters_;
+
+  // Cached copy of address to direct audio buffer owned by `j_audio_track_`.
+  void* direct_buffer_address_;
+
+  // Number of bytes in the direct audio buffer owned by `j_audio_track_`.
+  size_t direct_buffer_capacity_in_bytes_;
+
+  // Number of audio frames per audio buffer. Each audio frame corresponds to
+  // one sample of PCM mono data at 16 bits per sample. Hence, each audio
+  // frame contains 2 bytes (given that the Java layer only supports mono).
+  // Example: 480 for 48000 Hz or 441 for 44100 Hz.
+  size_t frames_per_buffer_;
+
+  bool initialized_;
+
+  bool playing_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
+  // The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance
+  // and therefore outlives this object.
+  AudioDeviceBuffer* audio_device_buffer_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_ANDROID_AUDIO_TRACK_JNI_H_
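For illustration, a minimal sketch of how `frames_per_buffer_` follows from the direct
buffer capacity described in the header comments above, assuming a hypothetical 10 ms
buffer at 48 kHz:

    // Sketch only: mono 16-bit PCM means 2 bytes per audio frame.
    int channels = 1;                       // the Java layer only supports mono
    int bytesPerFrame = channels * 2;       // 16-bit samples
    int directBufferCapacityInBytes = 960;  // hypothetical 10 ms buffer at 48 kHz
    int framesPerBuffer = directBufferCapacityInBytes / bytesPerFrame;  // -> 480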
diff --git a/modules/audio_device/android/build_info.cc b/modules/audio_device/android/build_info.cc
new file mode 100644
index 0000000..916be82
--- /dev/null
+++ b/modules/audio_device/android/build_info.cc
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/build_info.h"
+
+#include "modules/utility/include/helpers_android.h"
+
+namespace webrtc {
+
+BuildInfo::BuildInfo()
+    : j_environment_(JVM::GetInstance()->environment()),
+      j_build_info_(
+          JVM::GetInstance()->GetClass("org/webrtc/voiceengine/BuildInfo")) {}
+
+std::string BuildInfo::GetStringFromJava(const char* name) {
+  jmethodID id = j_build_info_.GetStaticMethodId(name, "()Ljava/lang/String;");
+  jstring j_string =
+      static_cast<jstring>(j_build_info_.CallStaticObjectMethod(id));
+  return j_environment_->JavaToStdString(j_string);
+}
+
+std::string BuildInfo::GetDeviceModel() {
+  return GetStringFromJava("getDeviceModel");
+}
+
+std::string BuildInfo::GetBrand() {
+  return GetStringFromJava("getBrand");
+}
+
+std::string BuildInfo::GetDeviceManufacturer() {
+  return GetStringFromJava("getDeviceManufacturer");
+}
+
+std::string BuildInfo::GetAndroidBuildId() {
+  return GetStringFromJava("getAndroidBuildId");
+}
+
+std::string BuildInfo::GetBuildType() {
+  return GetStringFromJava("getBuildType");
+}
+
+std::string BuildInfo::GetBuildRelease() {
+  return GetStringFromJava("getBuildRelease");
+}
+
+SdkCode BuildInfo::GetSdkVersion() {
+  jmethodID id = j_build_info_.GetStaticMethodId("getSdkVersion", "()I");
+  jint j_version = j_build_info_.CallStaticIntMethod(id);
+  return static_cast<SdkCode>(j_version);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_device/android/build_info.h b/modules/audio_device/android/build_info.h
new file mode 100644
index 0000000..3647e56
--- /dev/null
+++ b/modules/audio_device/android/build_info.h
@@ -0,0 +1,86 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_BUILD_INFO_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_BUILD_INFO_H_
+
+#include <jni.h>
+
+#include <memory>
+#include <string>
+
+#include "modules/utility/include/jvm_android.h"
+
+namespace webrtc {
+
+// This enumeration maps to the values returned by BuildInfo::GetSdkVersion(),
+// indicating the Android release associated with a given SDK version.
+// See https://developer.android.com/guide/topics/manifest/uses-sdk-element.html
+// for details.
+enum SdkCode {
+  SDK_CODE_JELLY_BEAN = 16,      // Android 4.1
+  SDK_CODE_JELLY_BEAN_MR1 = 17,  // Android 4.2
+  SDK_CODE_JELLY_BEAN_MR2 = 18,  // Android 4.3
+  SDK_CODE_KITKAT = 19,          // Android 4.4
+  SDK_CODE_WATCH = 20,           // Android 4.4W
+  SDK_CODE_LOLLIPOP = 21,        // Android 5.0
+  SDK_CODE_LOLLIPOP_MR1 = 22,    // Android 5.1
+  SDK_CODE_MARSHMALLOW = 23,     // Android 6.0
+  SDK_CODE_N = 24,
+};
+
+// Utility class used to query the Java class (org/webrtc/voiceengine/BuildInfo)
+// for device and Android build information.
+// The calling thread is attached to the JVM at construction if needed and a
+// valid Java environment object is also created.
+// All Get methods must be called on the creating thread. If not, the code will
+// hit RTC_DCHECKs when calling JNIEnvironment::JavaToStdString().
+class BuildInfo {
+ public:
+  BuildInfo();
+  ~BuildInfo() {}
+
+  // End-user-visible name for the end product (e.g. "Nexus 6").
+  std::string GetDeviceModel();
+  // Consumer-visible brand (e.g. "google").
+  std::string GetBrand();
+  // Manufacturer of the product/hardware (e.g. "motorola").
+  std::string GetDeviceManufacturer();
+  // Android build ID (e.g. LMY47D).
+  std::string GetAndroidBuildId();
+  // The type of build (e.g. "user" or "eng").
+  std::string GetBuildType();
+  // The user-visible version string (e.g. "5.1").
+  std::string GetBuildRelease();
+  // The user-visible SDK version of the framework (e.g. 21). See SdkCode enum
+  // for translation.
+  SdkCode GetSdkVersion();
+
+ private:
+  // Helper method which calls a static getter method with `name` and returns
+  // a string from Java.
+  std::string GetStringFromJava(const char* name);
+
+  // Ensures that this class can access a valid JNI interface pointer even
+  // if the creating thread was not attached to the JVM.
+  JvmThreadConnector attach_thread_if_needed_;
+
+  // Provides access to the JNIEnv interface pointer and the JavaToStdString()
+  // method which is used to translate Java strings to std strings.
+  std::unique_ptr<JNIEnvironment> j_environment_;
+
+  // Holds the jclass object and provides access to CallStaticObjectMethod().
+  // Used by GetStringFromJava() during construction only.
+  JavaClass j_build_info_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_ANDROID_BUILD_INFO_H_
diff --git a/modules/audio_device/android/ensure_initialized.cc b/modules/audio_device/android/ensure_initialized.cc
new file mode 100644
index 0000000..59e9c8f
--- /dev/null
+++ b/modules/audio_device/android/ensure_initialized.cc
@@ -0,0 +1,42 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/ensure_initialized.h"
+
+#include <jni.h>
+#include <pthread.h>
+#include <stddef.h>
+
+#include "modules/utility/include/jvm_android.h"
+#include "rtc_base/checks.h"
+#include "sdk/android/src/jni/jvm.h"
+
+namespace webrtc {
+namespace audiodevicemodule {
+
+static pthread_once_t g_initialize_once = PTHREAD_ONCE_INIT;
+
+void EnsureInitializedOnce() {
+  RTC_CHECK(::webrtc::jni::GetJVM() != nullptr);
+
+  JNIEnv* jni = ::webrtc::jni::AttachCurrentThreadIfNeeded();
+  JavaVM* jvm = NULL;
+  RTC_CHECK_EQ(0, jni->GetJavaVM(&jvm));
+
+  // Initialize the Java environment (currently only used by the audio manager).
+  webrtc::JVM::Initialize(jvm);
+}
+
+void EnsureInitialized() {
+  RTC_CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
+}
+
+}  // namespace audiodevicemodule
+}  // namespace webrtc
diff --git a/modules/audio_device/android/ensure_initialized.h b/modules/audio_device/android/ensure_initialized.h
new file mode 100644
index 0000000..c1997b4
--- /dev/null
+++ b/modules/audio_device/android/ensure_initialized.h
@@ -0,0 +1,17 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+namespace webrtc {
+namespace audiodevicemodule {
+
+void EnsureInitialized();
+
+}  // namespace audiodevicemodule
+}  // namespace webrtc
diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java
new file mode 100644
index 0000000..aed8a06
--- /dev/null
+++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.voiceengine;
+
+import android.os.Build;
+
+public final class BuildInfo {
+  public static String getDevice() {
+    return Build.DEVICE;
+  }
+
+  public static String getDeviceModel() {
+    return Build.MODEL;
+  }
+
+  public static String getProduct() {
+    return Build.PRODUCT;
+  }
+
+  public static String getBrand() {
+    return Build.BRAND;
+  }
+
+  public static String getDeviceManufacturer() {
+    return Build.MANUFACTURER;
+  }
+
+  public static String getAndroidBuildId() {
+    return Build.ID;
+  }
+
+  public static String getBuildType() {
+    return Build.TYPE;
+  }
+
+  public static String getBuildRelease() {
+    return Build.VERSION.RELEASE;
+  }
+
+  public static int getSdkVersion() {
+    return Build.VERSION.SDK_INT;
+  }
+}
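For illustration, a hedged sketch of how a caller might use BuildInfo.getSdkVersion()
together with the SdkCode values defined in build_info.h; the feature check here is only
an example:

    // Sketch only: 21 corresponds to SDK_CODE_LOLLIPOP in build_info.h.
    if (BuildInfo.getSdkVersion() >= 21) {
      // API 21+: low-latency audio input may be available on select devices.
    }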
diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
new file mode 100644
index 0000000..92f1c93
--- /dev/null
+++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
@@ -0,0 +1,312 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.voiceengine;
+
+import android.media.audiofx.AcousticEchoCanceler;
+import android.media.audiofx.AudioEffect;
+import android.media.audiofx.AudioEffect.Descriptor;
+import android.media.audiofx.NoiseSuppressor;
+import android.os.Build;
+import androidx.annotation.Nullable;
+import java.util.List;
+import java.util.UUID;
+import org.webrtc.Logging;
+
+// This class wraps control of two different platform effects. Supported
+// effects are: AcousticEchoCanceler (AEC) and NoiseSuppressor (NS).
+// Calling enable() will activate all effects that are
+// supported by the device if the corresponding `shouldEnableXXX` member is set.
+public class WebRtcAudioEffects {
+  private static final boolean DEBUG = false;
+
+  private static final String TAG = "WebRtcAudioEffects";
+
+  // UUIDs for Software Audio Effects that we want to avoid using.
+  // The implementor field will be set to "The Android Open Source Project".
+  private static final UUID AOSP_ACOUSTIC_ECHO_CANCELER =
+      UUID.fromString("bb392ec0-8d4d-11e0-a896-0002a5d5c51b");
+  private static final UUID AOSP_NOISE_SUPPRESSOR =
+      UUID.fromString("c06c8400-8e06-11e0-9cb6-0002a5d5c51b");
+
+  // Contains the available effect descriptors returned from the
+  // AudioEffect.getEffects() call. This result is cached to avoid doing the
+  // slow OS call multiple times.
+  private static @Nullable Descriptor[] cachedEffects;
+
+  // Contains the audio effect objects. Created in enable() and destroyed
+  // in release().
+  private @Nullable AcousticEchoCanceler aec;
+  private @Nullable NoiseSuppressor ns;
+
+  // Affects the final state given to the setEnabled() method on each effect.
+  // The default state is set to "disabled" but each effect can also be enabled
+  // by calling setAEC() and setNS().
+  // To enable an effect, both the shouldEnableXXX member and the static
+  // canUseXXX() must be true.
+  private boolean shouldEnableAec;
+  private boolean shouldEnableNs;
+
+  // Checks if the device implements Acoustic Echo Cancellation (AEC).
+  // Returns true if the device implements AEC, false otherwise.
+  public static boolean isAcousticEchoCancelerSupported() {
+    // Note: we're using isAcousticEchoCancelerEffectAvailable() instead of
+    // AcousticEchoCanceler.isAvailable() to avoid the expensive getEffects()
+    // OS API call.
+    return isAcousticEchoCancelerEffectAvailable();
+  }
+
+  // Checks if the device implements Noise Suppression (NS).
+  // Returns true if the device implements NS, false otherwise.
+  public static boolean isNoiseSuppressorSupported() {
+    // Note: we're using isNoiseSuppressorEffectAvailable() instead of
+    // NoiseSuppressor.isAvailable() to avoid the expensive getEffects()
+    // OS API call.
+    return isNoiseSuppressorEffectAvailable();
+  }
+
+  // Returns true if the device is blacklisted for HW AEC usage.
+  public static boolean isAcousticEchoCancelerBlacklisted() {
+    List<String> blackListedModels = WebRtcAudioUtils.getBlackListedModelsForAecUsage();
+    boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
+    if (isBlacklisted) {
+      Logging.w(TAG, Build.MODEL + " is blacklisted for HW AEC usage!");
+    }
+    return isBlacklisted;
+  }
+
+  // Returns true if the device is blacklisted for HW NS usage.
+  public static boolean isNoiseSuppressorBlacklisted() {
+    List<String> blackListedModels = WebRtcAudioUtils.getBlackListedModelsForNsUsage();
+    boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
+    if (isBlacklisted) {
+      Logging.w(TAG, Build.MODEL + " is blacklisted for HW NS usage!");
+    }
+    return isBlacklisted;
+  }
+
+  // Returns true if the platform AEC should be excluded based on its UUID.
+  // AudioEffect.queryEffects() can throw IllegalStateException.
+  private static boolean isAcousticEchoCancelerExcludedByUUID() {
+    for (Descriptor d : getAvailableEffects()) {
+      if (d.type.equals(AudioEffect.EFFECT_TYPE_AEC)
+          && d.uuid.equals(AOSP_ACOUSTIC_ECHO_CANCELER)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // Returns true if the platform NS should be excluded based on its UUID.
+  // AudioEffect.queryEffects() can throw IllegalStateException.
+  private static boolean isNoiseSuppressorExcludedByUUID() {
+    for (Descriptor d : getAvailableEffects()) {
+      if (d.type.equals(AudioEffect.EFFECT_TYPE_NS) && d.uuid.equals(AOSP_NOISE_SUPPRESSOR)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // Returns true if the device supports Acoustic Echo Cancellation (AEC).
+  private static boolean isAcousticEchoCancelerEffectAvailable() {
+    return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_AEC);
+  }
+
+  // Returns true if the device supports Noise Suppression (NS).
+  private static boolean isNoiseSuppressorEffectAvailable() {
+    return isEffectTypeAvailable(AudioEffect.EFFECT_TYPE_NS);
+  }
+
+  // Returns true if all conditions for supporting the HW AEC are fulfilled.
+  // It will not be possible to enable the HW AEC if this method returns false.
+  public static boolean canUseAcousticEchoCanceler() {
+    boolean canUseAcousticEchoCanceler = isAcousticEchoCancelerSupported()
+        && !WebRtcAudioUtils.useWebRtcBasedAcousticEchoCanceler()
+        && !isAcousticEchoCancelerBlacklisted() && !isAcousticEchoCancelerExcludedByUUID();
+    Logging.d(TAG, "canUseAcousticEchoCanceler: " + canUseAcousticEchoCanceler);
+    return canUseAcousticEchoCanceler;
+  }
+
+  // Returns true if all conditions for supporting the HW NS are fulfilled.
+  // It will not be possible to enable the HW NS if this method returns false.
+  public static boolean canUseNoiseSuppressor() {
+    boolean canUseNoiseSuppressor = isNoiseSuppressorSupported()
+        && !WebRtcAudioUtils.useWebRtcBasedNoiseSuppressor() && !isNoiseSuppressorBlacklisted()
+        && !isNoiseSuppressorExcludedByUUID();
+    Logging.d(TAG, "canUseNoiseSuppressor: " + canUseNoiseSuppressor);
+    return canUseNoiseSuppressor;
+  }
+
+  public static WebRtcAudioEffects create() {
+    return new WebRtcAudioEffects();
+  }
+
+  private WebRtcAudioEffects() {
+    Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
+  }
+
+  // Call this method to enable or disable the platform AEC. It modifies
+  // `shouldEnableAec` which is used in enable() where the actual state
+  // of the AEC effect is modified. Returns true if HW AEC is supported and
+  // false otherwise.
+  public boolean setAEC(boolean enable) {
+    Logging.d(TAG, "setAEC(" + enable + ")");
+    if (!canUseAcousticEchoCanceler()) {
+      Logging.w(TAG, "Platform AEC is not supported");
+      shouldEnableAec = false;
+      return false;
+    }
+    if (aec != null && (enable != shouldEnableAec)) {
+      Logging.e(TAG, "Platform AEC state can't be modified while recording");
+      return false;
+    }
+    shouldEnableAec = enable;
+    return true;
+  }
+
+  // Call this method to enable or disable the platform NS. It modifies
+  // `shouldEnableNs` which is used in enable() where the actual state
+  // of the NS effect is modified. Returns true if HW NS is supported and
+  // false otherwise.
+  public boolean setNS(boolean enable) {
+    Logging.d(TAG, "setNS(" + enable + ")");
+    if (!canUseNoiseSuppressor()) {
+      Logging.w(TAG, "Platform NS is not supported");
+      shouldEnableNs = false;
+      return false;
+    }
+    if (ns != null && (enable != shouldEnableNs)) {
+      Logging.e(TAG, "Platform NS state can't be modified while recording");
+      return false;
+    }
+    shouldEnableNs = enable;
+    return true;
+  }
+
+  public void enable(int audioSession) {
+    Logging.d(TAG, "enable(audioSession=" + audioSession + ")");
+    assertTrue(aec == null);
+    assertTrue(ns == null);
+
+    if (DEBUG) {
+      // Add logging of supported effects, restricted to the "VoIP effects",
+      // i.e., AEC and NS. Avoid calling AudioEffect.queryEffects() unless the
+      // DEBUG flag is set since we have seen crashes in this API.
+      for (Descriptor d : AudioEffect.queryEffects()) {
+        if (effectTypeIsVoIP(d.type)) {
+          Logging.d(TAG, "name: " + d.name + ", "
+                  + "mode: " + d.connectMode + ", "
+                  + "implementor: " + d.implementor + ", "
+                  + "UUID: " + d.uuid);
+        }
+      }
+    }
+
+    if (isAcousticEchoCancelerSupported()) {
+      // Create an AcousticEchoCanceler and attach it to the AudioRecord on
+      // the specified audio session.
+      aec = AcousticEchoCanceler.create(audioSession);
+      if (aec != null) {
+        boolean enabled = aec.getEnabled();
+        boolean enable = shouldEnableAec && canUseAcousticEchoCanceler();
+        if (aec.setEnabled(enable) != AudioEffect.SUCCESS) {
+          Logging.e(TAG, "Failed to set the AcousticEchoCanceler state");
+        }
+        Logging.d(TAG, "AcousticEchoCanceler: was " + (enabled ? "enabled" : "disabled")
+                + ", enable: " + enable + ", is now: "
+                + (aec.getEnabled() ? "enabled" : "disabled"));
+      } else {
+        Logging.e(TAG, "Failed to create the AcousticEchoCanceler instance");
+      }
+    }
+
+    if (isNoiseSuppressorSupported()) {
+      // Create a NoiseSuppressor and attach it to the AudioRecord on the
+      // specified audio session.
+      ns = NoiseSuppressor.create(audioSession);
+      if (ns != null) {
+        boolean enabled = ns.getEnabled();
+        boolean enable = shouldEnableNs && canUseNoiseSuppressor();
+        if (ns.setEnabled(enable) != AudioEffect.SUCCESS) {
+          Logging.e(TAG, "Failed to set the NoiseSuppressor state");
+        }
+        Logging.d(TAG, "NoiseSuppressor: was " + (enabled ? "enabled" : "disabled") + ", enable: "
+                + enable + ", is now: " + (ns.getEnabled() ? "enabled" : "disabled"));
+      } else {
+        Logging.e(TAG, "Failed to create the NoiseSuppressor instance");
+      }
+    }
+  }
+
+  // Releases all native audio effect resources. It is a good practice to
+  // release the effect engine when not in use as control can be returned
+  // to other applications or the native resources released.
+  public void release() {
+    Logging.d(TAG, "release");
+    if (aec != null) {
+      aec.release();
+      aec = null;
+    }
+    if (ns != null) {
+      ns.release();
+      ns = null;
+    }
+  }
+
+  // Returns true for effect types in `type` that are of "VoIP" types:
+  // Acoustic Echo Canceler (AEC) or Noise Suppressor (NS).
+  // Note that an extra check for support is needed in each comparison since
+  // some devices include effects in the AudioEffect.Descriptor array that are
+  // actually not available on the device.
+  // As an example: Samsung Galaxy S6 includes an AGC in the descriptor but
+  // AutomaticGainControl.isAvailable() returns false.
+  private boolean effectTypeIsVoIP(UUID type) {
+    return (AudioEffect.EFFECT_TYPE_AEC.equals(type) && isAcousticEchoCancelerSupported())
+        || (AudioEffect.EFFECT_TYPE_NS.equals(type) && isNoiseSuppressorSupported());
+  }
+
+  // Helper method which throws an exception when an assertion has failed.
+  private static void assertTrue(boolean condition) {
+    if (!condition) {
+      throw new AssertionError("Expected condition to be true");
+    }
+  }
+
+  // Returns the cached copy of the audio effects array, if available, or
+  // queries the operating system for the list of effects.
+  private static @Nullable Descriptor[] getAvailableEffects() {
+    if (cachedEffects != null) {
+      return cachedEffects;
+    }
+    // The caching is best effort only - if this method is called from several
+    // threads in parallel, they may end up doing the underlying OS call
+    // multiple times. It's normally only called on one thread so there's no
+    // real need to optimize for the multiple threads case.
+    cachedEffects = AudioEffect.queryEffects();
+    return cachedEffects;
+  }
+
+  // Returns true if an effect of the specified type is available. Functionally
+  // equivalent to (NoiseSuppressor|AutomaticGainControl|...).isAvailable(), but
+  // faster as it avoids the expensive OS call to enumerate effects.
+  private static boolean isEffectTypeAvailable(UUID effectType) {
+    Descriptor[] effects = getAvailableEffects();
+    if (effects == null) {
+      return false;
+    }
+    for (Descriptor d : effects) {
+      if (d.type.equals(effectType)) {
+        return true;
+      }
+    }
+    return false;
+  }
+}
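For illustration, a minimal sketch of the intended call sequence for WebRtcAudioEffects as
described by the comments above; the audio session id is hypothetical and would normally
come from AudioRecord.getAudioSessionId():

    // Sketch only: request the platform effects, then enable them on a session.
    WebRtcAudioEffects effects = WebRtcAudioEffects.create();
    effects.setAEC(true);   // honored only if canUseAcousticEchoCanceler()
    effects.setNS(true);    // honored only if canUseNoiseSuppressor()
    int audioSession = 42;  // hypothetical; use audioRecord.getAudioSessionId()
    effects.enable(audioSession);
    // ... recording ...
    effects.release();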
diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
new file mode 100644
index 0000000..43c416f
--- /dev/null
+++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
@@ -0,0 +1,371 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.voiceengine;
+
+import android.content.Context;
+import android.content.pm.PackageManager;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioRecord;
+import android.media.AudioTrack;
+import android.os.Build;
+import androidx.annotation.Nullable;
+import java.util.Timer;
+import java.util.TimerTask;
+import org.webrtc.ContextUtils;
+import org.webrtc.Logging;
+
+// WebRtcAudioManager handles tasks that use android.media.AudioManager.
+// At construction, storeAudioParameters() is called and it retrieves
+// fundamental audio parameters like native sample rate and number of channels.
+// The result is then provided to the caller by nativeCacheAudioParameters().
+// It is also possible to call init() to set up the audio environment for best
+// possible "VoIP performance". All settings done in init() are reverted by
+// dispose(). This class can also be used without calling init() if the user
+// prefers to set up the audio environment separately. However, it is
+// recommended to always use AudioManager.MODE_IN_COMMUNICATION.
+public class WebRtcAudioManager {
+  private static final boolean DEBUG = false;
+
+  private static final String TAG = "WebRtcAudioManager";
+
+  // TODO(bugs.webrtc.org/8914): disabled by default until AAudio support has
+  // been completed. Goal is to always return false on Android O MR1 and higher.
+  private static final boolean blacklistDeviceForAAudioUsage = true;
+
+  // Use mono as default for both audio directions.
+  private static boolean useStereoOutput;
+  private static boolean useStereoInput;
+
+  private static boolean blacklistDeviceForOpenSLESUsage;
+  private static boolean blacklistDeviceForOpenSLESUsageIsOverridden;
+
+  // Call this method to override the default list of blacklisted devices
+  // specified in WebRtcAudioUtils.BLACKLISTED_OPEN_SL_ES_MODELS.
+  // Allows an app to take control over which devices to exclude from using
+  // the OpenSL ES audio output path.
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized void setBlacklistDeviceForOpenSLESUsage(boolean enable) {
+    blacklistDeviceForOpenSLESUsageIsOverridden = true;
+    blacklistDeviceForOpenSLESUsage = enable;
+  }
+
+  // Call these methods to override the default mono audio modes for the specified direction(s)
+  // (input and/or output).
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized void setStereoOutput(boolean enable) {
+    Logging.w(TAG, "Overriding default output behavior: setStereoOutput(" + enable + ')');
+    useStereoOutput = enable;
+  }
+
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized void setStereoInput(boolean enable) {
+    Logging.w(TAG, "Overriding default input behavior: setStereoInput(" + enable + ')');
+    useStereoInput = enable;
+  }
+
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized boolean getStereoOutput() {
+    return useStereoOutput;
+  }
+
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized boolean getStereoInput() {
+    return useStereoInput;
+  }
+
+  // Default audio data format is PCM 16 bit per sample.
+  // Guaranteed to be supported by all devices.
+  private static final int BITS_PER_SAMPLE = 16;
+
+  private static final int DEFAULT_FRAME_PER_BUFFER = 256;
+
+  // Private utility class that periodically checks and logs the volume level
+  // of the audio stream that is currently controlled by the volume control.
+  // A timer triggers logs once every 30 seconds and the timer's associated
+  // thread is named "WebRtcVolumeLevelLoggerThread".
+  private static class VolumeLogger {
+    private static final String THREAD_NAME = "WebRtcVolumeLevelLoggerThread";
+    private static final int TIMER_PERIOD_IN_SECONDS = 30;
+
+    private final AudioManager audioManager;
+    private @Nullable Timer timer;
+
+    public VolumeLogger(AudioManager audioManager) {
+      this.audioManager = audioManager;
+    }
+
+    public void start() {
+      timer = new Timer(THREAD_NAME);
+      timer.schedule(new LogVolumeTask(audioManager.getStreamMaxVolume(AudioManager.STREAM_RING),
+                         audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL)),
+          0, TIMER_PERIOD_IN_SECONDS * 1000);
+    }
+
+    private class LogVolumeTask extends TimerTask {
+      private final int maxRingVolume;
+      private final int maxVoiceCallVolume;
+
+      LogVolumeTask(int maxRingVolume, int maxVoiceCallVolume) {
+        this.maxRingVolume = maxRingVolume;
+        this.maxVoiceCallVolume = maxVoiceCallVolume;
+      }
+
+      @Override
+      public void run() {
+        final int mode = audioManager.getMode();
+        if (mode == AudioManager.MODE_RINGTONE) {
+          Logging.d(TAG, "STREAM_RING stream volume: "
+                  + audioManager.getStreamVolume(AudioManager.STREAM_RING) + " (max="
+                  + maxRingVolume + ")");
+        } else if (mode == AudioManager.MODE_IN_COMMUNICATION) {
+          Logging.d(TAG, "VOICE_CALL stream volume: "
+                  + audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL) + " (max="
+                  + maxVoiceCallVolume + ")");
+        }
+      }
+    }
+
+    private void stop() {
+      if (timer != null) {
+        timer.cancel();
+        timer = null;
+      }
+    }
+  }
+
+  private final long nativeAudioManager;
+  private final AudioManager audioManager;
+
+  private boolean initialized;
+  private int nativeSampleRate;
+  private int nativeChannels;
+
+  private boolean hardwareAEC;
+  private boolean hardwareAGC;
+  private boolean hardwareNS;
+  private boolean lowLatencyOutput;
+  private boolean lowLatencyInput;
+  private boolean proAudio;
+  private boolean aAudio;
+  private int sampleRate;
+  private int outputChannels;
+  private int inputChannels;
+  private int outputBufferSize;
+  private int inputBufferSize;
+
+  private final VolumeLogger volumeLogger;
+
+  WebRtcAudioManager(long nativeAudioManager) {
+    Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
+    this.nativeAudioManager = nativeAudioManager;
+    audioManager =
+        (AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
+    if (DEBUG) {
+      WebRtcAudioUtils.logDeviceInfo(TAG);
+    }
+    volumeLogger = new VolumeLogger(audioManager);
+    storeAudioParameters();
+    nativeCacheAudioParameters(sampleRate, outputChannels, inputChannels, hardwareAEC, hardwareAGC,
+        hardwareNS, lowLatencyOutput, lowLatencyInput, proAudio, aAudio, outputBufferSize,
+        inputBufferSize, nativeAudioManager);
+    WebRtcAudioUtils.logAudioState(TAG);
+  }
+
+  private boolean init() {
+    Logging.d(TAG, "init" + WebRtcAudioUtils.getThreadInfo());
+    if (initialized) {
+      return true;
+    }
+    Logging.d(TAG, "audio mode is: "
+        + WebRtcAudioUtils.modeToString(audioManager.getMode()));
+    initialized = true;
+    volumeLogger.start();
+    return true;
+  }
+
+  private void dispose() {
+    Logging.d(TAG, "dispose" + WebRtcAudioUtils.getThreadInfo());
+    if (!initialized) {
+      return;
+    }
+    volumeLogger.stop();
+  }
+
+  private boolean isCommunicationModeEnabled() {
+    return (audioManager.getMode() == AudioManager.MODE_IN_COMMUNICATION);
+  }
+
+  private boolean isDeviceBlacklistedForOpenSLESUsage() {
+    boolean blacklisted = blacklistDeviceForOpenSLESUsageIsOverridden
+        ? blacklistDeviceForOpenSLESUsage
+        : WebRtcAudioUtils.deviceIsBlacklistedForOpenSLESUsage();
+    if (blacklisted) {
+      Logging.d(TAG, Build.MODEL + " is blacklisted for OpenSL ES usage!");
+    }
+    return blacklisted;
+  }
+
+  private void storeAudioParameters() {
+    outputChannels = getStereoOutput() ? 2 : 1;
+    inputChannels = getStereoInput() ? 2 : 1;
+    sampleRate = getNativeOutputSampleRate();
+    hardwareAEC = isAcousticEchoCancelerSupported();
+    // TODO(henrika): use of hardware AGC is no longer supported. Currently
+    // hardcoded to false. To be removed.
+    hardwareAGC = false;
+    hardwareNS = isNoiseSuppressorSupported();
+    lowLatencyOutput = isLowLatencyOutputSupported();
+    lowLatencyInput = isLowLatencyInputSupported();
+    proAudio = isProAudioSupported();
+    aAudio = isAAudioSupported();
+    outputBufferSize = lowLatencyOutput ? getLowLatencyOutputFramesPerBuffer()
+                                        : getMinOutputFrameSize(sampleRate, outputChannels);
+    inputBufferSize = lowLatencyInput ? getLowLatencyInputFramesPerBuffer()
+                                      : getMinInputFrameSize(sampleRate, inputChannels);
+  }
+
+  // Gets the current earpiece state.
+  private boolean hasEarpiece() {
+    return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
+        PackageManager.FEATURE_TELEPHONY);
+  }
+
+  // Returns true if low-latency audio output is supported.
+  private boolean isLowLatencyOutputSupported() {
+    return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
+        PackageManager.FEATURE_AUDIO_LOW_LATENCY);
+  }
+
+  // Returns true if low-latency audio input is supported.
+  // TODO(henrika): remove the hardcoded false return value when OpenSL ES
+  // input performance has been evaluated and tested more.
+  public boolean isLowLatencyInputSupported() {
+    // TODO(henrika): investigate if some sort of device list is needed here
+    // as well. The NDK doc states that: "As of API level 21, lower latency
+    // audio input is supported on select devices. To take advantage of this
+    // feature, first confirm that lower latency output is available".
+    return isLowLatencyOutputSupported();
+  }
+
+  // Returns true if the device has professional audio level of functionality
+  // and therefore supports the lowest possible round-trip latency.
+  private boolean isProAudioSupported() {
+    return Build.VERSION.SDK_INT >= 23
+        && ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
+               PackageManager.FEATURE_AUDIO_PRO);
+  }
+
+  // AAudio is supported on Android Oreo MR1 (API 27) and higher.
+  // TODO(bugs.webrtc.org/8914): currently disabled by default.
+  private boolean isAAudioSupported() {
+    if (blacklistDeviceForAAudioUsage) {
+      Logging.w(TAG, "AAudio support is currently disabled on all devices!");
+    }
+    return !blacklistDeviceForAAudioUsage && Build.VERSION.SDK_INT >= 27;
+  }
+
+  // Returns the native output sample rate for this device's output stream.
+  private int getNativeOutputSampleRate() {
+    // Override this if we're running on an old emulator image which only
+    // supports 8 kHz and doesn't support PROPERTY_OUTPUT_SAMPLE_RATE.
+    if (WebRtcAudioUtils.runningOnEmulator()) {
+      Logging.d(TAG, "Running emulator, overriding sample rate to 8 kHz.");
+      return 8000;
+    }
+    // Default can be overridden by WebRtcAudioUtils.setDefaultSampleRateHz().
+    // If so, use that value and return here.
+    if (WebRtcAudioUtils.isDefaultSampleRateOverridden()) {
+      Logging.d(TAG, "Default sample rate is overriden to "
+              + WebRtcAudioUtils.getDefaultSampleRateHz() + " Hz");
+      return WebRtcAudioUtils.getDefaultSampleRateHz();
+    }
+    // No overrides available. Deliver best possible estimate based on default
+    // Android AudioManager APIs.
+    final int sampleRateHz = getSampleRateForApiLevel();
+    Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
+    return sampleRateHz;
+  }
+
+  private int getSampleRateForApiLevel() {
+    String sampleRateString = audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
+    return (sampleRateString == null) ? WebRtcAudioUtils.getDefaultSampleRateHz()
+                                      : Integer.parseInt(sampleRateString);
+  }
+
+  // Returns the native output buffer size for low-latency output streams.
+  private int getLowLatencyOutputFramesPerBuffer() {
+    assertTrue(isLowLatencyOutputSupported());
+    String framesPerBuffer =
+        audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
+    return framesPerBuffer == null ? DEFAULT_FRAME_PER_BUFFER : Integer.parseInt(framesPerBuffer);
+  }
+
+  // Returns true if the device supports an audio effect (AEC or NS).
+  // Four conditions must be fulfilled if functions are to return true:
+  // 1) the platform must support the built-in (HW) effect,
+  // 2) explicit use (override) of a WebRTC based version must not be set,
+  // 3) the device must not be blacklisted for use of the effect, and
+  // 4) the UUID of the effect must be approved (some UUIDs can be excluded).
+  private static boolean isAcousticEchoCancelerSupported() {
+    return WebRtcAudioEffects.canUseAcousticEchoCanceler();
+  }
+  private static boolean isNoiseSuppressorSupported() {
+    return WebRtcAudioEffects.canUseNoiseSuppressor();
+  }
+
+  // Returns the minimum output buffer size for Java based audio (AudioTrack).
+  // This size can also be used for OpenSL ES implementations on devices that
+  // lack support for low-latency output.
+  private static int getMinOutputFrameSize(int sampleRateInHz, int numChannels) {
+    final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
+    final int channelConfig =
+        (numChannels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO);
+    return AudioTrack.getMinBufferSize(
+               sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
+        / bytesPerFrame;
+  }
+
+  // Returns the native input buffer size for input streams.
+  private int getLowLatencyInputFramesPerBuffer() {
+    assertTrue(isLowLatencyInputSupported());
+    return getLowLatencyOutputFramesPerBuffer();
+  }
+
+  // Returns the minimum input buffer size for Java based audio (AudioRecord).
+  // This size can also be used for OpenSL ES implementations on devices that
+  // lack support for low-latency input.
+  private static int getMinInputFrameSize(int sampleRateInHz, int numChannels) {
+    final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
+    final int channelConfig =
+        (numChannels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
+    return AudioRecord.getMinBufferSize(
+               sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
+        / bytesPerFrame;
+  }
+
+  // Helper method which throws an exception when an assertion has failed.
+  private static void assertTrue(boolean condition) {
+    if (!condition) {
+      throw new AssertionError("Expected condition to be true");
+    }
+  }
+
+  private native void nativeCacheAudioParameters(int sampleRate, int outputChannels,
+      int inputChannels, boolean hardwareAEC, boolean hardwareAGC, boolean hardwareNS,
+      boolean lowLatencyOutput, boolean lowLatencyInput, boolean proAudio, boolean aAudio,
+      int outputBufferSize, int inputBufferSize, long nativeAudioManager);
+}
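For illustration, a minimal sketch of the conversion performed by getMinOutputFrameSize()
above, using the public Android APIs it wraps and assumed parameters:

    // Sketch only: AudioTrack.getMinBufferSize() returns bytes; dividing by the
    // frame size yields frames.
    int sampleRateInHz = 48000;                  // assumed
    int numChannels = 1;                         // mono
    int bytesPerFrame = numChannels * (16 / 8);  // 2 bytes for 16-bit PCM
    int minBufferBytes = android.media.AudioTrack.getMinBufferSize(
        sampleRateInHz, android.media.AudioFormat.CHANNEL_OUT_MONO,
        android.media.AudioFormat.ENCODING_PCM_16BIT);
    int minOutputFrames = minBufferBytes / bytesPerFrame;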
diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
new file mode 100644
index 0000000..8eab01c
--- /dev/null
+++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
@@ -0,0 +1,409 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.voiceengine;
+
+import android.media.AudioFormat;
+import android.media.AudioRecord;
+import android.media.MediaRecorder.AudioSource;
+import android.os.Build;
+import android.os.Process;
+import androidx.annotation.Nullable;
+import java.lang.System;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.concurrent.TimeUnit;
+import org.webrtc.Logging;
+import org.webrtc.ThreadUtils;
+
+public class WebRtcAudioRecord {
+  private static final boolean DEBUG = false;
+
+  private static final String TAG = "WebRtcAudioRecord";
+
+  // Default audio data format is PCM 16 bit per sample.
+  // Guaranteed to be supported by all devices.
+  private static final int BITS_PER_SAMPLE = 16;
+
+  // Requested size of each recorded buffer provided to the client.
+  private static final int CALLBACK_BUFFER_SIZE_MS = 10;
+
+  // Average number of callbacks per second.
+  private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
+
+  // We ask for a native buffer size of BUFFER_SIZE_FACTOR * (minimum required
+  // buffer size). The extra space is allocated to guard against glitches under
+  // high load.
+  private static final int BUFFER_SIZE_FACTOR = 2;
+
+  // The AudioRecordJavaThread is allowed to wait for a successful call to join(),
+  // but the wait times out after this amount of time.
+  private static final long AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS = 2000;
+
+  private static final int DEFAULT_AUDIO_SOURCE = getDefaultAudioSource();
+  private static int audioSource = DEFAULT_AUDIO_SOURCE;
+
+  private final long nativeAudioRecord;
+
+  private @Nullable WebRtcAudioEffects effects;
+
+  private ByteBuffer byteBuffer;
+
+  private @Nullable AudioRecord audioRecord;
+  private @Nullable AudioRecordThread audioThread;
+
+  private static volatile boolean microphoneMute;
+  private byte[] emptyBytes;
+
+  // Audio recording error handler functions.
+  public enum AudioRecordStartErrorCode {
+    AUDIO_RECORD_START_EXCEPTION,
+    AUDIO_RECORD_START_STATE_MISMATCH,
+  }
+
+  public static interface WebRtcAudioRecordErrorCallback {
+    void onWebRtcAudioRecordInitError(String errorMessage);
+    void onWebRtcAudioRecordStartError(AudioRecordStartErrorCode errorCode, String errorMessage);
+    void onWebRtcAudioRecordError(String errorMessage);
+  }
+
+  private static @Nullable WebRtcAudioRecordErrorCallback errorCallback;
+
+  public static void setErrorCallback(WebRtcAudioRecordErrorCallback errorCallback) {
+    Logging.d(TAG, "Set error callback");
+    WebRtcAudioRecord.errorCallback = errorCallback;
+  }
+
+  /**
+   * Contains audio sample information. Object is passed using {@link
+   * WebRtcAudioRecord.WebRtcAudioRecordSamplesReadyCallback}
+   */
+  public static class AudioSamples {
+    /** See {@link AudioRecord#getAudioFormat()} */
+    private final int audioFormat;
+    /** See {@link AudioRecord#getChannelCount()} */
+    private final int channelCount;
+    /** See {@link AudioRecord#getSampleRate()} */
+    private final int sampleRate;
+
+    private final byte[] data;
+
+    private AudioSamples(AudioRecord audioRecord, byte[] data) {
+      this.audioFormat = audioRecord.getAudioFormat();
+      this.channelCount = audioRecord.getChannelCount();
+      this.sampleRate = audioRecord.getSampleRate();
+      this.data = data;
+    }
+
+    public int getAudioFormat() {
+      return audioFormat;
+    }
+
+    public int getChannelCount() {
+      return channelCount;
+    }
+
+    public int getSampleRate() {
+      return sampleRate;
+    }
+
+    public byte[] getData() {
+      return data;
+    }
+  }
+
+  /** Called when new audio samples are ready. This should only be set for debug purposes */
+  public static interface WebRtcAudioRecordSamplesReadyCallback {
+    void onWebRtcAudioRecordSamplesReady(AudioSamples samples);
+  }
+
+  private static @Nullable WebRtcAudioRecordSamplesReadyCallback audioSamplesReadyCallback;
+
+  public static void setOnAudioSamplesReady(WebRtcAudioRecordSamplesReadyCallback callback) {
+    audioSamplesReadyCallback = callback;
+  }
+
+  /**
+   * Audio thread which keeps calling ByteBuffer.read() waiting for audio
+   * to be recorded. Feeds recorded data to the native counterpart as a
+   * periodic sequence of callbacks using DataIsRecorded().
+   * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
+   */
+  private class AudioRecordThread extends Thread {
+    private volatile boolean keepAlive = true;
+
+    public AudioRecordThread(String name) {
+      super(name);
+    }
+
+    // TODO(titovartem) make correct fix during webrtc:9175
+    @SuppressWarnings("ByteBufferBackingArray")
+    @Override
+    public void run() {
+      Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
+      Logging.d(TAG, "AudioRecordThread" + WebRtcAudioUtils.getThreadInfo());
+      assertTrue(audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING);
+
+      long lastTime = System.nanoTime();
+      while (keepAlive) {
+        int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity());
+        if (bytesRead == byteBuffer.capacity()) {
+          if (microphoneMute) {
+            byteBuffer.clear();
+            byteBuffer.put(emptyBytes);
+          }
+          // It's possible we've been shut down during the read, and stopRecording() tried and
+          // failed to join this thread. To be a bit safer, try to avoid calling any native methods
+          // in case they've been unregistered after stopRecording() returned.
+          if (keepAlive) {
+            nativeDataIsRecorded(bytesRead, nativeAudioRecord);
+          }
+          if (audioSamplesReadyCallback != null) {
+            // Copy the entire byte buffer array.  Assume that the start of the byteBuffer is
+            // at index 0.
+            byte[] data = Arrays.copyOf(byteBuffer.array(), byteBuffer.capacity());
+            audioSamplesReadyCallback.onWebRtcAudioRecordSamplesReady(
+                new AudioSamples(audioRecord, data));
+          }
+        } else {
+          String errorMessage = "AudioRecord.read failed: " + bytesRead;
+          Logging.e(TAG, errorMessage);
+          if (bytesRead == AudioRecord.ERROR_INVALID_OPERATION) {
+            keepAlive = false;
+            reportWebRtcAudioRecordError(errorMessage);
+          }
+        }
+        if (DEBUG) {
+          long nowTime = System.nanoTime();
+          long durationInMs = TimeUnit.NANOSECONDS.toMillis((nowTime - lastTime));
+          lastTime = nowTime;
+          Logging.d(TAG, "bytesRead[" + durationInMs + "] " + bytesRead);
+        }
+      }
+
+      try {
+        if (audioRecord != null) {
+          audioRecord.stop();
+        }
+      } catch (IllegalStateException e) {
+        Logging.e(TAG, "AudioRecord.stop failed: " + e.getMessage());
+      }
+    }
+
+    // Stops the inner thread loop and also calls AudioRecord.stop().
+    // Does not block the calling thread.
+    public void stopThread() {
+      Logging.d(TAG, "stopThread");
+      keepAlive = false;
+    }
+  }
+
+  WebRtcAudioRecord(long nativeAudioRecord) {
+    Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
+    this.nativeAudioRecord = nativeAudioRecord;
+    if (DEBUG) {
+      WebRtcAudioUtils.logDeviceInfo(TAG);
+    }
+    effects = WebRtcAudioEffects.create();
+  }
+
+  private boolean enableBuiltInAEC(boolean enable) {
+    Logging.d(TAG, "enableBuiltInAEC(" + enable + ')');
+    if (effects == null) {
+      Logging.e(TAG, "Built-in AEC is not supported on this platform");
+      return false;
+    }
+    return effects.setAEC(enable);
+  }
+
+  private boolean enableBuiltInNS(boolean enable) {
+    Logging.d(TAG, "enableBuiltInNS(" + enable + ')');
+    if (effects == null) {
+      Logging.e(TAG, "Built-in NS is not supported on this platform");
+      return false;
+    }
+    return effects.setNS(enable);
+  }
+
+  private int initRecording(int sampleRate, int channels) {
+    Logging.d(TAG, "initRecording(sampleRate=" + sampleRate + ", channels=" + channels + ")");
+    if (audioRecord != null) {
+      reportWebRtcAudioRecordInitError("InitRecording called twice without StopRecording.");
+      return -1;
+    }
+    final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
+    final int framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
+    byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
+    Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
+    emptyBytes = new byte[byteBuffer.capacity()];
+    // Rather than passing the ByteBuffer with every callback (requiring
+    // the potentially expensive GetDirectBufferAddress) we simply have the
+    // native class cache the address to the memory once.
+    nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);
+
+    // Get the minimum buffer size required for the successful creation of
+    // an AudioRecord object, in byte units.
+    // Note that this size doesn't guarantee a smooth recording under load.
+    final int channelConfig = channelCountToConfiguration(channels);
+    int minBufferSize =
+        AudioRecord.getMinBufferSize(sampleRate, channelConfig, AudioFormat.ENCODING_PCM_16BIT);
+    if (minBufferSize == AudioRecord.ERROR || minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
+      reportWebRtcAudioRecordInitError("AudioRecord.getMinBufferSize failed: " + minBufferSize);
+      return -1;
+    }
+    Logging.d(TAG, "AudioRecord.getMinBufferSize: " + minBufferSize);
+
+    // Use a larger buffer size than the minimum required when creating the
+    // AudioRecord instance to ensure smooth recording under load. It has been
+    // verified that it does not increase the actual recording latency.
+    int bufferSizeInBytes = Math.max(BUFFER_SIZE_FACTOR * minBufferSize, byteBuffer.capacity());
+    Logging.d(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
+    try {
+      audioRecord = new AudioRecord(audioSource, sampleRate, channelConfig,
+          AudioFormat.ENCODING_PCM_16BIT, bufferSizeInBytes);
+    } catch (IllegalArgumentException e) {
+      reportWebRtcAudioRecordInitError("AudioRecord ctor error: " + e.getMessage());
+      releaseAudioResources();
+      return -1;
+    }
+    if (audioRecord == null || audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
+      reportWebRtcAudioRecordInitError("Failed to create a new AudioRecord instance");
+      releaseAudioResources();
+      return -1;
+    }
+    if (effects != null) {
+      effects.enable(audioRecord.getAudioSessionId());
+    }
+    logMainParameters();
+    logMainParametersExtended();
+    return framesPerBuffer;
+  }
+
+  private boolean startRecording() {
+    Logging.d(TAG, "startRecording");
+    assertTrue(audioRecord != null);
+    assertTrue(audioThread == null);
+    try {
+      audioRecord.startRecording();
+    } catch (IllegalStateException e) {
+      reportWebRtcAudioRecordStartError(AudioRecordStartErrorCode.AUDIO_RECORD_START_EXCEPTION,
+          "AudioRecord.startRecording failed: " + e.getMessage());
+      return false;
+    }
+    if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
+      reportWebRtcAudioRecordStartError(
+          AudioRecordStartErrorCode.AUDIO_RECORD_START_STATE_MISMATCH,
+          "AudioRecord.startRecording failed - incorrect state :"
+          + audioRecord.getRecordingState());
+      return false;
+    }
+    audioThread = new AudioRecordThread("AudioRecordJavaThread");
+    audioThread.start();
+    return true;
+  }
+
+  private boolean stopRecording() {
+    Logging.d(TAG, "stopRecording");
+    assertTrue(audioThread != null);
+    audioThread.stopThread();
+    if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS)) {
+      Logging.e(TAG, "Join of AudioRecordJavaThread timed out");
+      WebRtcAudioUtils.logAudioState(TAG);
+    }
+    audioThread = null;
+    if (effects != null) {
+      effects.release();
+    }
+    releaseAudioResources();
+    return true;
+  }
+
+  private void logMainParameters() {
+    Logging.d(TAG, "AudioRecord: "
+            + "session ID: " + audioRecord.getAudioSessionId() + ", "
+            + "channels: " + audioRecord.getChannelCount() + ", "
+            + "sample rate: " + audioRecord.getSampleRate());
+  }
+
+  private void logMainParametersExtended() {
+    if (Build.VERSION.SDK_INT >= 23) {
+      Logging.d(TAG, "AudioRecord: "
+              // The frame count of the native AudioRecord buffer.
+              + "buffer size in frames: " + audioRecord.getBufferSizeInFrames());
+    }
+  }
+
+  // Helper method which throws an exception when an assertion has failed.
+  private static void assertTrue(boolean condition) {
+    if (!condition) {
+      throw new AssertionError("Expected condition to be true");
+    }
+  }
+
+  private int channelCountToConfiguration(int channels) {
+    return (channels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
+  }
+
+  private native void nativeCacheDirectBufferAddress(ByteBuffer byteBuffer, long nativeAudioRecord);
+
+  private native void nativeDataIsRecorded(int bytes, long nativeAudioRecord);
+
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized void setAudioSource(int source) {
+    Logging.w(TAG, "Audio source is changed from: " + audioSource
+            + " to " + source);
+    audioSource = source;
+  }
+
+  private static int getDefaultAudioSource() {
+    return AudioSource.VOICE_COMMUNICATION;
+  }
+
+  // Sets all recorded samples to zero if `mute` is true, i.e., ensures that
+  // the microphone is muted.
+  public static void setMicrophoneMute(boolean mute) {
+    Logging.w(TAG, "setMicrophoneMute(" + mute + ")");
+    microphoneMute = mute;
+  }
+
+  // Releases the native AudioRecord resources.
+  private void releaseAudioResources() {
+    Logging.d(TAG, "releaseAudioResources");
+    if (audioRecord != null) {
+      audioRecord.release();
+      audioRecord = null;
+    }
+  }
+
+  private void reportWebRtcAudioRecordInitError(String errorMessage) {
+    Logging.e(TAG, "Init recording error: " + errorMessage);
+    WebRtcAudioUtils.logAudioState(TAG);
+    if (errorCallback != null) {
+      errorCallback.onWebRtcAudioRecordInitError(errorMessage);
+    }
+  }
+
+  private void reportWebRtcAudioRecordStartError(
+      AudioRecordStartErrorCode errorCode, String errorMessage) {
+    Logging.e(TAG, "Start recording error: " + errorCode + ". " + errorMessage);
+    WebRtcAudioUtils.logAudioState(TAG);
+    if (errorCallback != null) {
+      errorCallback.onWebRtcAudioRecordStartError(errorCode, errorMessage);
+    }
+  }
+
+  private void reportWebRtcAudioRecordError(String errorMessage) {
+    Logging.e(TAG, "Run-time recording error: " + errorMessage);
+    WebRtcAudioUtils.logAudioState(TAG);
+    if (errorCallback != null) {
+      errorCallback.onWebRtcAudioRecordError(errorMessage);
+    }
+  }
+}
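
The static hooks on WebRtcAudioRecord (setErrorCallback, setOnAudioSamplesReady, setMicrophoneMute) are the only parts of this class an application touches directly; everything else is driven from native code. A minimal sketch of how an app might wire them up is shown below, assuming application code that already depends on this org.webrtc.voiceengine package; the RecordingHooks class name is illustrative.

import org.webrtc.voiceengine.WebRtcAudioRecord;

final class RecordingHooks {
  // Sketch only: install the static hooks before the native ADM starts recording.
  static void install() {
    WebRtcAudioRecord.setErrorCallback(new WebRtcAudioRecord.WebRtcAudioRecordErrorCallback() {
      @Override
      public void onWebRtcAudioRecordInitError(String errorMessage) {
        // Forward init failures to the app's own logging/metrics.
      }
      @Override
      public void onWebRtcAudioRecordStartError(
          WebRtcAudioRecord.AudioRecordStartErrorCode errorCode, String errorMessage) {
        // E.g. retry once or fall back to a different audio source.
      }
      @Override
      public void onWebRtcAudioRecordError(String errorMessage) {
        // Run-time errors reported by the capture thread.
      }
    });
    // Debug-only tap on each recorded 10 ms buffer; the callback receives a copy of the data.
    WebRtcAudioRecord.setOnAudioSamplesReady(
        new WebRtcAudioRecord.WebRtcAudioRecordSamplesReadyCallback() {
          @Override
          public void onWebRtcAudioRecordSamplesReady(WebRtcAudioRecord.AudioSamples samples) {
            // samples.getData() can be handed to a file writer for analysis.
          }
        });
  }
}
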
diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
new file mode 100644
index 0000000..3e1875c
--- /dev/null
+++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
@@ -0,0 +1,494 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.voiceengine;
+
+import android.content.Context;
+import android.media.AudioAttributes;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioTrack;
+import android.os.Build;
+import android.os.Process;
+import androidx.annotation.Nullable;
+import java.lang.Thread;
+import java.nio.ByteBuffer;
+import org.webrtc.ContextUtils;
+import org.webrtc.Logging;
+import org.webrtc.ThreadUtils;
+
+public class WebRtcAudioTrack {
+  private static final boolean DEBUG = false;
+
+  private static final String TAG = "WebRtcAudioTrack";
+
+  // Default audio data format is PCM 16 bit per sample.
+  // Guaranteed to be supported by all devices.
+  private static final int BITS_PER_SAMPLE = 16;
+
+  // Requested size of each recorded buffer provided to the client.
+  private static final int CALLBACK_BUFFER_SIZE_MS = 10;
+
+  // Average number of callbacks per second.
+  private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
+
+  // The AudioTrackThread is allowed to wait for a successful call to join(),
+  // but the wait times out after this amount of time.
+  private static final long AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS = 2000;
+
+  // By default, WebRTC creates audio tracks with a usage attribute
+  // corresponding to voice communications, such as telephony or VoIP.
+  private static final int DEFAULT_USAGE = AudioAttributes.USAGE_VOICE_COMMUNICATION;
+  private static int usageAttribute = DEFAULT_USAGE;
+
+  // This method overrides the default usage attribute and allows the user
+  // to set it to something other than AudioAttributes.USAGE_VOICE_COMMUNICATION.
+  // NOTE: calling this method will most likely break existing VoIP tuning.
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized void setAudioTrackUsageAttribute(int usage) {
+    Logging.w(TAG, "Default usage attribute is changed from: "
+        + DEFAULT_USAGE + " to " + usage);
+    usageAttribute = usage;
+  }
+
+  private final long nativeAudioTrack;
+  private final AudioManager audioManager;
+  private final ThreadUtils.ThreadChecker threadChecker = new ThreadUtils.ThreadChecker();
+
+  private ByteBuffer byteBuffer;
+
+  private @Nullable AudioTrack audioTrack;
+  private @Nullable AudioTrackThread audioThread;
+
+  // Samples to be played are replaced by zeros if `speakerMute` is set to true.
+  // Can be used to ensure that the speaker is fully muted.
+  private static volatile boolean speakerMute;
+  private byte[] emptyBytes;
+
+  // Audio playout/track error handler functions.
+  public enum AudioTrackStartErrorCode {
+    AUDIO_TRACK_START_EXCEPTION,
+    AUDIO_TRACK_START_STATE_MISMATCH,
+  }
+
+  @Deprecated
+  public static interface WebRtcAudioTrackErrorCallback {
+    void onWebRtcAudioTrackInitError(String errorMessage);
+    void onWebRtcAudioTrackStartError(String errorMessage);
+    void onWebRtcAudioTrackError(String errorMessage);
+  }
+
+  // TODO(henrika): upgrade all clients to use this new interface instead.
+  public static interface ErrorCallback {
+    void onWebRtcAudioTrackInitError(String errorMessage);
+    void onWebRtcAudioTrackStartError(AudioTrackStartErrorCode errorCode, String errorMessage);
+    void onWebRtcAudioTrackError(String errorMessage);
+  }
+
+  private static @Nullable WebRtcAudioTrackErrorCallback errorCallbackOld;
+  private static @Nullable ErrorCallback errorCallback;
+
+  @Deprecated
+  public static void setErrorCallback(WebRtcAudioTrackErrorCallback errorCallback) {
+    Logging.d(TAG, "Set error callback (deprecated");
+    WebRtcAudioTrack.errorCallbackOld = errorCallback;
+  }
+
+  public static void setErrorCallback(ErrorCallback errorCallback) {
+    Logging.d(TAG, "Set extended error callback");
+    WebRtcAudioTrack.errorCallback = errorCallback;
+  }
+
+  /**
+   * Audio thread which keeps calling AudioTrack.write() to stream audio.
+   * Data is periodically acquired from the native WebRTC layer using the
+   * nativeGetPlayoutData callback function.
+   * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
+   */
+  private class AudioTrackThread extends Thread {
+    private volatile boolean keepAlive = true;
+
+    public AudioTrackThread(String name) {
+      super(name);
+    }
+
+    @Override
+    public void run() {
+      Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
+      Logging.d(TAG, "AudioTrackThread" + WebRtcAudioUtils.getThreadInfo());
+      assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING);
+
+      // Fixed size in bytes of each 10ms block of audio data that we ask for
+      // using callbacks to the native WebRTC client.
+      final int sizeInBytes = byteBuffer.capacity();
+
+      while (keepAlive) {
+        // Get 10ms of PCM data from the native WebRTC client. Audio data is
+        // written into the common ByteBuffer using the address that was
+        // cached at construction.
+        nativeGetPlayoutData(sizeInBytes, nativeAudioTrack);
+        // Write data until all data has been written to the audio sink.
+        // Upon return, the buffer position will have been advanced to reflect
+        // the amount of data that was successfully written to the AudioTrack.
+        assertTrue(sizeInBytes <= byteBuffer.remaining());
+        if (speakerMute) {
+          byteBuffer.clear();
+          byteBuffer.put(emptyBytes);
+          byteBuffer.position(0);
+        }
+        int bytesWritten = audioTrack.write(byteBuffer, sizeInBytes, AudioTrack.WRITE_BLOCKING);
+        if (bytesWritten != sizeInBytes) {
+          Logging.e(TAG, "AudioTrack.write played invalid number of bytes: " + bytesWritten);
+          // If a write() returns a negative value, an error has occurred.
+          // Stop playing and report an error in this case.
+          if (bytesWritten < 0) {
+            keepAlive = false;
+            reportWebRtcAudioTrackError("AudioTrack.write failed: " + bytesWritten);
+          }
+        }
+        // The byte buffer must be rewound since byteBuffer.position() is
+        // increased at each call to AudioTrack.write(). If we don't do this,
+        // next call to AudioTrack.write() will fail.
+        byteBuffer.rewind();
+
+        // TODO(henrika): it is possible to create a delay estimate here by
+        // counting number of written frames and subtracting the result from
+        // audioTrack.getPlaybackHeadPosition().
+      }
+
+      // Stops playing the audio data. Since the instance was created in
+      // MODE_STREAM mode, audio will stop playing after the last buffer that
+      // was written has been played.
+      if (audioTrack != null) {
+        Logging.d(TAG, "Calling AudioTrack.stop...");
+        try {
+          audioTrack.stop();
+          Logging.d(TAG, "AudioTrack.stop is done.");
+        } catch (IllegalStateException e) {
+          Logging.e(TAG, "AudioTrack.stop failed: " + e.getMessage());
+        }
+      }
+    }
+
+    // Stops the inner thread loop which results in calling AudioTrack.stop().
+    // Does not block the calling thread.
+    public void stopThread() {
+      Logging.d(TAG, "stopThread");
+      keepAlive = false;
+    }
+  }
+
+  WebRtcAudioTrack(long nativeAudioTrack) {
+    threadChecker.checkIsOnValidThread();
+    Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
+    this.nativeAudioTrack = nativeAudioTrack;
+    audioManager =
+        (AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
+    if (DEBUG) {
+      WebRtcAudioUtils.logDeviceInfo(TAG);
+    }
+  }
+
+  private int initPlayout(int sampleRate, int channels, double bufferSizeFactor) {
+    threadChecker.checkIsOnValidThread();
+    Logging.d(TAG,
+        "initPlayout(sampleRate=" + sampleRate + ", channels=" + channels
+            + ", bufferSizeFactor=" + bufferSizeFactor + ")");
+    final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
+    byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND));
+    Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
+    emptyBytes = new byte[byteBuffer.capacity()];
+    // Rather than passing the ByteBuffer with every callback (requiring
+    // the potentially expensive GetDirectBufferAddress) we simply have the
+    // native class cache the address to the memory once.
+    nativeCacheDirectBufferAddress(byteBuffer, nativeAudioTrack);
+
+    // Get the minimum buffer size required for the successful creation of an
+    // AudioTrack object in MODE_STREAM mode.
+    // Note that this size doesn't guarantee a smooth playback under load.
+    final int channelConfig = channelCountToConfiguration(channels);
+    final int minBufferSizeInBytes = (int) (AudioTrack.getMinBufferSize(sampleRate, channelConfig,
+                                                AudioFormat.ENCODING_PCM_16BIT)
+        * bufferSizeFactor);
+    Logging.d(TAG, "minBufferSizeInBytes: " + minBufferSizeInBytes);
+    // For the streaming mode, data must be written to the audio sink in
+    // chunks of size (given by byteBuffer.capacity()) less than or equal
+    // to the total buffer size `minBufferSizeInBytes`. But, we have seen
+    // reports of "getMinBufferSize(): error querying hardware". Hence, it
+    // can happen that `minBufferSizeInBytes` contains an invalid value.
+    if (minBufferSizeInBytes < byteBuffer.capacity()) {
+      reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value.");
+      return -1;
+    }
+
+    // Ensure that the previous audio session was stopped correctly before trying
+    // to create a new AudioTrack.
+    if (audioTrack != null) {
+      reportWebRtcAudioTrackInitError("Conflict with existing AudioTrack.");
+      return -1;
+    }
+    try {
+      // Create an AudioTrack object and initialize its associated audio buffer.
+      // The size of this buffer determines how long an AudioTrack can play
+      // before running out of data.
+      // As we are on API level 21 or higher, it is possible to use a special AudioTrack
+      // constructor that uses AudioAttributes and AudioFormat as input. It allows us to
+      // supersede the notion of stream types for defining the behavior of audio playback,
+      // and to allow certain platforms or routing policies to use this information for more
+      // refined volume or routing decisions.
+      audioTrack = createAudioTrack(sampleRate, channelConfig, minBufferSizeInBytes);
+    } catch (IllegalArgumentException e) {
+      reportWebRtcAudioTrackInitError(e.getMessage());
+      releaseAudioResources();
+      return -1;
+    }
+
+    // It can happen that an AudioTrack is created but it was not successfully
+    // initialized upon creation. Seems to be the case e.g. when the maximum
+    // number of globally available audio tracks is exceeded.
+    if (audioTrack == null || audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
+      reportWebRtcAudioTrackInitError("Initialization of audio track failed.");
+      releaseAudioResources();
+      return -1;
+    }
+    logMainParameters();
+    logMainParametersExtended();
+    return minBufferSizeInBytes;
+  }
+
+  private boolean startPlayout() {
+    threadChecker.checkIsOnValidThread();
+    Logging.d(TAG, "startPlayout");
+    assertTrue(audioTrack != null);
+    assertTrue(audioThread == null);
+
+    // Starts playing an audio track.
+    try {
+      audioTrack.play();
+    } catch (IllegalStateException e) {
+      reportWebRtcAudioTrackStartError(AudioTrackStartErrorCode.AUDIO_TRACK_START_EXCEPTION,
+          "AudioTrack.play failed: " + e.getMessage());
+      releaseAudioResources();
+      return false;
+    }
+    if (audioTrack.getPlayState() != AudioTrack.PLAYSTATE_PLAYING) {
+      reportWebRtcAudioTrackStartError(
+          AudioTrackStartErrorCode.AUDIO_TRACK_START_STATE_MISMATCH,
+          "AudioTrack.play failed - incorrect state :"
+          + audioTrack.getPlayState());
+      releaseAudioResources();
+      return false;
+    }
+
+    // Create and start new high-priority thread which calls AudioTrack.write()
+    // and where we also call the native nativeGetPlayoutData() callback to
+    // request decoded audio from WebRTC.
+    audioThread = new AudioTrackThread("AudioTrackJavaThread");
+    audioThread.start();
+    return true;
+  }
+
+  private boolean stopPlayout() {
+    threadChecker.checkIsOnValidThread();
+    Logging.d(TAG, "stopPlayout");
+    assertTrue(audioThread != null);
+    logUnderrunCount();
+    audioThread.stopThread();
+
+    Logging.d(TAG, "Stopping the AudioTrackThread...");
+    audioThread.interrupt();
+    if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS)) {
+      Logging.e(TAG, "Join of AudioTrackThread timed out.");
+      WebRtcAudioUtils.logAudioState(TAG);
+    }
+    Logging.d(TAG, "AudioTrackThread has now been stopped.");
+    audioThread = null;
+    releaseAudioResources();
+    return true;
+  }
+
+  // Get max possible volume index for a phone call audio stream.
+  private int getStreamMaxVolume() {
+    threadChecker.checkIsOnValidThread();
+    Logging.d(TAG, "getStreamMaxVolume");
+    assertTrue(audioManager != null);
+    return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
+  }
+
+  // Set current volume level for a phone call audio stream.
+  private boolean setStreamVolume(int volume) {
+    threadChecker.checkIsOnValidThread();
+    Logging.d(TAG, "setStreamVolume(" + volume + ")");
+    assertTrue(audioManager != null);
+    if (audioManager.isVolumeFixed()) {
+      Logging.e(TAG, "The device implements a fixed volume policy.");
+      return false;
+    }
+    audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0);
+    return true;
+  }
+
+  /** Get current volume level for a phone call audio stream. */
+  private int getStreamVolume() {
+    threadChecker.checkIsOnValidThread();
+    Logging.d(TAG, "getStreamVolume");
+    assertTrue(audioManager != null);
+    return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
+  }
+
+  private void logMainParameters() {
+    Logging.d(TAG, "AudioTrack: "
+            + "session ID: " + audioTrack.getAudioSessionId() + ", "
+            + "channels: " + audioTrack.getChannelCount() + ", "
+            + "sample rate: " + audioTrack.getSampleRate() + ", "
+            // Gain (>=1.0) expressed as linear multiplier on sample values.
+            + "max gain: " + AudioTrack.getMaxVolume());
+  }
+
+  // Creates an AudioTrack instance using AudioAttributes and AudioFormat as input.
+  // It allows certain platforms or routing policies to use this information for more
+  // refined volume or routing decisions.
+  private static AudioTrack createAudioTrack(
+      int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
+    Logging.d(TAG, "createAudioTrack");
+    // TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control
+    // performance when Android O is supported. Add some logging in the meantime.
+    final int nativeOutputSampleRate =
+        AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL);
+    Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate);
+    if (sampleRateInHz != nativeOutputSampleRate) {
+      Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
+    }
+    if (usageAttribute != DEFAULT_USAGE) {
+      Logging.w(TAG, "A non default usage attribute is used: " + usageAttribute);
+    }
+    // Create an audio track where the audio usage is for VoIP and the content type is speech.
+    return new AudioTrack(
+        new AudioAttributes.Builder()
+            .setUsage(usageAttribute)
+            .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
+            .build(),
+        new AudioFormat.Builder()
+          .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
+          .setSampleRate(sampleRateInHz)
+          .setChannelMask(channelConfig)
+          .build(),
+        bufferSizeInBytes,
+        AudioTrack.MODE_STREAM,
+        AudioManager.AUDIO_SESSION_ID_GENERATE);
+  }
+
+  private void logBufferSizeInFrames() {
+    if (Build.VERSION.SDK_INT >= 23) {
+      Logging.d(TAG, "AudioTrack: "
+              // The effective size of the AudioTrack buffer that the app writes to.
+              + "buffer size in frames: " + audioTrack.getBufferSizeInFrames());
+    }
+  }
+
+  private int getBufferSizeInFrames() {
+    if (Build.VERSION.SDK_INT >= 23) {
+      return audioTrack.getBufferSizeInFrames();
+    }
+    return -1;
+  }
+
+  private void logBufferCapacityInFrames() {
+    if (Build.VERSION.SDK_INT >= 24) {
+      Logging.d(TAG,
+          "AudioTrack: "
+              // Maximum size of the AudioTrack buffer in frames.
+              + "buffer capacity in frames: " + audioTrack.getBufferCapacityInFrames());
+    }
+  }
+
+  private void logMainParametersExtended() {
+    logBufferSizeInFrames();
+    logBufferCapacityInFrames();
+  }
+
+  // Prints the number of underrun occurrences in the application-level write
+  // buffer since the AudioTrack was created. An underrun occurs if the app does
+  // not write audio data quickly enough, causing the buffer to underflow and a
+  // potential audio glitch.
+  // TODO(henrika): keep track of this value in the field and possibly add new
+  // UMA stat if needed.
+  private void logUnderrunCount() {
+    if (Build.VERSION.SDK_INT >= 24) {
+      Logging.d(TAG, "underrun count: " + audioTrack.getUnderrunCount());
+    }
+  }
+
+  // Helper method which throws an exception when an assertion has failed.
+  private static void assertTrue(boolean condition) {
+    if (!condition) {
+      throw new AssertionError("Expected condition to be true");
+    }
+  }
+
+  private int channelCountToConfiguration(int channels) {
+    return (channels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO);
+  }
+
+  private native void nativeCacheDirectBufferAddress(ByteBuffer byteBuffer, long nativeAudioTrack);
+
+  private native void nativeGetPlayoutData(int bytes, long nativeAudioTrack);
+
+  // Sets all samples to be played out to zero if `mute` is true, i.e.,
+  // ensures that the speaker is muted.
+  public static void setSpeakerMute(boolean mute) {
+    Logging.w(TAG, "setSpeakerMute(" + mute + ")");
+    speakerMute = mute;
+  }
+
+  // Releases the native AudioTrack resources.
+  private void releaseAudioResources() {
+    Logging.d(TAG, "releaseAudioResources");
+    if (audioTrack != null) {
+      audioTrack.release();
+      audioTrack = null;
+    }
+  }
+
+  private void reportWebRtcAudioTrackInitError(String errorMessage) {
+    Logging.e(TAG, "Init playout error: " + errorMessage);
+    WebRtcAudioUtils.logAudioState(TAG);
+    if (errorCallbackOld != null) {
+      errorCallbackOld.onWebRtcAudioTrackInitError(errorMessage);
+    }
+    if (errorCallback != null) {
+      errorCallback.onWebRtcAudioTrackInitError(errorMessage);
+    }
+  }
+
+  private void reportWebRtcAudioTrackStartError(
+      AudioTrackStartErrorCode errorCode, String errorMessage) {
+    Logging.e(TAG, "Start playout error: "  + errorCode + ". " + errorMessage);
+    WebRtcAudioUtils.logAudioState(TAG);
+    if (errorCallbackOld != null) {
+      errorCallbackOld.onWebRtcAudioTrackStartError(errorMessage);
+    }
+    if (errorCallback != null) {
+      errorCallback.onWebRtcAudioTrackStartError(errorCode, errorMessage);
+    }
+  }
+
+  private void reportWebRtcAudioTrackError(String errorMessage) {
+    Logging.e(TAG, "Run-time playback error: " + errorMessage);
+    WebRtcAudioUtils.logAudioState(TAG);
+    if (errorCallbackOld != null) {
+      errorCallbackOld.onWebRtcAudioTrackError(errorMessage);
+    }
+    if (errorCallback != null) {
+      errorCallback.onWebRtcAudioTrackError(errorMessage);
+    }
+  }
+}
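
As with the record side, the playout class exposes a few static knobs (setAudioTrackUsageAttribute, setErrorCallback, setSpeakerMute) that must be set before the native ADM creates the track. A hedged usage sketch follows; the PlayoutHooks class name and the USAGE_MEDIA choice are illustrative assumptions, not recommendations from this patch.

import android.media.AudioAttributes;
import org.webrtc.voiceengine.WebRtcAudioTrack;

final class PlayoutHooks {
  static void install() {
    // Optional: override the default VOICE_COMMUNICATION usage. The class
    // itself warns that this will most likely break existing VoIP tuning.
    WebRtcAudioTrack.setAudioTrackUsageAttribute(AudioAttributes.USAGE_MEDIA);

    // Prefer the newer ErrorCallback interface over the deprecated one.
    WebRtcAudioTrack.setErrorCallback(new WebRtcAudioTrack.ErrorCallback() {
      @Override
      public void onWebRtcAudioTrackInitError(String errorMessage) {
        // Playout initialization failed.
      }
      @Override
      public void onWebRtcAudioTrackStartError(
          WebRtcAudioTrack.AudioTrackStartErrorCode errorCode, String errorMessage) {
        // AudioTrack.play() failed or ended up in the wrong state.
      }
      @Override
      public void onWebRtcAudioTrackError(String errorMessage) {
        // Run-time errors reported by the playout thread.
      }
    });

    // Zero out all playout samples, e.g. for a local "mute remote audio" toggle.
    WebRtcAudioTrack.setSpeakerMute(false);
  }
}
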
diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
new file mode 100644
index 0000000..0472114
--- /dev/null
+++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
@@ -0,0 +1,377 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.voiceengine;
+
+import static android.media.AudioManager.MODE_IN_CALL;
+import static android.media.AudioManager.MODE_IN_COMMUNICATION;
+import static android.media.AudioManager.MODE_NORMAL;
+import static android.media.AudioManager.MODE_RINGTONE;
+
+import android.content.Context;
+import android.content.pm.PackageManager;
+import android.media.AudioDeviceInfo;
+import android.media.AudioManager;
+import android.os.Build;
+import java.lang.Thread;
+import java.util.Arrays;
+import java.util.List;
+import org.webrtc.ContextUtils;
+import org.webrtc.Logging;
+
+public final class WebRtcAudioUtils {
+  private static final String TAG = "WebRtcAudioUtils";
+
+  // List of devices where we have seen issues (e.g. bad audio quality) using
+  // the low latency output mode in combination with OpenSL ES.
+  // The device name is given by Build.MODEL.
+  private static final String[] BLACKLISTED_OPEN_SL_ES_MODELS = new String[] {
+      // It is recommended to maintain a list of blacklisted models outside
+      // this package and instead call
+      // WebRtcAudioManager.setBlacklistDeviceForOpenSLESUsage(true)
+      // from the client for devices where OpenSL ES shall be disabled.
+  };
+
+  // List of devices where it has been verified that the built-in effect is
+  // bad and where it makes sense to avoid using it and instead rely on the
+  // native WebRTC version. The device name is given by Build.MODEL.
+  private static final String[] BLACKLISTED_AEC_MODELS = new String[] {
+      // It is recommended to maintain a list of blacklisted models outside
+      // this package and instead call setWebRtcBasedAcousticEchoCanceler(true)
+      // from the client for devices where the built-in AEC shall be disabled.
+  };
+  private static final String[] BLACKLISTED_NS_MODELS = new String[] {
+    // It is recommended to maintain a list of blacklisted models outside
+    // this package and instead call setWebRtcBasedNoiseSuppressor(true)
+    // from the client for devices where the built-in NS shall be disabled.
+  };
+
+  // Use 16kHz as the default sample rate. A higher sample rate might prevent
+  // us from supporting communication mode on some older (e.g. ICS) devices.
+  private static final int DEFAULT_SAMPLE_RATE_HZ = 16000;
+  private static int defaultSampleRateHz = DEFAULT_SAMPLE_RATE_HZ;
+  // Set to true if setDefaultSampleRateHz() has been called.
+  private static boolean isDefaultSampleRateOverridden;
+
+  // By default, utilize hardware-based audio effects for AEC and NS when
+  // available.
+  private static boolean useWebRtcBasedAcousticEchoCanceler;
+  private static boolean useWebRtcBasedNoiseSuppressor;
+
+  // Call these methods if any hardware based effect shall be replaced by a
+  // software based version provided by the WebRTC stack instead.
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized void setWebRtcBasedAcousticEchoCanceler(boolean enable) {
+    useWebRtcBasedAcousticEchoCanceler = enable;
+  }
+
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized void setWebRtcBasedNoiseSuppressor(boolean enable) {
+    useWebRtcBasedNoiseSuppressor = enable;
+  }
+
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized void setWebRtcBasedAutomaticGainControl(boolean enable) {
+    // TODO(henrika): deprecated; remove when no longer used by any client.
+    Logging.w(TAG, "setWebRtcBasedAutomaticGainControl() is deprecated");
+  }
+
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized boolean useWebRtcBasedAcousticEchoCanceler() {
+    if (useWebRtcBasedAcousticEchoCanceler) {
+      Logging.w(TAG, "Overriding default behavior; now using WebRTC AEC!");
+    }
+    return useWebRtcBasedAcousticEchoCanceler;
+  }
+
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized boolean useWebRtcBasedNoiseSuppressor() {
+    if (useWebRtcBasedNoiseSuppressor) {
+      Logging.w(TAG, "Overriding default behavior; now using WebRTC NS!");
+    }
+    return useWebRtcBasedNoiseSuppressor;
+  }
+
+  // TODO(henrika): deprecated; remove when no longer used by any client.
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized boolean useWebRtcBasedAutomaticGainControl() {
+    // Always return true here to avoid trying to use any built-in AGC.
+    return true;
+  }
+
+  // Returns true if the device supports an audio effect (AEC or NS).
+  // Four conditions must be fulfilled for these methods to return true:
+  // 1) the platform must support the built-in (HW) effect,
+  // 2) explicit use (override) of a WebRTC based version must not be set,
+  // 3) the device must not be blacklisted for use of the effect, and
+  // 4) the UUID of the effect must be approved (some UUIDs can be excluded).
+  public static boolean isAcousticEchoCancelerSupported() {
+    return WebRtcAudioEffects.canUseAcousticEchoCanceler();
+  }
+  public static boolean isNoiseSuppressorSupported() {
+    return WebRtcAudioEffects.canUseNoiseSuppressor();
+  }
+  // TODO(henrika): deprecated; remove when no longer used by any client.
+  public static boolean isAutomaticGainControlSupported() {
+    // Always return false here to avoid trying to use any built-in AGC.
+    return false;
+  }
+
+  // Call this method if the default handling of querying the native sample
+  // rate shall be overridden. Can be useful on some devices where the
+  // available Android APIs are known to return invalid results.
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized void setDefaultSampleRateHz(int sampleRateHz) {
+    isDefaultSampleRateOverridden = true;
+    defaultSampleRateHz = sampleRateHz;
+  }
+
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized boolean isDefaultSampleRateOverridden() {
+    return isDefaultSampleRateOverridden;
+  }
+
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized int getDefaultSampleRateHz() {
+    return defaultSampleRateHz;
+  }
+
+  public static List<String> getBlackListedModelsForAecUsage() {
+    return Arrays.asList(WebRtcAudioUtils.BLACKLISTED_AEC_MODELS);
+  }
+
+  public static List<String> getBlackListedModelsForNsUsage() {
+    return Arrays.asList(WebRtcAudioUtils.BLACKLISTED_NS_MODELS);
+  }
+
+  // Helper method for building a string of thread information.
+  public static String getThreadInfo() {
+    return "@[name=" + Thread.currentThread().getName() + ", id=" + Thread.currentThread().getId()
+        + "]";
+  }
+
+  // Returns true if we're running on an emulator.
+  public static boolean runningOnEmulator() {
+    return Build.HARDWARE.equals("goldfish") && Build.BRAND.startsWith("generic_");
+  }
+
+  // Returns true if the device is blacklisted for OpenSL ES usage.
+  public static boolean deviceIsBlacklistedForOpenSLESUsage() {
+    List<String> blackListedModels = Arrays.asList(BLACKLISTED_OPEN_SL_ES_MODELS);
+    return blackListedModels.contains(Build.MODEL);
+  }
+
+  // Information about the current build, taken from system properties.
+  static void logDeviceInfo(String tag) {
+    Logging.d(tag, "Android SDK: " + Build.VERSION.SDK_INT + ", "
+            + "Release: " + Build.VERSION.RELEASE + ", "
+            + "Brand: " + Build.BRAND + ", "
+            + "Device: " + Build.DEVICE + ", "
+            + "Id: " + Build.ID + ", "
+            + "Hardware: " + Build.HARDWARE + ", "
+            + "Manufacturer: " + Build.MANUFACTURER + ", "
+            + "Model: " + Build.MODEL + ", "
+            + "Product: " + Build.PRODUCT);
+  }
+
+  // Logs information about the current audio state. The idea is to call this
+  // method when errors are detected to log under what conditions the error
+  // occurred. Hopefully it will provide clues to what might be the root cause.
+  static void logAudioState(String tag) {
+    logDeviceInfo(tag);
+    final Context context = ContextUtils.getApplicationContext();
+    final AudioManager audioManager =
+        (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
+    logAudioStateBasic(tag, audioManager);
+    logAudioStateVolume(tag, audioManager);
+    logAudioDeviceInfo(tag, audioManager);
+  }
+
+  // Reports basic audio statistics.
+  private static void logAudioStateBasic(String tag, AudioManager audioManager) {
+    Logging.d(tag, "Audio State: "
+            + "audio mode: " + modeToString(audioManager.getMode()) + ", "
+            + "has mic: " + hasMicrophone() + ", "
+            + "mic muted: " + audioManager.isMicrophoneMute() + ", "
+            + "music active: " + audioManager.isMusicActive() + ", "
+            + "speakerphone: " + audioManager.isSpeakerphoneOn() + ", "
+            + "BT SCO: " + audioManager.isBluetoothScoOn());
+  }
+
+  // Adds volume information for all possible stream types.
+  private static void logAudioStateVolume(String tag, AudioManager audioManager) {
+    final int[] streams = {
+        AudioManager.STREAM_VOICE_CALL,
+        AudioManager.STREAM_MUSIC,
+        AudioManager.STREAM_RING,
+        AudioManager.STREAM_ALARM,
+        AudioManager.STREAM_NOTIFICATION,
+        AudioManager.STREAM_SYSTEM
+    };
+    Logging.d(tag, "Audio State: ");
+    // Some devices may not have volume controls and might use a fixed volume.
+    boolean fixedVolume = audioManager.isVolumeFixed();
+    Logging.d(tag, "  fixed volume=" + fixedVolume);
+    if (!fixedVolume) {
+      for (int stream : streams) {
+        StringBuilder info = new StringBuilder();
+        info.append("  " + streamTypeToString(stream) + ": ");
+        info.append("volume=").append(audioManager.getStreamVolume(stream));
+        info.append(", max=").append(audioManager.getStreamMaxVolume(stream));
+        logIsStreamMute(tag, audioManager, stream, info);
+        Logging.d(tag, info.toString());
+      }
+    }
+  }
+
+  private static void logIsStreamMute(
+      String tag, AudioManager audioManager, int stream, StringBuilder info) {
+    if (Build.VERSION.SDK_INT >= 23) {
+      info.append(", muted=").append(audioManager.isStreamMute(stream));
+    }
+  }
+
+  private static void logAudioDeviceInfo(String tag, AudioManager audioManager) {
+    if (Build.VERSION.SDK_INT < 23) {
+      return;
+    }
+    final AudioDeviceInfo[] devices =
+        audioManager.getDevices(AudioManager.GET_DEVICES_ALL);
+    if (devices.length == 0) {
+      return;
+    }
+    Logging.d(tag, "Audio Devices: ");
+    for (AudioDeviceInfo device : devices) {
+      StringBuilder info = new StringBuilder();
+      info.append("  ").append(deviceTypeToString(device.getType()));
+      info.append(device.isSource() ? "(in): " : "(out): ");
+      // An empty array indicates that the device supports arbitrary channel counts.
+      if (device.getChannelCounts().length > 0) {
+        info.append("channels=").append(Arrays.toString(device.getChannelCounts()));
+        info.append(", ");
+      }
+      if (device.getEncodings().length > 0) {
+        // Examples: ENCODING_PCM_16BIT = 2, ENCODING_PCM_FLOAT = 4.
+        info.append("encodings=").append(Arrays.toString(device.getEncodings()));
+        info.append(", ");
+      }
+      if (device.getSampleRates().length > 0) {
+        info.append("sample rates=").append(Arrays.toString(device.getSampleRates()));
+        info.append(", ");
+      }
+      info.append("id=").append(device.getId());
+      Logging.d(tag, info.toString());
+    }
+  }
+
+  // Converts media.AudioManager modes into local string representation.
+  static String modeToString(int mode) {
+    switch (mode) {
+      case MODE_IN_CALL:
+        return "MODE_IN_CALL";
+      case MODE_IN_COMMUNICATION:
+        return "MODE_IN_COMMUNICATION";
+      case MODE_NORMAL:
+        return "MODE_NORMAL";
+      case MODE_RINGTONE:
+        return "MODE_RINGTONE";
+      default:
+        return "MODE_INVALID";
+    }
+  }
+
+  private static String streamTypeToString(int stream) {
+    switch(stream) {
+      case AudioManager.STREAM_VOICE_CALL:
+        return "STREAM_VOICE_CALL";
+      case AudioManager.STREAM_MUSIC:
+        return "STREAM_MUSIC";
+      case AudioManager.STREAM_RING:
+        return "STREAM_RING";
+      case AudioManager.STREAM_ALARM:
+        return "STREAM_ALARM";
+      case AudioManager.STREAM_NOTIFICATION:
+        return "STREAM_NOTIFICATION";
+      case AudioManager.STREAM_SYSTEM:
+        return "STREAM_SYSTEM";
+      default:
+        return "STREAM_INVALID";
+    }
+  }
+
+  // Converts AudioDeviceInfo types to local string representation.
+  private static String deviceTypeToString(int type) {
+    switch (type) {
+      case AudioDeviceInfo.TYPE_UNKNOWN:
+        return "TYPE_UNKNOWN";
+      case AudioDeviceInfo.TYPE_BUILTIN_EARPIECE:
+        return "TYPE_BUILTIN_EARPIECE";
+      case AudioDeviceInfo.TYPE_BUILTIN_SPEAKER:
+        return "TYPE_BUILTIN_SPEAKER";
+      case AudioDeviceInfo.TYPE_WIRED_HEADSET:
+        return "TYPE_WIRED_HEADSET";
+      case AudioDeviceInfo.TYPE_WIRED_HEADPHONES:
+        return "TYPE_WIRED_HEADPHONES";
+      case AudioDeviceInfo.TYPE_LINE_ANALOG:
+        return "TYPE_LINE_ANALOG";
+      case AudioDeviceInfo.TYPE_LINE_DIGITAL:
+        return "TYPE_LINE_DIGITAL";
+      case AudioDeviceInfo.TYPE_BLUETOOTH_SCO:
+        return "TYPE_BLUETOOTH_SCO";
+      case AudioDeviceInfo.TYPE_BLUETOOTH_A2DP:
+        return "TYPE_BLUETOOTH_A2DP";
+      case AudioDeviceInfo.TYPE_HDMI:
+        return "TYPE_HDMI";
+      case AudioDeviceInfo.TYPE_HDMI_ARC:
+        return "TYPE_HDMI_ARC";
+      case AudioDeviceInfo.TYPE_USB_DEVICE:
+        return "TYPE_USB_DEVICE";
+      case AudioDeviceInfo.TYPE_USB_ACCESSORY:
+        return "TYPE_USB_ACCESSORY";
+      case AudioDeviceInfo.TYPE_DOCK:
+        return "TYPE_DOCK";
+      case AudioDeviceInfo.TYPE_FM:
+        return "TYPE_FM";
+      case AudioDeviceInfo.TYPE_BUILTIN_MIC:
+        return "TYPE_BUILTIN_MIC";
+      case AudioDeviceInfo.TYPE_FM_TUNER:
+        return "TYPE_FM_TUNER";
+      case AudioDeviceInfo.TYPE_TV_TUNER:
+        return "TYPE_TV_TUNER";
+      case AudioDeviceInfo.TYPE_TELEPHONY:
+        return "TYPE_TELEPHONY";
+      case AudioDeviceInfo.TYPE_AUX_LINE:
+        return "TYPE_AUX_LINE";
+      case AudioDeviceInfo.TYPE_IP:
+        return "TYPE_IP";
+      case AudioDeviceInfo.TYPE_BUS:
+        return "TYPE_BUS";
+      case AudioDeviceInfo.TYPE_USB_HEADSET:
+        return "TYPE_USB_HEADSET";
+      default:
+        return "TYPE_UNKNOWN";
+    }
+  }
+
+  // Returns true if the device can record audio via a microphone.
+  private static boolean hasMicrophone() {
+    return ContextUtils.getApplicationContext().getPackageManager().hasSystemFeature(
+        PackageManager.FEATURE_MICROPHONE);
+  }
+}
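
The utility class above is also where clients opt out of the hardware AEC/NS effects or pin the sample rate on devices whose platform APIs report invalid values. A brief sketch of such a configuration call is shown below; the AudioQuirks class, the model string, and the 48 kHz override are application-side assumptions used only for illustration.

import android.os.Build;
import org.webrtc.voiceengine.WebRtcAudioUtils;

final class AudioQuirks {
  // Hypothetical app-maintained list of models with a broken built-in AEC.
  private static final String[] BAD_AEC_MODELS = {"SomeModelX"};

  static void apply() {
    for (String model : BAD_AEC_MODELS) {
      if (model.equals(Build.MODEL)) {
        // Fall back to WebRTC's software AEC/NS on this device.
        WebRtcAudioUtils.setWebRtcBasedAcousticEchoCanceler(true);
        WebRtcAudioUtils.setWebRtcBasedNoiseSuppressor(true);
      }
    }
    // Force 48 kHz if the platform is known to report an invalid native rate.
    WebRtcAudioUtils.setDefaultSampleRateHz(48000);
  }
}
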
diff --git a/modules/audio_device/android/opensles_common.cc b/modules/audio_device/android/opensles_common.cc
new file mode 100644
index 0000000..019714d
--- /dev/null
+++ b/modules/audio_device/android/opensles_common.cc
@@ -0,0 +1,103 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/opensles_common.h"
+
+#include <SLES/OpenSLES.h>
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Returns a string representation given an integer SL_RESULT_XXX code.
+// The mapping can be found in <SLES/OpenSLES.h>.
+const char* GetSLErrorString(size_t code) {
+  static const char* sl_error_strings[] = {
+      "SL_RESULT_SUCCESS",                 // 0
+      "SL_RESULT_PRECONDITIONS_VIOLATED",  // 1
+      "SL_RESULT_PARAMETER_INVALID",       // 2
+      "SL_RESULT_MEMORY_FAILURE",          // 3
+      "SL_RESULT_RESOURCE_ERROR",          // 4
+      "SL_RESULT_RESOURCE_LOST",           // 5
+      "SL_RESULT_IO_ERROR",                // 6
+      "SL_RESULT_BUFFER_INSUFFICIENT",     // 7
+      "SL_RESULT_CONTENT_CORRUPTED",       // 8
+      "SL_RESULT_CONTENT_UNSUPPORTED",     // 9
+      "SL_RESULT_CONTENT_NOT_FOUND",       // 10
+      "SL_RESULT_PERMISSION_DENIED",       // 11
+      "SL_RESULT_FEATURE_UNSUPPORTED",     // 12
+      "SL_RESULT_INTERNAL_ERROR",          // 13
+      "SL_RESULT_UNKNOWN_ERROR",           // 14
+      "SL_RESULT_OPERATION_ABORTED",       // 15
+      "SL_RESULT_CONTROL_LOST",            // 16
+  };
+
+  if (code >= arraysize(sl_error_strings)) {
+    return "SL_RESULT_UNKNOWN_ERROR";
+  }
+  return sl_error_strings[code];
+}
+
+SLDataFormat_PCM CreatePCMConfiguration(size_t channels,
+                                        int sample_rate,
+                                        size_t bits_per_sample) {
+  RTC_CHECK_EQ(bits_per_sample, SL_PCMSAMPLEFORMAT_FIXED_16);
+  SLDataFormat_PCM format;
+  format.formatType = SL_DATAFORMAT_PCM;
+  format.numChannels = static_cast<SLuint32>(channels);
+  // Note that the sample rate is expressed in milliHertz, not Hertz.
+  switch (sample_rate) {
+    case 8000:
+      format.samplesPerSec = SL_SAMPLINGRATE_8;
+      break;
+    case 16000:
+      format.samplesPerSec = SL_SAMPLINGRATE_16;
+      break;
+    case 22050:
+      format.samplesPerSec = SL_SAMPLINGRATE_22_05;
+      break;
+    case 32000:
+      format.samplesPerSec = SL_SAMPLINGRATE_32;
+      break;
+    case 44100:
+      format.samplesPerSec = SL_SAMPLINGRATE_44_1;
+      break;
+    case 48000:
+      format.samplesPerSec = SL_SAMPLINGRATE_48;
+      break;
+    case 64000:
+      format.samplesPerSec = SL_SAMPLINGRATE_64;
+      break;
+    case 88200:
+      format.samplesPerSec = SL_SAMPLINGRATE_88_2;
+      break;
+    case 96000:
+      format.samplesPerSec = SL_SAMPLINGRATE_96;
+      break;
+    default:
+      RTC_CHECK(false) << "Unsupported sample rate: " << sample_rate;
+      break;
+  }
+  format.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
+  format.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
+  format.endianness = SL_BYTEORDER_LITTLEENDIAN;
+  if (format.numChannels == 1) {
+    format.channelMask = SL_SPEAKER_FRONT_CENTER;
+  } else if (format.numChannels == 2) {
+    format.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
+  } else {
+    RTC_CHECK(false) << "Unsupported number of channels: "
+                     << format.numChannels;
+  }
+  return format;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_device/android/opensles_common.h b/modules/audio_device/android/opensles_common.h
new file mode 100644
index 0000000..438c522
--- /dev/null
+++ b/modules/audio_device/android/opensles_common.h
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
+
+#include <SLES/OpenSLES.h>
+#include <stddef.h>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Returns a string representation given an integer SL_RESULT_XXX code.
+// The mapping can be found in <SLES/OpenSLES.h>.
+const char* GetSLErrorString(size_t code);
+
+// Configures an SL_DATAFORMAT_PCM structure based on native audio parameters.
+SLDataFormat_PCM CreatePCMConfiguration(size_t channels,
+                                        int sample_rate,
+                                        size_t bits_per_sample);
+
+// Helper class for using SLObjectItf interfaces.
+template <typename SLType, typename SLDerefType>
+class ScopedSLObject {
+ public:
+  ScopedSLObject() : obj_(nullptr) {}
+
+  ~ScopedSLObject() { Reset(); }
+
+  SLType* Receive() {
+    RTC_DCHECK(!obj_);
+    return &obj_;
+  }
+
+  SLDerefType operator->() { return *obj_; }
+
+  SLType Get() const { return obj_; }
+
+  void Reset() {
+    if (obj_) {
+      (*obj_)->Destroy(obj_);
+      obj_ = nullptr;
+    }
+  }
+
+ private:
+  SLType obj_;
+};
+
+typedef ScopedSLObject<SLObjectItf, const SLObjectItf_*> ScopedSLObjectItf;
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
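
The ScopedSLObjectItf alias above is a small RAII helper; a hedged sketch of
the pattern it supports (error handling trimmed, the slCreateEngine arguments
are the usual defaults and not taken from this patch):

  webrtc::ScopedSLObjectItf engine;
  if (slCreateEngine(engine.Receive(), 0, nullptr, 0, nullptr, nullptr) ==
      SL_RESULT_SUCCESS) {
    engine->Realize(engine.Get(), SL_BOOLEAN_FALSE);
  }
  // When `engine` goes out of scope (or Reset() is called), the wrapper
  // invokes (*obj)->Destroy(obj) on the underlying SLObjectItf.
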
diff --git a/modules/audio_device/android/opensles_player.cc b/modules/audio_device/android/opensles_player.cc
new file mode 100644
index 0000000..f2b3a37
--- /dev/null
+++ b/modules/audio_device/android/opensles_player.cc
@@ -0,0 +1,434 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/opensles_player.h"
+
+#include <android/log.h>
+
+#include <memory>
+
+#include "api/array_view.h"
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/time_utils.h"
+
+#define TAG "OpenSLESPlayer"
+#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
+#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
+#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
+#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
+
+#define RETURN_ON_ERROR(op, ...)                          \
+  do {                                                    \
+    SLresult err = (op);                                  \
+    if (err != SL_RESULT_SUCCESS) {                       \
+      ALOGE("%s failed: %s", #op, GetSLErrorString(err)); \
+      return __VA_ARGS__;                                 \
+    }                                                     \
+  } while (0)
+
+namespace webrtc {
+
+OpenSLESPlayer::OpenSLESPlayer(AudioManager* audio_manager)
+    : audio_manager_(audio_manager),
+      audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
+      audio_device_buffer_(nullptr),
+      initialized_(false),
+      playing_(false),
+      buffer_index_(0),
+      engine_(nullptr),
+      player_(nullptr),
+      simple_buffer_queue_(nullptr),
+      volume_(nullptr),
+      last_play_time_(0) {
+  ALOGD("ctor[tid=%d]", rtc::CurrentThreadId());
+  // Use native audio output parameters provided by the audio manager and
+  // define the PCM format structure.
+  pcm_format_ = CreatePCMConfiguration(audio_parameters_.channels(),
+                                       audio_parameters_.sample_rate(),
+                                       audio_parameters_.bits_per_sample());
+  // Detach from this thread since we want to use the checker to verify calls
+  // from the internal audio thread.
+  thread_checker_opensles_.Detach();
+}
+
+OpenSLESPlayer::~OpenSLESPlayer() {
+  ALOGD("dtor[tid=%d]", rtc::CurrentThreadId());
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  Terminate();
+  DestroyAudioPlayer();
+  DestroyMix();
+  engine_ = nullptr;
+  RTC_DCHECK(!engine_);
+  RTC_DCHECK(!output_mix_.Get());
+  RTC_DCHECK(!player_);
+  RTC_DCHECK(!simple_buffer_queue_);
+  RTC_DCHECK(!volume_);
+}
+
+int OpenSLESPlayer::Init() {
+  ALOGD("Init[tid=%d]", rtc::CurrentThreadId());
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (audio_parameters_.channels() == 2) {
+    ALOGW("Stereo mode is enabled");
+  }
+  return 0;
+}
+
+int OpenSLESPlayer::Terminate() {
+  ALOGD("Terminate[tid=%d]", rtc::CurrentThreadId());
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  StopPlayout();
+  return 0;
+}
+
+int OpenSLESPlayer::InitPlayout() {
+  ALOGD("InitPlayout[tid=%d]", rtc::CurrentThreadId());
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK(!playing_);
+  if (!ObtainEngineInterface()) {
+    ALOGE("Failed to obtain SL Engine interface");
+    return -1;
+  }
+  CreateMix();
+  initialized_ = true;
+  buffer_index_ = 0;
+  return 0;
+}
+
+int OpenSLESPlayer::StartPlayout() {
+  ALOGD("StartPlayout[tid=%d]", rtc::CurrentThreadId());
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(initialized_);
+  RTC_DCHECK(!playing_);
+  if (fine_audio_buffer_) {
+    fine_audio_buffer_->ResetPlayout();
+  }
+  // The number of lower latency audio players is limited, hence we create the
+  // audio player in Start() and destroy it in Stop().
+  CreateAudioPlayer();
+  // Fill up audio buffers to avoid initial glitch and to ensure that playback
+  // starts when mode is later changed to SL_PLAYSTATE_PLAYING.
+  // TODO(henrika): we can save some delay by only making one call to
+  // EnqueuePlayoutData. Most likely not worth the risk of adding a glitch.
+  last_play_time_ = rtc::Time();
+  for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
+    EnqueuePlayoutData(true);
+  }
+  // Start streaming data by setting the play state to SL_PLAYSTATE_PLAYING.
+  // For a player object, when the object is in the SL_PLAYSTATE_PLAYING
+  // state, adding buffers will implicitly start playback.
+  RETURN_ON_ERROR((*player_)->SetPlayState(player_, SL_PLAYSTATE_PLAYING), -1);
+  playing_ = (GetPlayState() == SL_PLAYSTATE_PLAYING);
+  RTC_DCHECK(playing_);
+  return 0;
+}
+
+int OpenSLESPlayer::StopPlayout() {
+  ALOGD("StopPlayout[tid=%d]", rtc::CurrentThreadId());
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (!initialized_ || !playing_) {
+    return 0;
+  }
+  // Stop playing by setting the play state to SL_PLAYSTATE_STOPPED.
+  RETURN_ON_ERROR((*player_)->SetPlayState(player_, SL_PLAYSTATE_STOPPED), -1);
+  // Clear the buffer queue to flush out any remaining data.
+  RETURN_ON_ERROR((*simple_buffer_queue_)->Clear(simple_buffer_queue_), -1);
+#if RTC_DCHECK_IS_ON
+  // Verify that the buffer queue is in fact cleared as it should be.
+  SLAndroidSimpleBufferQueueState buffer_queue_state;
+  (*simple_buffer_queue_)->GetState(simple_buffer_queue_, &buffer_queue_state);
+  RTC_DCHECK_EQ(0, buffer_queue_state.count);
+  RTC_DCHECK_EQ(0, buffer_queue_state.index);
+#endif
+  // The number of lower latency audio players is limited, hence we create the
+  // audio player in Start() and destroy it in Stop().
+  DestroyAudioPlayer();
+  thread_checker_opensles_.Detach();
+  initialized_ = false;
+  playing_ = false;
+  return 0;
+}
+
+int OpenSLESPlayer::SpeakerVolumeIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int OpenSLESPlayer::MaxSpeakerVolume(uint32_t& maxVolume) const {
+  return -1;
+}
+
+int OpenSLESPlayer::MinSpeakerVolume(uint32_t& minVolume) const {
+  return -1;
+}
+
+int OpenSLESPlayer::SetSpeakerVolume(uint32_t volume) {
+  return -1;
+}
+
+int OpenSLESPlayer::SpeakerVolume(uint32_t& volume) const {
+  return -1;
+}
+
+void OpenSLESPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+  ALOGD("AttachAudioBuffer");
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  audio_device_buffer_ = audioBuffer;
+  const int sample_rate_hz = audio_parameters_.sample_rate();
+  ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz);
+  audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
+  const size_t channels = audio_parameters_.channels();
+  ALOGD("SetPlayoutChannels(%zu)", channels);
+  audio_device_buffer_->SetPlayoutChannels(channels);
+  RTC_CHECK(audio_device_buffer_);
+  AllocateDataBuffers();
+}
+
+void OpenSLESPlayer::AllocateDataBuffers() {
+  ALOGD("AllocateDataBuffers");
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(!simple_buffer_queue_);
+  RTC_CHECK(audio_device_buffer_);
+  // Create a modified audio buffer class which allows us to ask for any number
+  // of samples (and not only multiples of 10ms) to match the native OpenSL ES
+  // buffer size. The native buffer size corresponds to the
+  // PROPERTY_OUTPUT_FRAMES_PER_BUFFER property which is the number of audio
+  // frames that the HAL (Hardware Abstraction Layer) buffer can hold. It is
+  // recommended to construct audio buffers so that they contain an exact
+  // multiple of this number. If so, callbacks will occur at regular intervals,
+  // which reduces jitter.
+  const size_t buffer_size_in_samples =
+      audio_parameters_.frames_per_buffer() * audio_parameters_.channels();
+  ALOGD("native buffer size: %zu", buffer_size_in_samples);
+  ALOGD("native buffer size in ms: %.2f",
+        audio_parameters_.GetBufferSizeInMilliseconds());
+  fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
+  // Allocate memory for audio buffers.
+  for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
+    audio_buffers_[i].reset(new SLint16[buffer_size_in_samples]);
+  }
+}
+
+bool OpenSLESPlayer::ObtainEngineInterface() {
+  ALOGD("ObtainEngineInterface");
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (engine_)
+    return true;
+  // Get access to (or create if not already existing) the global OpenSL Engine
+  // object.
+  SLObjectItf engine_object = audio_manager_->GetOpenSLEngine();
+  if (engine_object == nullptr) {
+    ALOGE("Failed to access the global OpenSL engine");
+    return false;
+  }
+  // Get the SL Engine Interface which is implicit.
+  RETURN_ON_ERROR(
+      (*engine_object)->GetInterface(engine_object, SL_IID_ENGINE, &engine_),
+      false);
+  return true;
+}
+
+bool OpenSLESPlayer::CreateMix() {
+  ALOGD("CreateMix");
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(engine_);
+  if (output_mix_.Get())
+    return true;
+
+  // Create the output mix on the engine object. No interfaces will be used.
+  RETURN_ON_ERROR((*engine_)->CreateOutputMix(engine_, output_mix_.Receive(), 0,
+                                              nullptr, nullptr),
+                  false);
+  RETURN_ON_ERROR(output_mix_->Realize(output_mix_.Get(), SL_BOOLEAN_FALSE),
+                  false);
+  return true;
+}
+
+void OpenSLESPlayer::DestroyMix() {
+  ALOGD("DestroyMix");
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (!output_mix_.Get())
+    return;
+  output_mix_.Reset();
+}
+
+bool OpenSLESPlayer::CreateAudioPlayer() {
+  ALOGD("CreateAudioPlayer");
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(output_mix_.Get());
+  if (player_object_.Get())
+    return true;
+  RTC_DCHECK(!player_);
+  RTC_DCHECK(!simple_buffer_queue_);
+  RTC_DCHECK(!volume_);
+
+  // Source: an Android Simple Buffer Queue data locator serves as the source.
+  SLDataLocator_AndroidSimpleBufferQueue simple_buffer_queue = {
+      SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
+      static_cast<SLuint32>(kNumOfOpenSLESBuffers)};
+  SLDataSource audio_source = {&simple_buffer_queue, &pcm_format_};
+
+  // Sink: an OutputMix-based data locator serves as the sink.
+  SLDataLocator_OutputMix locator_output_mix = {SL_DATALOCATOR_OUTPUTMIX,
+                                                output_mix_.Get()};
+  SLDataSink audio_sink = {&locator_output_mix, nullptr};
+
+  // Define interfaces that we intend to use and realize.
+  const SLInterfaceID interface_ids[] = {SL_IID_ANDROIDCONFIGURATION,
+                                         SL_IID_BUFFERQUEUE, SL_IID_VOLUME};
+  const SLboolean interface_required[] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE,
+                                          SL_BOOLEAN_TRUE};
+
+  // Create the audio player on the engine interface.
+  RETURN_ON_ERROR(
+      (*engine_)->CreateAudioPlayer(
+          engine_, player_object_.Receive(), &audio_source, &audio_sink,
+          arraysize(interface_ids), interface_ids, interface_required),
+      false);
+
+  // Use the Android configuration interface to set platform-specific
+  // parameters. Should be done before player is realized.
+  SLAndroidConfigurationItf player_config;
+  RETURN_ON_ERROR(
+      player_object_->GetInterface(player_object_.Get(),
+                                   SL_IID_ANDROIDCONFIGURATION, &player_config),
+      false);
+  // Set audio player configuration to SL_ANDROID_STREAM_VOICE which
+  // corresponds to android.media.AudioManager.STREAM_VOICE_CALL.
+  SLint32 stream_type = SL_ANDROID_STREAM_VOICE;
+  RETURN_ON_ERROR(
+      (*player_config)
+          ->SetConfiguration(player_config, SL_ANDROID_KEY_STREAM_TYPE,
+                             &stream_type, sizeof(SLint32)),
+      false);
+
+  // Realize the audio player object after configuration has been set.
+  RETURN_ON_ERROR(
+      player_object_->Realize(player_object_.Get(), SL_BOOLEAN_FALSE), false);
+
+  // Get the SLPlayItf interface on the audio player.
+  RETURN_ON_ERROR(
+      player_object_->GetInterface(player_object_.Get(), SL_IID_PLAY, &player_),
+      false);
+
+  // Get the SLAndroidSimpleBufferQueueItf interface on the audio player.
+  RETURN_ON_ERROR(
+      player_object_->GetInterface(player_object_.Get(), SL_IID_BUFFERQUEUE,
+                                   &simple_buffer_queue_),
+      false);
+
+  // Register callback method for the Android Simple Buffer Queue interface.
+  // This method will be called when the native audio layer needs audio data.
+  RETURN_ON_ERROR((*simple_buffer_queue_)
+                      ->RegisterCallback(simple_buffer_queue_,
+                                         SimpleBufferQueueCallback, this),
+                  false);
+
+  // Get the SLVolumeItf interface on the audio player.
+  RETURN_ON_ERROR(player_object_->GetInterface(player_object_.Get(),
+                                               SL_IID_VOLUME, &volume_),
+                  false);
+
+  // TODO(henrika): might not be required to set volume to max here since it
+  // seems to be default on most devices. Might be required for unit tests.
+  // RETURN_ON_ERROR((*volume_)->SetVolumeLevel(volume_, 0), false);
+
+  return true;
+}
+
+void OpenSLESPlayer::DestroyAudioPlayer() {
+  ALOGD("DestroyAudioPlayer");
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (!player_object_.Get())
+    return;
+  (*simple_buffer_queue_)
+      ->RegisterCallback(simple_buffer_queue_, nullptr, nullptr);
+  player_object_.Reset();
+  player_ = nullptr;
+  simple_buffer_queue_ = nullptr;
+  volume_ = nullptr;
+}
+
+// static
+void OpenSLESPlayer::SimpleBufferQueueCallback(
+    SLAndroidSimpleBufferQueueItf caller,
+    void* context) {
+  OpenSLESPlayer* stream = reinterpret_cast<OpenSLESPlayer*>(context);
+  stream->FillBufferQueue();
+}
+
+void OpenSLESPlayer::FillBufferQueue() {
+  RTC_DCHECK(thread_checker_opensles_.IsCurrent());
+  SLuint32 state = GetPlayState();
+  if (state != SL_PLAYSTATE_PLAYING) {
+    ALOGW("Buffer callback in non-playing state!");
+    return;
+  }
+  EnqueuePlayoutData(false);
+}
+
+void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
+  // Check delta time between two successive callbacks and provide a warning
+  // if it becomes very large.
+  // TODO(henrika): using 150ms as upper limit but this value is rather random.
+  const uint32_t current_time = rtc::Time();
+  const uint32_t diff = current_time - last_play_time_;
+  if (diff > 150) {
+    ALOGW("Bad OpenSL ES playout timing, dT=%u [ms]", diff);
+  }
+  last_play_time_ = current_time;
+  SLint8* audio_ptr8 =
+      reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get());
+  if (silence) {
+    RTC_DCHECK(thread_checker_.IsCurrent());
+    // Avoid acquiring real audio data from WebRTC and fill the buffer with
+    // zeros instead. Used to prime the buffer with silence and to avoid asking
+    // for audio data from two different threads.
+    memset(audio_ptr8, 0, audio_parameters_.GetBytesPerBuffer());
+  } else {
+    RTC_DCHECK(thread_checker_opensles_.IsCurrent());
+    // Read audio data from the WebRTC source using the FineAudioBuffer object
+    // to adjust for differences in buffer size between WebRTC (10ms) and native
+    // OpenSL ES. Use hardcoded delay estimate since OpenSL ES does not support
+    // delay estimation.
+    fine_audio_buffer_->GetPlayoutData(
+        rtc::ArrayView<int16_t>(audio_buffers_[buffer_index_].get(),
+                                audio_parameters_.frames_per_buffer() *
+                                    audio_parameters_.channels()),
+        25);
+  }
+  // Enqueue the decoded audio buffer for playback.
+  SLresult err = (*simple_buffer_queue_)
+                     ->Enqueue(simple_buffer_queue_, audio_ptr8,
+                               audio_parameters_.GetBytesPerBuffer());
+  if (SL_RESULT_SUCCESS != err) {
+    ALOGE("Enqueue failed: %d", err);
+  }
+  buffer_index_ = (buffer_index_ + 1) % kNumOfOpenSLESBuffers;
+}
+
+SLuint32 OpenSLESPlayer::GetPlayState() const {
+  RTC_DCHECK(player_);
+  SLuint32 state;
+  SLresult err = (*player_)->GetPlayState(player_, &state);
+  if (SL_RESULT_SUCCESS != err) {
+    ALOGE("GetPlayState failed: %d", err);
+  }
+  return state;
+}
+
+}  // namespace webrtc
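
A hedged sketch of the playout lifecycle the class above enforces; the
audio_manager and audio_device_buffer pointers are assumed to be supplied by
the surrounding AudioDeviceTemplate glue and are not part of this diff:

  // Assumed inputs: AudioManager* audio_manager, AudioDeviceBuffer*
  // audio_device_buffer.
  webrtc::OpenSLESPlayer player(audio_manager);
  player.AttachAudioBuffer(audio_device_buffer);
  player.Init();
  player.InitPlayout();   // obtains the SL engine and creates the output mix
  player.StartPlayout();  // creates the player, primes kNumOfOpenSLESBuffers
                          // buffers with silence, then starts playing
  // ... audio is pulled via SimpleBufferQueueCallback() on the OpenSL thread ...
  player.StopPlayout();   // destroys the player; InitPlayout() is required
                          // again before the next StartPlayout()
  player.Terminate();
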
diff --git a/modules/audio_device/android/opensles_player.h b/modules/audio_device/android/opensles_player.h
new file mode 100644
index 0000000..41593a4
--- /dev/null
+++ b/modules/audio_device/android/opensles_player.h
@@ -0,0 +1,195 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_
+
+#include <SLES/OpenSLES.h>
+#include <SLES/OpenSLES_Android.h>
+#include <SLES/OpenSLES_AndroidConfiguration.h>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/android/opensles_common.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/utility/include/helpers_android.h"
+
+namespace webrtc {
+
+class FineAudioBuffer;
+
+// Implements 16-bit mono PCM audio output support for Android using the
+// C based OpenSL ES API. No calls from C/C++ to Java using JNI are made.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread. Decoded audio
+// buffers are requested on a dedicated internal thread managed by the OpenSL
+// ES layer.
+//
+// The existing design forces the user to call InitPlayout() after
+// StopPlayout() to be able to call StartPlayout() again. This is in line with
+// how the Java-based implementation works.
+//
+// OpenSL ES is a native C API which has no Dalvik-related overhead such as
+// garbage collection pauses and it supports reduced audio output latency.
+// If the device doesn't claim this feature but supports API level 9 (Android
+// platform version 2.3) or later, then we can still use the OpenSL ES APIs but
+// the output latency may be higher.
+class OpenSLESPlayer {
+ public:
+  // Beginning with API level 17 (Android 4.2), a buffer count of 2 or more is
+  // required for lower latency. Beginning with API level 18 (Android 4.3), a
+  // buffer count of 1 is sufficient for lower latency. In addition, the buffer
+  // size and sample rate must be compatible with the device's native output
+  // configuration provided via the audio manager at construction.
+  // TODO(henrika): perhaps set this value dynamically based on OS version.
+  static const int kNumOfOpenSLESBuffers = 2;
+
+  explicit OpenSLESPlayer(AudioManager* audio_manager);
+  ~OpenSLESPlayer();
+
+  int Init();
+  int Terminate();
+
+  int InitPlayout();
+  bool PlayoutIsInitialized() const { return initialized_; }
+
+  int StartPlayout();
+  int StopPlayout();
+  bool Playing() const { return playing_; }
+
+  int SpeakerVolumeIsAvailable(bool& available);
+  int SetSpeakerVolume(uint32_t volume);
+  int SpeakerVolume(uint32_t& volume) const;
+  int MaxSpeakerVolume(uint32_t& maxVolume) const;
+  int MinSpeakerVolume(uint32_t& minVolume) const;
+
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ private:
+  // These callback methods are called when data is required for playout.
+  // They are both called from an internal "OpenSL ES thread" which is not
+  // attached to the Dalvik VM.
+  static void SimpleBufferQueueCallback(SLAndroidSimpleBufferQueueItf caller,
+                                        void* context);
+  void FillBufferQueue();
+  // Reads audio data in PCM format using the AudioDeviceBuffer.
+  // Can be called both on the main thread (during Start()) and from the
+  // internal audio thread while output streaming is active.
+  // If the `silence` flag is set, the audio is filled with zeros instead of
+  // asking the WebRTC layer for real audio data. This procedure is also known
+  // as audio priming.
+  void EnqueuePlayoutData(bool silence);
+
+  // Allocate memory for audio buffers which will be used to render audio
+  // via the SLAndroidSimpleBufferQueueItf interface.
+  void AllocateDataBuffers();
+
+  // Obtains the SL Engine Interface from the existing global Engine object.
+  // The interface exposes creation methods of all the OpenSL ES object types.
+  // This method defines the `engine_` member variable.
+  bool ObtainEngineInterface();
+
+  // Creates/destroys the output mix object.
+  bool CreateMix();
+  void DestroyMix();
+
+  // Creates/destroys the audio player and the simple-buffer object.
+  // Also creates the volume object.
+  bool CreateAudioPlayer();
+  void DestroyAudioPlayer();
+
+  SLuint32 GetPlayState() const;
+
+  // Ensures that methods are called from the same thread as this object is
+  // created on.
+  SequenceChecker thread_checker_;
+
+  // Stores thread ID in first call to SimpleBufferQueueCallback() from internal
+  // non-application thread which is not attached to the Dalvik JVM.
+  // Detached during construction of this object.
+  SequenceChecker thread_checker_opensles_;
+
+  // Raw pointer to the audio manager injected at construction. Used to cache
+  // audio parameters and to access the global SL engine object needed by the
+  // ObtainEngineInterface() method. The audio manager outlives any instance of
+  // this class.
+  AudioManager* audio_manager_;
+
+  // Contains audio parameters provided to this class at construction by the
+  // AudioManager.
+  const AudioParameters audio_parameters_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class and created by AudioDeviceModule::Create().
+  AudioDeviceBuffer* audio_device_buffer_;
+
+  bool initialized_;
+  bool playing_;
+
+  // PCM-type format definition.
+  // TODO(henrika): add support for SLAndroidDataFormat_PCM_EX (android-21) if
+  // 32-bit float representation is needed.
+  SLDataFormat_PCM pcm_format_;
+
+  // Queue of audio buffers to be used by the player object for rendering
+  // audio.
+  std::unique_ptr<SLint16[]> audio_buffers_[kNumOfOpenSLESBuffers];
+
+  // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
+  // in chunks of 10ms. It then allows for this data to be pulled in
+  // a finer or coarser granularity. I.e., by interacting with this class
+  // instead of directly with the AudioDeviceBuffer, one can ask for any
+  // number of audio data samples.
+  // Example: native buffer size can be 192 audio frames at 48kHz sample rate.
+  // WebRTC will provide 480 audio frames per 10ms but OpenSL ES asks for 192
+  // in each callback (one every 4th ms). This class can then ask for 192 and
+  // the FineAudioBuffer will ask WebRTC for new data approximately only every
+  // second callback and also cache non-utilized audio.
+  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+  // Keeps track of active audio buffer 'n' in the audio_buffers_[n] queue.
+  // Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...
+  int buffer_index_;
+
+  // This interface exposes creation methods for all the OpenSL ES object types.
+  // It is the OpenSL ES API entry point.
+  SLEngineItf engine_;
+
+  // Output mix object to be used by the player object.
+  webrtc::ScopedSLObjectItf output_mix_;
+
+  // The audio player media object plays out audio to the speakers. It also
+  // supports volume control.
+  webrtc::ScopedSLObjectItf player_object_;
+
+  // This interface is supported on the audio player and it controls the state
+  // of the audio player.
+  SLPlayItf player_;
+
+  // The Android Simple Buffer Queue interface is supported on the audio player
+  // and it provides methods to send audio data from the source to the audio
+  // player for rendering.
+  SLAndroidSimpleBufferQueueItf simple_buffer_queue_;
+
+  // This interface exposes controls for manipulating the object's audio volume
+  // properties. This interface is supported on the Audio Player object.
+  SLVolumeItf volume_;
+
+  // Last time the OpenSL ES layer asked for audio data to play out.
+  uint32_t last_play_time_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_
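
The FineAudioBuffer comment above sketches why the extra buffering layer is
needed; spelled out with the same illustrative numbers (48 kHz, 192-frame
native buffers), the arithmetic is:

  // Native callback interval: 192 frames / 48000 Hz = 4 ms per callback.
  // WebRTC delivers 10 ms chunks: 480 frames per chunk at 48 kHz.
  // 480 / 192 = 2.5, so one 10 ms chunk covers two to three native callbacks,
  // and the FineAudioBuffer only asks WebRTC for new data roughly every
  // second callback while caching the unconsumed remainder.
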
diff --git a/modules/audio_device/android/opensles_recorder.cc b/modules/audio_device/android/opensles_recorder.cc
new file mode 100644
index 0000000..4e0c26d
--- /dev/null
+++ b/modules/audio_device/android/opensles_recorder.cc
@@ -0,0 +1,431 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/opensles_recorder.h"
+
+#include <android/log.h>
+
+#include <memory>
+
+#include "api/array_view.h"
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/time_utils.h"
+
+#define TAG "OpenSLESRecorder"
+#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
+#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
+#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
+#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
+
+#define LOG_ON_ERROR(op)                                    \
+  [](SLresult err) {                                        \
+    if (err != SL_RESULT_SUCCESS) {                         \
+      ALOGE("%s:%d %s failed: %s", __FILE__, __LINE__, #op, \
+            GetSLErrorString(err));                         \
+      return true;                                          \
+    }                                                       \
+    return false;                                           \
+  }(op)
+
+namespace webrtc {
+
+OpenSLESRecorder::OpenSLESRecorder(AudioManager* audio_manager)
+    : audio_manager_(audio_manager),
+      audio_parameters_(audio_manager->GetRecordAudioParameters()),
+      audio_device_buffer_(nullptr),
+      initialized_(false),
+      recording_(false),
+      engine_(nullptr),
+      recorder_(nullptr),
+      simple_buffer_queue_(nullptr),
+      buffer_index_(0),
+      last_rec_time_(0) {
+  ALOGD("ctor[tid=%d]", rtc::CurrentThreadId());
+  // Detach from this thread since we want to use the checker to verify calls
+  // from the internal audio thread.
+  thread_checker_opensles_.Detach();
+  // Use native audio input parameters provided by the audio manager and
+  // define the PCM format structure.
+  pcm_format_ = CreatePCMConfiguration(audio_parameters_.channels(),
+                                       audio_parameters_.sample_rate(),
+                                       audio_parameters_.bits_per_sample());
+}
+
+OpenSLESRecorder::~OpenSLESRecorder() {
+  ALOGD("dtor[tid=%d]", rtc::CurrentThreadId());
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  Terminate();
+  DestroyAudioRecorder();
+  engine_ = nullptr;
+  RTC_DCHECK(!engine_);
+  RTC_DCHECK(!recorder_);
+  RTC_DCHECK(!simple_buffer_queue_);
+}
+
+int OpenSLESRecorder::Init() {
+  ALOGD("Init[tid=%d]", rtc::CurrentThreadId());
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (audio_parameters_.channels() == 2) {
+    ALOGD("Stereo mode is enabled");
+  }
+  return 0;
+}
+
+int OpenSLESRecorder::Terminate() {
+  ALOGD("Terminate[tid=%d]", rtc::CurrentThreadId());
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  StopRecording();
+  return 0;
+}
+
+int OpenSLESRecorder::InitRecording() {
+  ALOGD("InitRecording[tid=%d]", rtc::CurrentThreadId());
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK(!recording_);
+  if (!ObtainEngineInterface()) {
+    ALOGE("Failed to obtain SL Engine interface");
+    return -1;
+  }
+  CreateAudioRecorder();
+  initialized_ = true;
+  buffer_index_ = 0;
+  return 0;
+}
+
+int OpenSLESRecorder::StartRecording() {
+  ALOGD("StartRecording[tid=%d]", rtc::CurrentThreadId());
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(initialized_);
+  RTC_DCHECK(!recording_);
+  if (fine_audio_buffer_) {
+    fine_audio_buffer_->ResetRecord();
+  }
+  // Add buffers to the queue before changing state to SL_RECORDSTATE_RECORDING
+  // to ensure that recording starts as soon as the state is modified. On some
+  // devices, SLAndroidSimpleBufferQueue::Clear() used in Stop() does not flush
+  // the buffers as intended and we therefore check the number of buffers
+  // already queued first. Enqueue() can return SL_RESULT_BUFFER_INSUFFICIENT
+  // otherwise.
+  int num_buffers_in_queue = GetBufferCount();
+  for (int i = 0; i < kNumOfOpenSLESBuffers - num_buffers_in_queue; ++i) {
+    if (!EnqueueAudioBuffer()) {
+      recording_ = false;
+      return -1;
+    }
+  }
+  num_buffers_in_queue = GetBufferCount();
+  RTC_DCHECK_EQ(num_buffers_in_queue, kNumOfOpenSLESBuffers);
+  LogBufferState();
+  // Start audio recording by changing the state to SL_RECORDSTATE_RECORDING.
+  // Given that buffers are already enqueued, recording should start at once.
+  // Return -1 if recording fails to start.
+  last_rec_time_ = rtc::Time();
+  if (LOG_ON_ERROR(
+          (*recorder_)->SetRecordState(recorder_, SL_RECORDSTATE_RECORDING))) {
+    return -1;
+  }
+  recording_ = (GetRecordState() == SL_RECORDSTATE_RECORDING);
+  RTC_DCHECK(recording_);
+  return 0;
+}
+
+int OpenSLESRecorder::StopRecording() {
+  ALOGD("StopRecording[tid=%d]", rtc::CurrentThreadId());
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (!initialized_ || !recording_) {
+    return 0;
+  }
+  // Stop recording by setting the record state to SL_RECORDSTATE_STOPPED.
+  if (LOG_ON_ERROR(
+          (*recorder_)->SetRecordState(recorder_, SL_RECORDSTATE_STOPPED))) {
+    return -1;
+  }
+  // Clear the buffer queue to get rid of old data when resuming recording.
+  if (LOG_ON_ERROR((*simple_buffer_queue_)->Clear(simple_buffer_queue_))) {
+    return -1;
+  }
+  thread_checker_opensles_.Detach();
+  initialized_ = false;
+  recording_ = false;
+  return 0;
+}
+
+void OpenSLESRecorder::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
+  ALOGD("AttachAudioBuffer");
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_CHECK(audio_buffer);
+  audio_device_buffer_ = audio_buffer;
+  // Ensure that the audio device buffer is informed about the native sample
+  // rate used on the recording side.
+  const int sample_rate_hz = audio_parameters_.sample_rate();
+  ALOGD("SetRecordingSampleRate(%d)", sample_rate_hz);
+  audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz);
+  // Ensure that the audio device buffer is informed about the number of
+  // channels preferred by the OS on the recording side.
+  const size_t channels = audio_parameters_.channels();
+  ALOGD("SetRecordingChannels(%zu)", channels);
+  audio_device_buffer_->SetRecordingChannels(channels);
+  // Allocate memory for internal data buffers given existing audio parameters.
+  AllocateDataBuffers();
+}
+
+int OpenSLESRecorder::EnableBuiltInAEC(bool enable) {
+  ALOGD("EnableBuiltInAEC(%d)", enable);
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  ALOGE("Not implemented");
+  return 0;
+}
+
+int OpenSLESRecorder::EnableBuiltInAGC(bool enable) {
+  ALOGD("EnableBuiltInAGC(%d)", enable);
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  ALOGE("Not implemented");
+  return 0;
+}
+
+int OpenSLESRecorder::EnableBuiltInNS(bool enable) {
+  ALOGD("EnableBuiltInNS(%d)", enable);
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  ALOGE("Not implemented");
+  return 0;
+}
+
+bool OpenSLESRecorder::ObtainEngineInterface() {
+  ALOGD("ObtainEngineInterface");
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (engine_)
+    return true;
+  // Get access to (or create if not already existing) the global OpenSL Engine
+  // object.
+  SLObjectItf engine_object = audio_manager_->GetOpenSLEngine();
+  if (engine_object == nullptr) {
+    ALOGE("Failed to access the global OpenSL engine");
+    return false;
+  }
+  // Get the SL Engine Interface which is implicit.
+  if (LOG_ON_ERROR(
+          (*engine_object)
+              ->GetInterface(engine_object, SL_IID_ENGINE, &engine_))) {
+    return false;
+  }
+  return true;
+}
+
+bool OpenSLESRecorder::CreateAudioRecorder() {
+  ALOGD("CreateAudioRecorder");
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (recorder_object_.Get())
+    return true;
+  RTC_DCHECK(!recorder_);
+  RTC_DCHECK(!simple_buffer_queue_);
+
+  // Audio source configuration.
+  SLDataLocator_IODevice mic_locator = {SL_DATALOCATOR_IODEVICE,
+                                        SL_IODEVICE_AUDIOINPUT,
+                                        SL_DEFAULTDEVICEID_AUDIOINPUT, NULL};
+  SLDataSource audio_source = {&mic_locator, NULL};
+
+  // Audio sink configuration.
+  SLDataLocator_AndroidSimpleBufferQueue buffer_queue = {
+      SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
+      static_cast<SLuint32>(kNumOfOpenSLESBuffers)};
+  SLDataSink audio_sink = {&buffer_queue, &pcm_format_};
+
+  // Create the audio recorder object (requires the RECORD_AUDIO permission).
+  // Do not realize the recorder yet. Set the configuration first.
+  const SLInterfaceID interface_id[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+                                        SL_IID_ANDROIDCONFIGURATION};
+  const SLboolean interface_required[] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
+  if (LOG_ON_ERROR((*engine_)->CreateAudioRecorder(
+          engine_, recorder_object_.Receive(), &audio_source, &audio_sink,
+          arraysize(interface_id), interface_id, interface_required))) {
+    return false;
+  }
+
+  // Configure the audio recorder (before it is realized).
+  SLAndroidConfigurationItf recorder_config;
+  if (LOG_ON_ERROR((recorder_object_->GetInterface(recorder_object_.Get(),
+                                                   SL_IID_ANDROIDCONFIGURATION,
+                                                   &recorder_config)))) {
+    return false;
+  }
+
+  // Use the default microphone tuned for audio communication
+  // (SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION). Note that
+  // SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION leads to a fast track but
+  // also excludes usage of required effects like AEC, AGC and NS.
+  SLint32 stream_type = SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION;
+  if (LOG_ON_ERROR(((*recorder_config)
+                        ->SetConfiguration(recorder_config,
+                                           SL_ANDROID_KEY_RECORDING_PRESET,
+                                           &stream_type, sizeof(SLint32))))) {
+    return false;
+  }
+
+  // The audio recorder can now be realized (in synchronous mode).
+  if (LOG_ON_ERROR((recorder_object_->Realize(recorder_object_.Get(),
+                                              SL_BOOLEAN_FALSE)))) {
+    return false;
+  }
+
+  // Get the implicit recorder interface (SL_IID_RECORD).
+  if (LOG_ON_ERROR((recorder_object_->GetInterface(
+          recorder_object_.Get(), SL_IID_RECORD, &recorder_)))) {
+    return false;
+  }
+
+  // Get the simple buffer queue interface (SL_IID_ANDROIDSIMPLEBUFFERQUEUE).
+  // It was explicitly requested.
+  if (LOG_ON_ERROR((recorder_object_->GetInterface(
+          recorder_object_.Get(), SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+          &simple_buffer_queue_)))) {
+    return false;
+  }
+
+  // Register the input callback for the simple buffer queue.
+  // This callback will be called when receiving new data from the device.
+  if (LOG_ON_ERROR(((*simple_buffer_queue_)
+                        ->RegisterCallback(simple_buffer_queue_,
+                                           SimpleBufferQueueCallback, this)))) {
+    return false;
+  }
+  return true;
+}
+
+void OpenSLESRecorder::DestroyAudioRecorder() {
+  ALOGD("DestroyAudioRecorder");
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  if (!recorder_object_.Get())
+    return;
+  (*simple_buffer_queue_)
+      ->RegisterCallback(simple_buffer_queue_, nullptr, nullptr);
+  recorder_object_.Reset();
+  recorder_ = nullptr;
+  simple_buffer_queue_ = nullptr;
+}
+
+void OpenSLESRecorder::SimpleBufferQueueCallback(
+    SLAndroidSimpleBufferQueueItf buffer_queue,
+    void* context) {
+  OpenSLESRecorder* stream = static_cast<OpenSLESRecorder*>(context);
+  stream->ReadBufferQueue();
+}
+
+void OpenSLESRecorder::AllocateDataBuffers() {
+  ALOGD("AllocateDataBuffers");
+  RTC_DCHECK(thread_checker_.IsCurrent());
+  RTC_DCHECK(!simple_buffer_queue_);
+  RTC_CHECK(audio_device_buffer_);
+  // Create a modified audio buffer class which allows us to deliver any number
+  // of samples (and not only multiples of 10ms) to match the native audio unit
+  // buffer size.
+  ALOGD("frames per native buffer: %zu", audio_parameters_.frames_per_buffer());
+  ALOGD("frames per 10ms buffer: %zu",
+        audio_parameters_.frames_per_10ms_buffer());
+  ALOGD("bytes per native buffer: %zu", audio_parameters_.GetBytesPerBuffer());
+  ALOGD("native sample rate: %d", audio_parameters_.sample_rate());
+  RTC_DCHECK(audio_device_buffer_);
+  fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
+  // Allocate queue of audio buffers that stores recorded audio samples.
+  const int buffer_size_samples =
+      audio_parameters_.frames_per_buffer() * audio_parameters_.channels();
+  audio_buffers_.reset(new std::unique_ptr<SLint16[]>[kNumOfOpenSLESBuffers]);
+  for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
+    audio_buffers_[i].reset(new SLint16[buffer_size_samples]);
+  }
+}
+
+void OpenSLESRecorder::ReadBufferQueue() {
+  RTC_DCHECK(thread_checker_opensles_.IsCurrent());
+  SLuint32 state = GetRecordState();
+  if (state != SL_RECORDSTATE_RECORDING) {
+    ALOGW("Buffer callback in non-recording state!");
+    return;
+  }
+  // Check delta time between two successive callbacks and provide a warning
+  // if it becomes very large.
+  // TODO(henrika): using 150ms as upper limit but this value is rather random.
+  const uint32_t current_time = rtc::Time();
+  const uint32_t diff = current_time - last_rec_time_;
+  if (diff > 150) {
+    ALOGW("Bad OpenSL ES record timing, dT=%u [ms]", diff);
+  }
+  last_rec_time_ = current_time;
+  // Send recorded audio data to the WebRTC sink.
+  // TODO(henrika): fix delay estimates. It is OK to use fixed values for now
+  // since there is no support to turn off built-in EC in combination with
+  // OpenSL ES anyhow. Hence, as is, the WebRTC based AEC (which would use
+  // these estimates) will never be active.
+  fine_audio_buffer_->DeliverRecordedData(
+      rtc::ArrayView<const int16_t>(
+          audio_buffers_[buffer_index_].get(),
+          audio_parameters_.frames_per_buffer() * audio_parameters_.channels()),
+      25);
+  // Enqueue the utilized audio buffer and use it for recording again.
+  EnqueueAudioBuffer();
+}
+
+bool OpenSLESRecorder::EnqueueAudioBuffer() {
+  SLresult err =
+      (*simple_buffer_queue_)
+          ->Enqueue(
+              simple_buffer_queue_,
+              reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get()),
+              audio_parameters_.GetBytesPerBuffer());
+  if (SL_RESULT_SUCCESS != err) {
+    ALOGE("Enqueue failed: %s", GetSLErrorString(err));
+    return false;
+  }
+  buffer_index_ = (buffer_index_ + 1) % kNumOfOpenSLESBuffers;
+  return true;
+}
+
+SLuint32 OpenSLESRecorder::GetRecordState() const {
+  RTC_DCHECK(recorder_);
+  SLuint32 state;
+  SLresult err = (*recorder_)->GetRecordState(recorder_, &state);
+  if (SL_RESULT_SUCCESS != err) {
+    ALOGE("GetRecordState failed: %s", GetSLErrorString(err));
+  }
+  return state;
+}
+
+SLAndroidSimpleBufferQueueState OpenSLESRecorder::GetBufferQueueState() const {
+  RTC_DCHECK(simple_buffer_queue_);
+  // state.count: Number of buffers currently in the queue.
+  // state.index: Index of the currently filling buffer. This is a linear index
+  // that keeps a cumulative count of the number of buffers recorded.
+  SLAndroidSimpleBufferQueueState state;
+  SLresult err =
+      (*simple_buffer_queue_)->GetState(simple_buffer_queue_, &state);
+  if (SL_RESULT_SUCCESS != err) {
+    ALOGE("GetState failed: %s", GetSLErrorString(err));
+  }
+  return state;
+}
+
+void OpenSLESRecorder::LogBufferState() const {
+  SLAndroidSimpleBufferQueueState state = GetBufferQueueState();
+  ALOGD("state.count:%d state.index:%d", state.count, state.index);
+}
+
+SLuint32 OpenSLESRecorder::GetBufferCount() {
+  SLAndroidSimpleBufferQueueState state = GetBufferQueueState();
+  return state.count;
+}
+
+}  // namespace webrtc
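
For symmetry with the player, a hedged sketch of the recording lifecycle and
the buffer-priming step in StartRecording(); the pointer arguments are assumed
to come from the enclosing ADM code and the queue counts are illustrative:

  // Assumed inputs: AudioManager* audio_manager, AudioDeviceBuffer*
  // audio_device_buffer.
  webrtc::OpenSLESRecorder recorder(audio_manager);
  recorder.AttachAudioBuffer(audio_device_buffer);
  recorder.Init();
  recorder.InitRecording();   // obtains the SL engine and creates the recorder
  recorder.StartRecording();  // tops the queue up to kNumOfOpenSLESBuffers
                              // empty buffers, then starts recording
  // Priming arithmetic: if a previous Clear() left 1 buffer queued and
  // kNumOfOpenSLESBuffers == 2, the loop enqueues 2 - 1 = 1 more buffer, so
  // Enqueue() cannot hit SL_RESULT_BUFFER_INSUFFICIENT.
  recorder.StopRecording();   // InitRecording() is required again before the
                              // next StartRecording()
  recorder.Terminate();
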
diff --git a/modules/audio_device/android/opensles_recorder.h b/modules/audio_device/android/opensles_recorder.h
new file mode 100644
index 0000000..e659c3c
--- /dev/null
+++ b/modules/audio_device/android/opensles_recorder.h
@@ -0,0 +1,193 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_RECORDER_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_RECORDER_H_
+
+#include <SLES/OpenSLES.h>
+#include <SLES/OpenSLES_Android.h>
+#include <SLES/OpenSLES_AndroidConfiguration.h>
+
+#include <memory>
+
+#include "api/sequence_checker.h"
+#include "modules/audio_device/android/audio_common.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/android/opensles_common.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "modules/utility/include/helpers_android.h"
+
+namespace webrtc {
+
+class FineAudioBuffer;
+
+// Implements 16-bit mono PCM audio input support for Android using the
+// C based OpenSL ES API. No calls from C/C++ to Java using JNI are made.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread. Recorded audio
+// buffers are provided on a dedicated internal thread managed by the OpenSL
+// ES layer.
+//
+// The existing design forces the user to call InitRecording() after
+// StopRecording() to be able to call StartRecording() again. This is in line
+// with how the Java-based implementation works.
+//
+// As of API level 21, lower latency audio input is supported on select devices.
+// To take advantage of this feature, first confirm that lower latency output is
+// available. The capability for lower latency output is a prerequisite for the
+// lower latency input feature. Then, create an AudioRecorder with the same
+// sample rate and buffer size as would be used for output. OpenSL ES interfaces
+// for input effects preclude the lower latency path.
+// See https://developer.android.com/ndk/guides/audio/opensl-prog-notes.html
+// for more details.
+class OpenSLESRecorder {
+ public:
+  // Beginning with API level 17 (Android 4.2), a buffer count of 2 or more is
+  // required for lower latency. Beginning with API level 18 (Android 4.3), a
+  // buffer count of 1 is sufficient for lower latency. In addition, the buffer
+  // size and sample rate must be compatible with the device's native input
+  // configuration provided via the audio manager at construction.
+  // TODO(henrika): perhaps set this value dynamically based on OS version.
+  static const int kNumOfOpenSLESBuffers = 2;
+
+  explicit OpenSLESRecorder(AudioManager* audio_manager);
+  ~OpenSLESRecorder();
+
+  int Init();
+  int Terminate();
+
+  int InitRecording();
+  bool RecordingIsInitialized() const { return initialized_; }
+
+  int StartRecording();
+  int StopRecording();
+  bool Recording() const { return recording_; }
+
+  void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer);
+
+  // TODO(henrika): add support using OpenSL ES APIs when available.
+  int EnableBuiltInAEC(bool enable);
+  int EnableBuiltInAGC(bool enable);
+  int EnableBuiltInNS(bool enable);
+
+ private:
+  // Obtains the SL Engine Interface from the existing global Engine object.
+  // The interface exposes creation methods of all the OpenSL ES object types.
+  // This method defines the `engine_` member variable.
+  bool ObtainEngineInterface();
+
+  // Creates/destroys the audio recorder and the simple-buffer queue object.
+  bool CreateAudioRecorder();
+  void DestroyAudioRecorder();
+
+  // Allocate memory for audio buffers which will be used to capture audio
+  // via the SLAndroidSimpleBufferQueueItf interface.
+  void AllocateDataBuffers();
+
+  // These callback methods are called when data has been written to the input
+  // buffer queue. They are both called from an internal "OpenSL ES thread"
+  // which is not attached to the Dalvik VM.
+  static void SimpleBufferQueueCallback(SLAndroidSimpleBufferQueueItf caller,
+                                        void* context);
+  void ReadBufferQueue();
+
+  // Wraps calls to SLAndroidSimpleBufferQueueItf::Enqueue(). It can be called
+  // both on the main thread (but before recording has started) and from
+  // the internal audio thread while input streaming is active. It uses
+  // `simple_buffer_queue_` but no lock is needed since the initial calls from
+  // the main thread and the native callback thread are mutually exclusive.
+  bool EnqueueAudioBuffer();
+
+  // Returns the current recorder state.
+  SLuint32 GetRecordState() const;
+
+  // Returns the current buffer queue state.
+  SLAndroidSimpleBufferQueueState GetBufferQueueState() const;
+
+  // Number of buffers currently in the queue.
+  SLuint32 GetBufferCount();
+
+  // Prints a log message of the current queue state. Can be used for debugging
+  // purposes.
+  void LogBufferState() const;
+
+  // Ensures that methods are called from the same thread as this object is
+  // created on.
+  SequenceChecker thread_checker_;
+
+  // Stores thread ID in first call to SimpleBufferQueueCallback() from internal
+  // non-application thread which is not attached to the Dalvik JVM.
+  // Detached during construction of this object.
+  SequenceChecker thread_checker_opensles_;
+
+  // Raw pointer to the audio manager injected at construction. Used to cache
+  // audio parameters and to access the global SL engine object needed by the
+  // ObtainEngineInterface() method. The audio manager outlives any instance of
+  // this class.
+  AudioManager* const audio_manager_;
+
+  // Contains audio parameters provided to this class at construction by the
+  // AudioManager.
+  const AudioParameters audio_parameters_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class and created by AudioDeviceModule::Create().
+  AudioDeviceBuffer* audio_device_buffer_;
+
+  // PCM-type format definition.
+  // TODO(henrika): add support for SLAndroidDataFormat_PCM_EX (android-21) if
+  // 32-bit float representation is needed.
+  SLDataFormat_PCM pcm_format_;
+
+  bool initialized_;
+  bool recording_;
+
+  // This interface exposes creation methods for all the OpenSL ES object types.
+  // It is the OpenSL ES API entry point.
+  SLEngineItf engine_;
+
+  // The audio recorder media object records audio to the destination specified
+  // by the data sink capturing it from the input specified by the data source.
+  webrtc::ScopedSLObjectItf recorder_object_;
+
+  // This interface is supported on the audio recorder object and it controls
+  // the state of the audio recorder.
+  SLRecordItf recorder_;
+
+  // The Android Simple Buffer Queue interface is supported on the audio
+  // recorder. For recording, an app should enqueue empty buffers. When a
+  // registered callback sends notification that the system has finished writing
+  // data to the buffer, the app can read the buffer.
+  SLAndroidSimpleBufferQueueItf simple_buffer_queue_;
+
+  // Consumes audio of native buffer size and feeds the WebRTC layer with 10ms
+  // chunks of audio.
+  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+  // Queue of audio buffers to be used by the recorder object for capturing
+  // audio. They will be used in a Round-robin way and the size of each buffer
+  // is given by AudioParameters::frames_per_buffer(), i.e., it corresponds to
+  // the native OpenSL ES buffer size.
+  std::unique_ptr<std::unique_ptr<SLint16[]>[]> audio_buffers_;
+
+  // Keeps track of active audio buffer 'n' in the audio_buffers_[n] queue.
+  // Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...
+  int buffer_index_;
+
+  // Last time the OpenSL ES layer delivered recorded audio data.
+  uint32_t last_rec_time_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_RECORDER_H_
diff --git a/modules/audio_device/audio_device_impl.cc b/modules/audio_device/audio_device_impl.cc
index 9da9c62..092b98f 100644
--- a/modules/audio_device/audio_device_impl.cc
+++ b/modules/audio_device/audio_device_impl.cc
@@ -26,7 +26,16 @@
 #endif
 #elif defined(WEBRTC_ANDROID)
 #include <stdlib.h>
-#include "sdk/android/native_api/audio_device_module/audio_device_android.h"
+#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+#include "modules/audio_device/android/aaudio_player.h"
+#include "modules/audio_device/android/aaudio_recorder.h"
+#endif
+#include "modules/audio_device/android/audio_device_template.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/android/audio_record_jni.h"
+#include "modules/audio_device/android/audio_track_jni.h"
+#include "modules/audio_device/android/opensles_player.h"
+#include "modules/audio_device/android/opensles_recorder.h"
 #elif defined(WEBRTC_LINUX)
 #if defined(WEBRTC_ENABLE_LINUX_ALSA)
 #include "modules/audio_device/linux/audio_device_alsa_linux.h"
@@ -65,11 +74,7 @@
     AudioLayer audio_layer,
     TaskQueueFactory* task_queue_factory) {
   RTC_DLOG(LS_INFO) << __FUNCTION__;
-#if defined(WEBRTC_ANDROID)
-  return CreateAndroidAudioDeviceModule(audio_layer);
-#else
   return AudioDeviceModule::CreateForTest(audio_layer, task_queue_factory);
-#endif
 }
 
 // static
@@ -84,14 +89,6 @@
     RTC_LOG(LS_ERROR) << "Use the CreateWindowsCoreAudioAudioDeviceModule() "
                          "factory method instead for this option.";
     return nullptr;
-  } else if (audio_layer == AudioDeviceModule::kAndroidJavaAudio ||
-             audio_layer == AudioDeviceModule::kAndroidOpenSLESAudio ||
-             audio_layer == AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio ||
-             audio_layer == kAndroidAAudioAudio ||
-             audio_layer == kAndroidJavaInputAndAAudioOutputAudio) {
-    RTC_LOG(LS_ERROR) << "Use the CreateAndroidAudioDeviceModule() "
-                         "factory method instead for this option.";
-    return nullptr;
   }
 
   // Create the generic reference counted (platform independent) implementation.
@@ -185,13 +182,70 @@
   }
 #endif  // defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
 
+#if defined(WEBRTC_ANDROID)
+  // Create an Android audio manager.
+  audio_manager_android_.reset(new AudioManager());
+  // Select best possible combination of audio layers.
+  if (audio_layer == kPlatformDefaultAudio) {
+    if (audio_manager_android_->IsAAudioSupported()) {
+      // Use of AAudio for both playout and recording has highest priority.
+      audio_layer = kAndroidAAudioAudio;
+    } else if (audio_manager_android_->IsLowLatencyPlayoutSupported() &&
+               audio_manager_android_->IsLowLatencyRecordSupported()) {
+      // Use OpenSL ES for both playout and recording.
+      audio_layer = kAndroidOpenSLESAudio;
+    } else if (audio_manager_android_->IsLowLatencyPlayoutSupported() &&
+               !audio_manager_android_->IsLowLatencyRecordSupported()) {
+      // Use OpenSL ES for output on devices that only support the
+      // low-latency output audio path.
+      audio_layer = kAndroidJavaInputAndOpenSLESOutputAudio;
+    } else {
+      // Use Java-based audio in both directions when low-latency output is
+      // not supported.
+      audio_layer = kAndroidJavaAudio;
+    }
+  }
+  AudioManager* audio_manager = audio_manager_android_.get();
+  if (audio_layer == kAndroidJavaAudio) {
+    // Java audio for both input and output audio.
+    audio_device_.reset(new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(
+        audio_layer, audio_manager));
+  } else if (audio_layer == kAndroidOpenSLESAudio) {
+    // OpenSL ES based audio for both input and output audio.
+    audio_device_.reset(
+        new AudioDeviceTemplate<OpenSLESRecorder, OpenSLESPlayer>(
+            audio_layer, audio_manager));
+  } else if (audio_layer == kAndroidJavaInputAndOpenSLESOutputAudio) {
+    // Java audio for input and OpenSL ES for output audio (i.e. mixed APIs).
+    // This combination provides low-latency output audio and at the same
+    // time support for HW AEC using the AudioRecord Java API.
+    audio_device_.reset(new AudioDeviceTemplate<AudioRecordJni, OpenSLESPlayer>(
+        audio_layer, audio_manager));
+  } else if (audio_layer == kAndroidAAudioAudio) {
+#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+    // AAudio based audio for both input and output.
+    audio_device_.reset(new AudioDeviceTemplate<AAudioRecorder, AAudioPlayer>(
+        audio_layer, audio_manager));
+#endif
+  } else if (audio_layer == kAndroidJavaInputAndAAudioOutputAudio) {
+#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+    // Java audio for input and AAudio for output audio (i.e. mixed APIs).
+    audio_device_.reset(new AudioDeviceTemplate<AudioRecordJni, AAudioPlayer>(
+        audio_layer, audio_manager));
+#endif
+  } else {
+    RTC_LOG(LS_ERROR) << "The requested audio layer is not supported";
+    audio_device_.reset(nullptr);
+  }
+// END #if defined(WEBRTC_ANDROID)
+
 // Linux ADM implementation.
 // Note that, WEBRTC_ENABLE_LINUX_ALSA is always defined by default when
 // WEBRTC_LINUX is defined. WEBRTC_ENABLE_LINUX_PULSE depends on the
 // 'rtc_include_pulse_audio' build flag.
 // TODO(bugs.webrtc.org/9127): improve support and make it more clear that
 // PulseAudio is the default selection.
-#if !defined(WEBRTC_ANDROID) && defined(WEBRTC_LINUX)
+#elif defined(WEBRTC_LINUX)
 #if !defined(WEBRTC_ENABLE_LINUX_PULSE)
   // Build flag 'rtc_include_pulse_audio' is set to false. In this mode:
   // - kPlatformDefaultAudio => ALSA, and
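
The restored Android branch in this file picks an audio layer when the caller
passes kPlatformDefaultAudio; a hedged sketch of the resulting priority order
and a typical call site (the factory signature is the standard one from
audio_device.h, the task_queue_factory pointer is an assumed input):

  // Effective priority for kPlatformDefaultAudio on Android:
  //   1. kAndroidAAudioAudio                     if IsAAudioSupported()
  //   2. kAndroidOpenSLESAudio                   if low-latency input + output
  //   3. kAndroidJavaInputAndOpenSLESOutputAudio if low-latency output only
  //   4. kAndroidJavaAudio                       otherwise
  rtc::scoped_refptr<webrtc::AudioDeviceModule> adm =
      webrtc::AudioDeviceModule::Create(
          webrtc::AudioDeviceModule::kPlatformDefaultAudio, task_queue_factory);
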
diff --git a/modules/audio_device/audio_device_impl.h b/modules/audio_device/audio_device_impl.h
index 1737b46..45f73dc 100644
--- a/modules/audio_device/audio_device_impl.h
+++ b/modules/audio_device/audio_device_impl.h
@@ -24,6 +24,7 @@
 namespace webrtc {
 
 class AudioDeviceGeneric;
+class AudioManager;
 
 class AudioDeviceModuleImpl : public AudioDeviceModuleForTest {
  public:
@@ -144,6 +145,12 @@
   int GetRecordAudioParameters(AudioParameters* params) const override;
 #endif  // WEBRTC_IOS
 
+#if defined(WEBRTC_ANDROID)
+  // Only use this accessor for test purposes on Android.
+  AudioManager* GetAndroidAudioManagerForTest() {
+    return audio_manager_android_.get();
+  }
+#endif
   AudioDeviceBuffer* GetAudioDeviceBuffer() { return &audio_device_buffer_; }
 
   int RestartPlayoutInternally() override { return -1; }
@@ -158,6 +165,10 @@
   AudioLayer audio_layer_;
   PlatformType platform_type_ = kPlatformNotSupported;
   bool initialized_ = false;
+#if defined(WEBRTC_ANDROID)
+  // Should be declared first to ensure that it outlives other resources.
+  std::unique_ptr<AudioManager> audio_manager_android_;
+#endif
   AudioDeviceBuffer audio_device_buffer_;
   std::unique_ptr<AudioDeviceGeneric> audio_device_;
 };
diff --git a/modules/audio_device/g3doc/audio_device_module.md b/modules/audio_device/g3doc/audio_device_module.md
index 84cfb41..101b2e4 100644
--- a/modules/audio_device/g3doc/audio_device_module.md
+++ b/modules/audio_device/g3doc/audio_device_module.md
@@ -5,8 +5,8 @@
 
 ## Overview
 
-The ADM(AudioDeviceModule) is responsible for driving input (microphone) and
-output (speaker) audio in WebRTC and the API is defined in [audio_device.h][19].
+The ADM is responsible for driving input (microphone) and output (speaker) audio
+in WebRTC, and the API is defined in [audio_device.h][19].
 
 Main functions of the ADM are:
 
diff --git a/modules/utility/source/jvm_android.cc b/modules/utility/source/jvm_android.cc
index e0c66d5..ee9930b 100644
--- a/modules/utility/source/jvm_android.cc
+++ b/modules/utility/source/jvm_android.cc
@@ -27,6 +27,10 @@
   const char* name;
   jclass clazz;
 } loaded_classes[] = {
+    {"org/webrtc/voiceengine/BuildInfo", nullptr},
+    {"org/webrtc/voiceengine/WebRtcAudioManager", nullptr},
+    {"org/webrtc/voiceengine/WebRtcAudioRecord", nullptr},
+    {"org/webrtc/voiceengine/WebRtcAudioTrack", nullptr},
 };
 
 // Android's FindClass() is trickier than usual because the app-specific
diff --git a/sdk/android/BUILD.gn b/sdk/android/BUILD.gn
index d662fab..8611707 100644
--- a/sdk/android/BUILD.gn
+++ b/sdk/android/BUILD.gn
@@ -55,6 +55,7 @@
       ":swcodecs_java",
       ":video_api_java",
       ":video_java",
+      "../../modules/audio_device:audio_device_java",
       "../../rtc_base:base_java",
     ]
   }
@@ -90,6 +91,7 @@
       ":surfaceviewrenderer_java",
       ":video_api_java",
       ":video_java",
+      "//modules/audio_device:audio_device_java",
       "//rtc_base:base_java",
     ]
   }
@@ -154,7 +156,6 @@
     sources = [
       "api/org/webrtc/Predicate.java",
       "api/org/webrtc/RefCounted.java",
-      "src/java/org/webrtc/ApplicationContextProvider.java",
       "src/java/org/webrtc/CalledByNative.java",
       "src/java/org/webrtc/CalledByNativeUnchecked.java",
       "src/java/org/webrtc/Histogram.java",
@@ -164,10 +165,7 @@
       "src/java/org/webrtc/WebRtcClassLoader.java",
     ]
 
-    deps = [
-      "//rtc_base:base_java",
-      "//third_party/androidx:androidx_annotation_annotation_java",
-    ]
+    deps = [ "//third_party/androidx:androidx_annotation_annotation_java" ]
   }
 
   rtc_android_library("audio_api_java") {
@@ -319,6 +317,7 @@
       ":swcodecs_java",
       ":video_api_java",
       ":video_java",
+      "//modules/audio_device:audio_device_java",
       "//rtc_base:base_java",
       "//third_party/androidx:androidx_annotation_annotation_java",
     ]
@@ -580,6 +579,7 @@
       ":internal_jni",
       ":native_api_jni",
       "../../api:field_trials_view",
+      "../../api:libjingle_peerconnection_api",
       "../../api:scoped_refptr",
       "../../api:sequence_checker",
       "../../api/task_queue:pending_task_safety_flag",
@@ -931,7 +931,6 @@
   rtc_library("native_api_jni") {
     visibility = [ "*" ]
     sources = [
-      "native_api/jni/application_context_provider.cc",
       "native_api/jni/class_loader.cc",
       "native_api/jni/java_types.cc",
       "native_api/jni/jvm.cc",
@@ -940,7 +939,6 @@
     ]
 
     public = [
-      "native_api/jni/application_context_provider.h",
       "native_api/jni/class_loader.h",
       "native_api/jni/java_types.h",
       "native_api/jni/jni_int_wrapper.h",
@@ -986,12 +984,10 @@
 
     deps = [
       ":base_jni",
-      ":internal_jni",
       ":java_audio_device_module",
-      ":native_api_jni",
       ":opensles_audio_device_module",
       "../../api:scoped_refptr",
-      "../../modules/audio_device:audio_device_api",
+      "../../modules/audio_device",
       "../../rtc_base:checks",
       "../../rtc_base:logging",
       "../../rtc_base:refcount",
@@ -1201,7 +1197,7 @@
       ":base_jni",
       ":generated_java_audio_device_module_native_jni",
       "../../api:sequence_checker",
-      "../../modules/audio_device:audio_device_api",
+      "../../modules/audio_device",
       "../../modules/audio_device:audio_device_buffer",
       "../../rtc_base:checks",
       "../../rtc_base:logging",
@@ -1259,7 +1255,7 @@
       "../../api:refcountedbase",
       "../../api:scoped_refptr",
       "../../api:sequence_checker",
-      "../../modules/audio_device:audio_device_api",
+      "../../modules/audio_device",
       "../../modules/audio_device:audio_device_buffer",
       "../../rtc_base:checks",
       "../../rtc_base:logging",
@@ -1443,7 +1439,6 @@
 
   generate_jni("generated_native_api_jni") {
     sources = [
-      "src/java/org/webrtc/ApplicationContextProvider.java",
       "src/java/org/webrtc/JniHelper.java",
       "src/java/org/webrtc/WebRtcClassLoader.java",
     ]
@@ -1608,6 +1603,8 @@
 
     sources = [
       "native_unittests/android_network_monitor_unittest.cc",
+      "native_unittests/application_context_provider.cc",
+      "native_unittests/application_context_provider.h",
       "native_unittests/audio_device/audio_device_unittest.cc",
       "native_unittests/codecs/wrapper_unittest.cc",
       "native_unittests/java_types_unittest.cc",
@@ -1679,6 +1676,7 @@
     testonly = true
 
     sources = [
+      "native_unittests/org/webrtc/ApplicationContextProvider.java",
       "native_unittests/org/webrtc/BuildInfo.java",
       "native_unittests/org/webrtc/CodecsWrapperTestHelper.java",
       "native_unittests/org/webrtc/FakeVideoEncoder.java",
@@ -1703,6 +1701,7 @@
     testonly = true
 
     sources = [
+      "native_unittests/org/webrtc/ApplicationContextProvider.java",
       "native_unittests/org/webrtc/BuildInfo.java",
       "native_unittests/org/webrtc/CodecsWrapperTestHelper.java",
       "native_unittests/org/webrtc/JavaTypesTestHelper.java",
diff --git a/sdk/android/native_api/DEPS b/sdk/android/native_api/DEPS
index 8afaebe..020e1cb 100644
--- a/sdk/android/native_api/DEPS
+++ b/sdk/android/native_api/DEPS
@@ -1,5 +1,4 @@
 include_rules = [
   "+modules/audio_device/include/audio_device.h",
-  "+modules/utility/include/jvm_android.h",
   "+system_wrappers/include",
 ]
diff --git a/sdk/android/native_api/audio_device_module/audio_device_android.cc b/sdk/android/native_api/audio_device_module/audio_device_android.cc
index 6ba327a..2be7f7d 100644
--- a/sdk/android/native_api/audio_device_module/audio_device_android.cc
+++ b/sdk/android/native_api/audio_device_module/audio_device_android.cc
@@ -24,12 +24,10 @@
 #include "sdk/android/src/jni/audio_device/aaudio_recorder.h"
 #endif
 
-#include "sdk/android/native_api/jni/application_context_provider.h"
 #include "sdk/android/src/jni/audio_device/audio_record_jni.h"
 #include "sdk/android/src/jni/audio_device/audio_track_jni.h"
 #include "sdk/android/src/jni/audio_device/opensles_player.h"
 #include "sdk/android/src/jni/audio_device/opensles_recorder.h"
-#include "sdk/android/src/jni/jvm.h"
 #include "system_wrappers/include/metrics.h"
 
 namespace webrtc {
@@ -72,31 +70,6 @@
       std::make_unique<jni::AAudioRecorder>(input_parameters),
       std::make_unique<jni::AAudioPlayer>(output_parameters));
 }
-
-rtc::scoped_refptr<AudioDeviceModule>
-CreateJavaInputAndAAudioOutputAudioDeviceModule(JNIEnv* env,
-                                                jobject application_context) {
-  RTC_DLOG(LS_INFO) << __FUNCTION__;
-  // Get default audio input/output parameters.
-  const JavaParamRef<jobject> j_context(application_context);
-  const ScopedJavaLocalRef<jobject> j_audio_manager =
-      jni::GetAudioManager(env, j_context);
-  AudioParameters input_parameters;
-  AudioParameters output_parameters;
-  GetDefaultAudioParameters(env, application_context, &input_parameters,
-                            &output_parameters);
-  // Create ADM from AudioRecord and OpenSLESPlayer.
-  auto audio_input = std::make_unique<jni::AudioRecordJni>(
-      env, input_parameters, jni::kLowLatencyModeDelayEstimateInMilliseconds,
-      jni::AudioRecordJni::CreateJavaWebRtcAudioRecord(env, j_context,
-                                                       j_audio_manager));
-
-  return CreateAudioDeviceModuleFromInputAndOutput(
-      AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio,
-      false /* use_stereo_input */, false /* use_stereo_output */,
-      jni::kLowLatencyModeDelayEstimateInMilliseconds, std::move(audio_input),
-      std::make_unique<jni::AAudioPlayer>(output_parameters));
-}
 #endif
 
 rtc::scoped_refptr<AudioDeviceModule> CreateJavaAudioDeviceModule(
@@ -179,57 +152,4 @@
       std::move(audio_output));
 }
 
-rtc::scoped_refptr<AudioDeviceModule> CreateAndroidAudioDeviceModule(
-    AudioDeviceModule::AudioLayer audio_layer) {
-  auto env = AttachCurrentThreadIfNeeded();
-  auto j_context = webrtc::GetAppContext(env);
-  // Select best possible combination of audio layers.
-  if (audio_layer == AudioDeviceModule::kPlatformDefaultAudio) {
-#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
-    // AAudio based audio for both input and output.
-    audio_layer = AudioDeviceModule::kAndroidAAudioAudio;
-#else
-    if (jni::IsLowLatencyInputSupported(env, j_context) &&
-        jni::IsLowLatencyOutputSupported(env, j_context)) {
-      // Use OpenSL ES for both playout and recording.
-      audio_layer = AudioDeviceModule::kAndroidOpenSLESAudio;
-    } else if (jni::IsLowLatencyOutputSupported(env, j_context) &&
-               !jni::IsLowLatencyInputSupported(env, j_context)) {
-      // Use OpenSL ES for output on devices that only supports the
-      // low-latency output audio path.
-      audio_layer = AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
-    } else {
-      // Use Java-based audio in both directions when low-latency output is
-      // not supported.
-      audio_layer = AudioDeviceModule::kAndroidJavaAudio;
-    }
-#endif
-  }
-  switch (audio_layer) {
-    case AudioDeviceModule::kAndroidJavaAudio:
-      // Java audio for both input and output audio.
-      return CreateJavaAudioDeviceModule(env, j_context.obj());
-    case AudioDeviceModule::kAndroidOpenSLESAudio:
-      // OpenSL ES based audio for both input and output audio.
-      return CreateOpenSLESAudioDeviceModule(env, j_context.obj());
-    case AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio:
-      // Java audio for input and OpenSL ES for output audio (i.e. mixed APIs).
-      // This combination provides low-latency output audio and at the same
-      // time support for HW AEC using the AudioRecord Java API.
-      return CreateJavaInputAndOpenSLESOutputAudioDeviceModule(
-        env, j_context.obj());
-#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
-    case AudioDeviceModule::kAndroidAAudioAudio:
-      // AAudio based audio for both input and output.
-      return CreateAAudioAudioDeviceModule(env, j_context.obj());
-    case AudioDeviceModule::kAndroidJavaInputAndAAudioOutputAudio:
-      // Java audio for input and AAudio for output audio (i.e. mixed APIs).
-      return CreateJavaInputAndAAudioOutputAudioDeviceModule(
-        env, j_context.obj());
-#endif
-    default:
-      return nullptr;
-  }
-}
-
 }  // namespace webrtc
diff --git a/sdk/android/native_api/audio_device_module/audio_device_android.h b/sdk/android/native_api/audio_device_module/audio_device_android.h
index b687dca..a093f8c 100644
--- a/sdk/android/native_api/audio_device_module/audio_device_android.h
+++ b/sdk/android/native_api/audio_device_module/audio_device_android.h
@@ -32,17 +32,8 @@
     jobject application_context);
 
 rtc::scoped_refptr<AudioDeviceModule>
-CreateJavaInputAndOpenSLESOutputAudioDeviceModule(
-    JNIEnv* env,
-    jobject application_context);
-
-rtc::scoped_refptr<AudioDeviceModule>
-CreateJavaInputAndAAudioOutputAudioDeviceModule(
-    JNIEnv* env,
-    jobject application_context);
-
-rtc::scoped_refptr<AudioDeviceModule> CreateAndroidAudioDeviceModule(
-    AudioDeviceModule::AudioLayer audio_layer);
+CreateJavaInputAndOpenSLESOutputAudioDeviceModule(JNIEnv* env,
+                                                  jobject application_context);
 
 }  // namespace webrtc
 
diff --git a/sdk/android/native_api/jni/application_context_provider.h b/sdk/android/native_api/jni/application_context_provider.h
deleted file mode 100644
index dc3a80a..0000000
--- a/sdk/android/native_api/jni/application_context_provider.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- *  Copyright 2019 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-#ifndef SDK_ANDROID_NATIVE_API_JNI_APPLICATION_CONTEXT_PROVIDER_H_
-#define SDK_ANDROID_NATIVE_API_JNI_APPLICATION_CONTEXT_PROVIDER_H_
-
-#include "sdk/android/native_api/jni/scoped_java_ref.h"
-
-namespace webrtc {
-
-ScopedJavaLocalRef<jobject> GetAppContext(JNIEnv* jni);
-
-}  // namespace webrtc
-
-#endif  // SDK_ANDROID_NATIVE_API_JNI_APPLICATION_CONTEXT_PROVIDER_H_
diff --git a/sdk/android/native_unittests/android_network_monitor_unittest.cc b/sdk/android/native_unittests/android_network_monitor_unittest.cc
index 0489cfd..9aec62d 100644
--- a/sdk/android/native_unittests/android_network_monitor_unittest.cc
+++ b/sdk/android/native_unittests/android_network_monitor_unittest.cc
@@ -13,7 +13,7 @@
 #include "rtc_base/ip_address.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/thread.h"
-#include "sdk/android/native_api/jni/application_context_provider.h"
+#include "sdk/android/native_unittests/application_context_provider.h"
 #include "sdk/android/src/jni/jni_helpers.h"
 #include "test/gtest.h"
 #include "test/scoped_key_value_config.h"
@@ -47,7 +47,7 @@
  public:
   AndroidNetworkMonitorTest() {
     JNIEnv* env = AttachCurrentThreadIfNeeded();
-    ScopedJavaLocalRef<jobject> context = GetAppContext(env);
+    ScopedJavaLocalRef<jobject> context = test::GetAppContextForTest(env);
     network_monitor_ = std::make_unique<jni::AndroidNetworkMonitor>(
         env, context, field_trials_);
   }
diff --git a/sdk/android/native_api/jni/application_context_provider.cc b/sdk/android/native_unittests/application_context_provider.cc
similarity index 63%
rename from sdk/android/native_api/jni/application_context_provider.cc
rename to sdk/android/native_unittests/application_context_provider.cc
index de3c4a3..07b3c04 100644
--- a/sdk/android/native_api/jni/application_context_provider.cc
+++ b/sdk/android/native_unittests/application_context_provider.cc
@@ -7,16 +7,18 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#include "sdk/android/native_api/jni/application_context_provider.h"
+#include "sdk/android/native_unittests/application_context_provider.h"
 
-#include "sdk/android/generated_native_api_jni/ApplicationContextProvider_jni.h"
-#include "sdk/android/native_api/jni/scoped_java_ref.h"
+#include "sdk/android/generated_native_unittests_jni/ApplicationContextProvider_jni.h"
+#include "sdk/android/src/jni/jni_helpers.h"
 
 namespace webrtc {
+namespace test {
 
-ScopedJavaLocalRef<jobject> GetAppContext(JNIEnv* jni) {
+ScopedJavaLocalRef<jobject> GetAppContextForTest(JNIEnv* jni) {
   return ScopedJavaLocalRef<jobject>(
-      jni::Java_ApplicationContextProvider_getApplicationContext(jni));
+      jni::Java_ApplicationContextProvider_getApplicationContextForTest(jni));
 }
 
+}  // namespace test
 }  // namespace webrtc
diff --git a/sdk/android/native_unittests/application_context_provider.h b/sdk/android/native_unittests/application_context_provider.h
new file mode 100644
index 0000000..8aace02
--- /dev/null
+++ b/sdk/android/native_unittests/application_context_provider.h
@@ -0,0 +1,23 @@
+/*
+ *  Copyright 2019 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef SDK_ANDROID_NATIVE_UNITTESTS_APPLICATION_CONTEXT_PROVIDER_H_
+#define SDK_ANDROID_NATIVE_UNITTESTS_APPLICATION_CONTEXT_PROVIDER_H_
+
+#include "sdk/android/src/jni/jni_helpers.h"
+
+namespace webrtc {
+namespace test {
+
+ScopedJavaLocalRef<jobject> GetAppContextForTest(JNIEnv* jni);
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // SDK_ANDROID_NATIVE_UNITTESTS_APPLICATION_CONTEXT_PROVIDER_H_
diff --git a/sdk/android/native_unittests/audio_device/audio_device_unittest.cc b/sdk/android/native_unittests/audio_device/audio_device_unittest.cc
index 201a54e..7d582d4 100644
--- a/sdk/android/native_unittests/audio_device/audio_device_unittest.cc
+++ b/sdk/android/native_unittests/audio_device/audio_device_unittest.cc
@@ -22,7 +22,7 @@
 #include "rtc_base/time_utils.h"
 #include "sdk/android/generated_native_unittests_jni/BuildInfo_jni.h"
 #include "sdk/android/native_api/audio_device_module/audio_device_android.h"
-#include "sdk/android/native_api/jni/application_context_provider.h"
+#include "sdk/android/native_unittests/application_context_provider.h"
 #include "sdk/android/src/jni/audio_device/audio_common.h"
 #include "sdk/android/src/jni/audio_device/audio_device_module.h"
 #include "sdk/android/src/jni/audio_device/opensles_common.h"
@@ -466,7 +466,7 @@
     // implementations.
     // Creates an audio device using a default audio layer.
     jni_ = AttachCurrentThreadIfNeeded();
-    context_ = GetAppContext(jni_);
+    context_ = test::GetAppContextForTest(jni_);
     audio_device_ = CreateJavaAudioDeviceModule(jni_, context_.obj());
     EXPECT_NE(audio_device_.get(), nullptr);
     EXPECT_EQ(0, audio_device_->Init());
@@ -491,7 +491,7 @@
   }
 
   void SetActiveAudioLayer(AudioDeviceModule::AudioLayer audio_layer) {
-    audio_device_ = CreateAndroidAudioDeviceModule(audio_layer);
+    audio_device_ = CreateAudioDevice(audio_layer);
     EXPECT_NE(audio_device_.get(), nullptr);
     EXPECT_EQ(0, audio_device_->Init());
     UpdateParameters();
@@ -512,6 +512,30 @@
     return audio_device_;
   }
 
+  rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice(
+      AudioDeviceModule::AudioLayer audio_layer) {
+#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+    if (audio_layer == AudioDeviceModule::kAndroidAAudioAudio) {
+      return rtc::scoped_refptr<AudioDeviceModule>(
+          CreateAAudioAudioDeviceModule(jni_, context_.obj()));
+    }
+#endif
+    if (audio_layer == AudioDeviceModule::kAndroidJavaAudio) {
+      return rtc::scoped_refptr<AudioDeviceModule>(
+          CreateJavaAudioDeviceModule(jni_, context_.obj()));
+    } else if (audio_layer == AudioDeviceModule::kAndroidOpenSLESAudio) {
+      return rtc::scoped_refptr<AudioDeviceModule>(
+          CreateOpenSLESAudioDeviceModule(jni_, context_.obj()));
+    } else if (audio_layer ==
+               AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio) {
+      return rtc::scoped_refptr<AudioDeviceModule>(
+          CreateJavaInputAndOpenSLESOutputAudioDeviceModule(jni_,
+                                                            context_.obj()));
+    } else {
+      return nullptr;
+    }
+  }
+
   // Returns file name relative to the resource root given a sample rate.
   std::string GetFileName(int sample_rate) {
     EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100);
@@ -542,7 +566,7 @@
   int TestDelayOnAudioLayer(
       const AudioDeviceModule::AudioLayer& layer_to_test) {
     rtc::scoped_refptr<AudioDeviceModule> audio_device;
-    audio_device = CreateAndroidAudioDeviceModule(layer_to_test);
+    audio_device = CreateAudioDevice(layer_to_test);
     EXPECT_NE(audio_device.get(), nullptr);
     uint16_t playout_delay;
     EXPECT_EQ(0, audio_device->PlayoutDelay(&playout_delay));
@@ -552,7 +576,7 @@
   AudioDeviceModule::AudioLayer TestActiveAudioLayer(
       const AudioDeviceModule::AudioLayer& layer_to_test) {
     rtc::scoped_refptr<AudioDeviceModule> audio_device;
-    audio_device = CreateAndroidAudioDeviceModule(layer_to_test);
+    audio_device = CreateAudioDevice(layer_to_test);
     EXPECT_NE(audio_device.get(), nullptr);
     AudioDeviceModule::AudioLayer active;
     EXPECT_EQ(0, audio_device->ActiveAudioLayer(&active));
@@ -650,22 +674,6 @@
     return volume;
   }
 
-  bool IsLowLatencyPlayoutSupported() {
-    return jni::IsLowLatencyInputSupported(jni_, context_);
-  }
-
-  bool IsLowLatencyRecordSupported() {
-    return jni::IsLowLatencyOutputSupported(jni_, context_);
-  }
-
-  bool IsAAudioSupported() {
-#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
-    return true;
-#else
-    return false;
-#endif
-  }
-
   JNIEnv* jni_;
   ScopedJavaLocalRef<jobject> context_;
   rtc::Event test_is_done_;
@@ -679,31 +687,6 @@
   // Using the test fixture to create and destruct the audio device module.
 }
 
-// We always ask for a default audio layer when the ADM is constructed. But the
-// ADM will then internally set the best suitable combination of audio layers,
-// for input and output based on if low-latency output and/or input audio in
-// combination with OpenSL ES is supported or not. This test ensures that the
-// correct selection is done.
-TEST_F(AudioDeviceTest, VerifyDefaultAudioLayer) {
-  const AudioDeviceModule::AudioLayer audio_layer =
-      TestActiveAudioLayer(AudioDeviceModule::kPlatformDefaultAudio);
-  bool low_latency_output = IsLowLatencyPlayoutSupported();
-  bool low_latency_input = IsLowLatencyRecordSupported();
-  bool aaudio = IsAAudioSupported();
-  AudioDeviceModule::AudioLayer expected_audio_layer;
-  if (aaudio) {
-    expected_audio_layer = AudioDeviceModule::kAndroidAAudioAudio;
-  } else if (low_latency_output && low_latency_input) {
-    expected_audio_layer = AudioDeviceModule::kAndroidOpenSLESAudio;
-  } else if (low_latency_output && !low_latency_input) {
-    expected_audio_layer =
-        AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
-  } else {
-    expected_audio_layer = AudioDeviceModule::kAndroidJavaAudio;
-  }
-  EXPECT_EQ(expected_audio_layer, audio_layer);
-}
-
 // Verify that it is possible to explicitly create the two types of supported
 // ADMs. These two tests override the default selection of native audio layer
 // by ignoring whether the device supports low-latency output or not.
@@ -731,18 +714,15 @@
   EXPECT_EQ(expected_layer, active_layer);
 }
 
+// TODO(bugs.webrtc.org/8914)
+// TODO(phensman): Add test for AAudio/Java combination when this combination
+// is supported.
 #if !defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
 #define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
   DISABLED_CorrectAudioLayerIsUsedForAAudioInBothDirections
-
-#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
-  DISABLED_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
 #else
 #define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
   CorrectAudioLayerIsUsedForAAudioInBothDirections
-
-#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
-  CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
 #endif
 TEST_F(AudioDeviceTest,
        MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections) {
@@ -753,15 +733,6 @@
   EXPECT_EQ(expected_layer, active_layer);
 }
 
-TEST_F(AudioDeviceTest,
-       MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo) {
-  AudioDeviceModule::AudioLayer expected_layer =
-      AudioDeviceModule::kAndroidJavaInputAndAAudioOutputAudio;
-  AudioDeviceModule::AudioLayer active_layer =
-      TestActiveAudioLayer(expected_layer);
-  EXPECT_EQ(expected_layer, active_layer);
-}
-
 // The Android ADM supports two different delay reporting modes. One for the
 // low-latency output path (in combination with OpenSL ES), and one for the
 // high-latency output path (Java backends in both directions). These two tests
@@ -1158,7 +1129,7 @@
 
 TEST(JavaAudioDeviceTest, TestRunningTwoAdmsSimultaneously) {
   JNIEnv* jni = AttachCurrentThreadIfNeeded();
-  ScopedJavaLocalRef<jobject> context = GetAppContext(jni);
+  ScopedJavaLocalRef<jobject> context = test::GetAppContextForTest(jni);
 
   // Create and start the first ADM.
   rtc::scoped_refptr<AudioDeviceModule> adm_1 =
diff --git a/sdk/android/src/java/org/webrtc/ApplicationContextProvider.java b/sdk/android/native_unittests/org/webrtc/ApplicationContextProvider.java
similarity index 90%
rename from sdk/android/src/java/org/webrtc/ApplicationContextProvider.java
rename to sdk/android/native_unittests/org/webrtc/ApplicationContextProvider.java
index 6400a04..e10d347 100644
--- a/sdk/android/src/java/org/webrtc/ApplicationContextProvider.java
+++ b/sdk/android/native_unittests/org/webrtc/ApplicationContextProvider.java
@@ -14,7 +14,7 @@
 
 public class ApplicationContextProvider {
   @CalledByNative
-  public static Context getApplicationContext() {
+  public static Context getApplicationContextForTest() {
     return ContextUtils.getApplicationContext();
   }
 }
diff --git a/sdk/android/native_unittests/peerconnection/peer_connection_factory_unittest.cc b/sdk/android/native_unittests/peerconnection/peer_connection_factory_unittest.cc
index b751390..8bb6e33 100644
--- a/sdk/android/native_unittests/peerconnection/peer_connection_factory_unittest.cc
+++ b/sdk/android/native_unittests/peerconnection/peer_connection_factory_unittest.cc
@@ -24,7 +24,7 @@
 #include "sdk/android/generated_native_unittests_jni/PeerConnectionFactoryInitializationHelper_jni.h"
 #include "sdk/android/native_api/audio_device_module/audio_device_android.h"
 #include "sdk/android/native_api/jni/jvm.h"
-#include "sdk/android/native_api/jni/application_context_provider.h"
+#include "sdk/android/native_unittests/application_context_provider.h"
 #include "sdk/android/src/jni/jni_helpers.h"
 #include "test/gtest.h"
 
@@ -57,7 +57,7 @@
   cricket::MediaEngineDependencies media_deps;
   media_deps.task_queue_factory = pcf_deps.task_queue_factory.get();
   media_deps.adm =
-      CreateJavaAudioDeviceModule(jni, GetAppContext(jni).obj());
+      CreateJavaAudioDeviceModule(jni, GetAppContextForTest(jni).obj());
   media_deps.video_encoder_factory =
       std::make_unique<webrtc::InternalEncoderFactory>();
   media_deps.video_decoder_factory =
diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioManager.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioManager.java
index 506e33f..f398602 100644
--- a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioManager.java
+++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioManager.java
@@ -55,13 +55,11 @@
         : getMinInputFrameSize(sampleRate, numberOfInputChannels);
   }
 
-  @CalledByNative
-  static boolean isLowLatencyOutputSupported(Context context) {
+  private static boolean isLowLatencyOutputSupported(Context context) {
     return context.getPackageManager().hasSystemFeature(PackageManager.FEATURE_AUDIO_LOW_LATENCY);
   }
 
-  @CalledByNative
-  static boolean isLowLatencyInputSupported(Context context) {
+  private static boolean isLowLatencyInputSupported(Context context) {
     // TODO(henrika): investigate if some sort of device list is needed here
     // as well. The NDK doc states that: "As of API level 21, lower latency
     // audio input is supported on select devices. To take advantage of this
diff --git a/sdk/android/src/jni/audio_device/audio_device_module.cc b/sdk/android/src/jni/audio_device/audio_device_module.cc
index 3742d89..7c59d3e 100644
--- a/sdk/android/src/jni/audio_device/audio_device_module.cc
+++ b/sdk/android/src/jni/audio_device/audio_device_module.cc
@@ -633,14 +633,6 @@
   RTC_CHECK(output_parameters->is_valid());
 }
 
-bool IsLowLatencyInputSupported(JNIEnv* env, const JavaRef<jobject>& j_context) {
-  return Java_WebRtcAudioManager_isLowLatencyInputSupported(env, j_context);
-}
-
-bool IsLowLatencyOutputSupported(JNIEnv* env, const JavaRef<jobject>& j_context) {
-  return Java_WebRtcAudioManager_isLowLatencyOutputSupported(env, j_context);
-}
-
 rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModuleFromInputAndOutput(
     AudioDeviceModule::AudioLayer audio_layer,
     bool is_stereo_playout_supported,
diff --git a/sdk/android/src/jni/audio_device/audio_device_module.h b/sdk/android/src/jni/audio_device/audio_device_module.h
index 9ec73de..1918336 100644
--- a/sdk/android/src/jni/audio_device/audio_device_module.h
+++ b/sdk/android/src/jni/audio_device/audio_device_module.h
@@ -86,10 +86,6 @@
                         AudioParameters* input_parameters,
                         AudioParameters* output_parameters);
 
-bool IsLowLatencyInputSupported(JNIEnv* env, const JavaRef<jobject>& j_context);
-
-bool IsLowLatencyOutputSupported(JNIEnv* env, const JavaRef<jobject>& j_context);
-
 // Glue together an audio input and audio output to get an AudioDeviceModule.
 rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModuleFromInputAndOutput(
     AudioDeviceModule::AudioLayer audio_layer,