Add support for AAudio in native WebRTC on Android O and above

Bug: webrtc:8914
Change-Id: I016dd8fcebba1644c0a83e5f1460520545d4cdde
Reviewed-on: https://webrtc-review.googlesource.com/56180
Commit-Queue: Henrik Andreasson <henrika@webrtc.org>
Reviewed-by: Oskar Sundbom <ossu@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22467}
diff --git a/modules/audio_device/BUILD.gn b/modules/audio_device/BUILD.gn
index 81b7454..6cc9b20 100644
--- a/modules/audio_device/BUILD.gn
+++ b/modules/audio_device/BUILD.gn
@@ -118,6 +118,7 @@
     "../../rtc_base:checks",
     "../../rtc_base:deprecation",
     "../../rtc_base:rtc_base_approved",
+    "../../rtc_base:stringutils",
   ]
   if (!build_with_chromium && is_clang) {
     # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
@@ -232,6 +233,9 @@
   if (rtc_audio_device_plays_sinus_tone) {
     defines += [ "AUDIO_DEVICE_PLAYS_SINUS_TONE" ]
   }
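+  # Note: rtc_enable_android_aaudio is a GN build argument (assumed to default
+  # to false); it must be enabled explicitly, e.g. via `gn args`, to compile
+  # the AAudio backend below.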
+  if (rtc_enable_android_aaudio) {
+    defines += [ "AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO" ]
+  }
   if (rtc_include_internal_audio_device) {
     # TODO(bugs.webrtc.org/8850): remove this when the circular dependency will be fixed.
     check_includes = false
@@ -264,6 +268,17 @@
         "log",
         "OpenSLES",
       ]
+      if (rtc_enable_android_aaudio) {
+        sources += [
+          "android/aaudio_player.cc",
+          "android/aaudio_player.h",
+          "android/aaudio_recorder.cc",
+          "android/aaudio_recorder.h",
+          "android/aaudio_wrapper.cc",
+          "android/aaudio_wrapper.h",
+        ]
+        libs += [ "aaudio" ]
+      }
 
       if (build_with_mozilla) {
         include_dirs += [
diff --git a/modules/audio_device/android/aaudio_player.cc b/modules/audio_device/android/aaudio_player.cc
new file mode 100644
index 0000000..f8af965
--- /dev/null
+++ b/modules/audio_device/android/aaudio_player.cc
@@ -0,0 +1,227 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/aaudio_player.h"
+
+#include "api/array_view.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+enum AudioDeviceMessageType : uint32_t {
+  kMessageOutputStreamDisconnected,
+};
+
+AAudioPlayer::AAudioPlayer(AudioManager* audio_manager)
+    : main_thread_(rtc::Thread::Current()),
+      aaudio_(audio_manager, AAUDIO_DIRECTION_OUTPUT, this) {
+  RTC_LOG(INFO) << "ctor";
+  thread_checker_aaudio_.DetachFromThread();
+}
+
+AAudioPlayer::~AAudioPlayer() {
+  RTC_LOG(INFO) << "dtor";
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  Terminate();
+  RTC_LOG(INFO) << "#detected underruns: " << underrun_count_;
+}
+
+int AAudioPlayer::Init() {
+  RTC_LOG(INFO) << "Init";
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  RTC_DCHECK_EQ(aaudio_.audio_parameters().channels(), 1u);
+  return 0;
+}
+
+int AAudioPlayer::Terminate() {
+  RTC_LOG(INFO) << "Terminate";
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  StopPlayout();
+  return 0;
+}
+
+int AAudioPlayer::InitPlayout() {
+  RTC_LOG(INFO) << "InitPlayout";
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK(!playing_);
+  if (!aaudio_.Init()) {
+    return -1;
+  }
+  initialized_ = true;
+  return 0;
+}
+
+bool AAudioPlayer::PlayoutIsInitialized() const {
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  return initialized_;
+}
+
+int AAudioPlayer::StartPlayout() {
+  RTC_LOG(INFO) << "StartPlayout";
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  RTC_DCHECK(!playing_);
+  if (!initialized_) {
+    RTC_DLOG(LS_WARNING)
+        << "Playout cannot start since InitPlayout() must succeed first";
+    return 0;
+  }
+  if (fine_audio_buffer_) {
+    fine_audio_buffer_->ResetPlayout();
+  }
+  if (!aaudio_.Start()) {
+    return -1;
+  }
+  underrun_count_ = aaudio_.xrun_count();
+  first_data_callback_ = true;
+  playing_ = true;
+  return 0;
+}
+
+int AAudioPlayer::StopPlayout() {
+  RTC_LOG(INFO) << "StopPlayout";
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  if (!initialized_ || !playing_) {
+    return 0;
+  }
+  if (!aaudio_.Stop()) {
+    RTC_LOG(LS_ERROR) << "StopPlayout failed";
+    return -1;
+  }
+  thread_checker_aaudio_.DetachFromThread();
+  initialized_ = false;
+  playing_ = false;
+  return 0;
+}
+
+bool AAudioPlayer::Playing() const {
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  return playing_;
+}
+
+void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+  RTC_DLOG(INFO) << "AttachAudioBuffer";
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  audio_device_buffer_ = audioBuffer;
+  RTC_CHECK(audio_device_buffer_);
+  const AudioParameters audio_parameters = aaudio_.audio_parameters();
+  audio_device_buffer_->SetPlayoutSampleRate(audio_parameters.sample_rate());
+  audio_device_buffer_->SetPlayoutChannels(audio_parameters.channels());
+  // Create a modified audio buffer class which allows us to ask for any
+  // number of samples (and not only multiples of 10ms) to match the optimal
+  // buffer size per callback used by AAudio. Use an initial capacity of 50ms
+  // to ensure that the buffer can cache old data and at the same time be
+  // prepared for increased burst sizes in AAudio if underruns are detected.
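+  // Example (assuming 48 kHz mono): one 10ms buffer holds 480 samples, i.e.
+  // 960 bytes of 16-bit PCM, so the capacity is 5 * 960 = 4800 bytes.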
+  const size_t capacity = 5 * audio_parameters.GetBytesPer10msBuffer();
+  fine_audio_buffer_.reset(new FineAudioBuffer(
+      audio_device_buffer_, audio_parameters.sample_rate(), capacity));
+}
+
+int AAudioPlayer::SpeakerVolumeIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+void AAudioPlayer::OnErrorCallback(aaudio_result_t error) {
+  RTC_LOG(LS_ERROR) << "OnErrorCallback: " << AAudio_convertResultToText(error);
+  // TODO(henrika): investigate if we can use a thread checker here. Initial
+  // tests show that this callback can sometimes be called on a separate
+  // thread, but according to the documentation it should be on the same
+  // thread as the data callback.
+  // RTC_DCHECK_RUN_ON(&thread_checker_aaudio_);
+  if (aaudio_.stream_state() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+    // The stream is disconnected and any attempt to use it will return
+    // AAUDIO_ERROR_DISCONNECTED.
+    RTC_LOG(WARNING) << "Output stream disconnected";
+    // AAudio documentation states: "You should not close or reopen the stream
+    // from the callback, use another thread instead". A message is therefore
+    // sent to the main thread to do the restart operation.
+    RTC_DCHECK(main_thread_);
+    main_thread_->Post(RTC_FROM_HERE, this, kMessageOutputStreamDisconnected);
+  }
+}
+
+aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,
+                                                           int32_t num_frames) {
+  RTC_DCHECK_RUN_ON(&thread_checker_aaudio_);
+  // Log device id in first data callback to ensure that a valid device is
+  // utilized.
+  if (first_data_callback_) {
+    RTC_LOG(INFO) << "--- First output data callback: "
+                  << "device id=" << aaudio_.device_id();
+    first_data_callback_ = false;
+  }
+
+  // Check if the underrun count has increased. If it has, increase the buffer
+  // size by adding the size of a burst. It will reduce the risk of underruns
+  // at the expense of an increased latency.
+  // TODO(henrika): enable possibility to disable and/or tune the algorithm.
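+  // Example (assuming 192-frame bursts at 48 kHz): each detected underrun
+  // grows the buffer by one burst, i.e. 192 frames or 4ms of added latency.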
+  const int32_t underrun_count = aaudio_.xrun_count();
+  if (underrun_count > underrun_count_) {
+    RTC_LOG(LS_ERROR) << "Underrun detected: " << underrun_count;
+    underrun_count_ = underrun_count;
+    aaudio_.IncreaseOutputBufferSize();
+  }
+
+  // Estimate latency between writing an audio frame to the output stream and
+  // the time that same frame is played out on the output audio device.
+  latency_millis_ = aaudio_.EstimateLatencyMillis();
+  // TODO(henrika): use for development only.
+  if (aaudio_.frames_written() % (1000 * aaudio_.frames_per_burst()) == 0) {
+    RTC_DLOG(INFO) << "output latency: " << latency_millis_
+                   << ", num_frames: " << num_frames;
+  }
+
+  // Read audio data from the WebRTC source using the FineAudioBuffer object
+  // and write that data into |audio_data| to be played out by AAudio.
+  const size_t num_bytes =
+      sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
+  // Prime output with zeros during a short initial phase to avoid distortion.
+  // TODO(henrika): do more work to figure out whether the initial forced
+  // silence period is really needed.
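+  // Example (assuming 192-frame bursts at 48 kHz): 50 bursts corresponds to
+  // 50 * 192 / 48000 = 0.2 seconds of forced silence.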
+  if (aaudio_.frames_written() < 50 * aaudio_.frames_per_burst()) {
+    memset(audio_data, 0, num_bytes);
+  } else {
+    fine_audio_buffer_->GetPlayoutData(
+        rtc::ArrayView<int8_t>(static_cast<int8_t*>(audio_data), num_bytes),
+        static_cast<int>(latency_millis_ + 0.5));
+  }
+
+  // TODO(henrika): possibly add trace here to be included in systrace.
+  // See https://developer.android.com/studio/profile/systrace-commandline.html.
+  return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+void AAudioPlayer::OnMessage(rtc::Message* msg) {
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  switch (msg->message_id) {
+    case kMessageOutputStreamDisconnected:
+      HandleStreamDisconnected();
+      break;
+  }
+}
+
+void AAudioPlayer::HandleStreamDisconnected() {
+  RTC_DCHECK_RUN_ON(&main_thread_checker_);
+  RTC_DLOG(INFO) << "HandleStreamDisconnected";
+  if (!initialized_ || !playing_) {
+    return;
+  }
+  // Perform a restart by first closing the disconnected stream and then start
+  // a new stream; this time using the new (preferred) audio output device.
+  audio_device_buffer_->NativeAudioPlayoutInterrupted();
+  StopPlayout();
+  InitPlayout();
+  StartPlayout();
+}
+}  // namespace webrtc
diff --git a/modules/audio_device/android/aaudio_player.h b/modules/audio_device/android/aaudio_player.h
new file mode 100644
index 0000000..a92d508
--- /dev/null
+++ b/modules/audio_device/android/aaudio_player.h
@@ -0,0 +1,146 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_PLAYER_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_PLAYER_H_
+
+#include <aaudio/AAudio.h>
+#include <memory>
+
+#include "modules/audio_device/android/aaudio_wrapper.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/messagehandler.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+class AudioManager;
+
+// Implements low-latency 16-bit mono PCM audio output support for Android
+// using the C based AAudio API.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will DCHECK if any method is called on an invalid thread. Audio buffers
+// are requested on a dedicated high-priority thread owned by AAudio.
+//
+// The existing design forces the user to call InitPlayout() after StopPlayout()
+// to be able to call StartPlayout() again. This is in line with how the Java-
+// based implementation works.
+//
+// An audio stream can be disconnected, e.g. when an audio device is removed.
+// This implementation will restart the audio stream using the new preferred
+// device if such an event happens.
+//
+// Also supports automatic buffer-size adjustment based on underrun detections
+// where the internal AAudio buffer can be increased when needed. It will
+// reduce the risk of underruns (~glitches) at the expense of an increased
+// latency.
+class AAudioPlayer final : public AAudioObserverInterface,
+                           public rtc::MessageHandler {
+ public:
+  explicit AAudioPlayer(AudioManager* audio_manager);
+  ~AAudioPlayer();
+
+  int Init();
+  int Terminate();
+
+  int InitPlayout();
+  bool PlayoutIsInitialized() const;
+
+  int StartPlayout();
+  int StopPlayout();
+  bool Playing() const;
+
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+  // Not implemented in AAudio.
+  int SpeakerVolumeIsAvailable(bool& available);  // NOLINT
+  int SetSpeakerVolume(uint32_t volume) { return -1; }
+  int SpeakerVolume(uint32_t& volume) const { return -1; }        // NOLINT
+  int MaxSpeakerVolume(uint32_t& maxVolume) const { return -1; }  // NOLINT
+  int MinSpeakerVolume(uint32_t& minVolume) const { return -1; }  // NOLINT
+
+ protected:
+  // AAudioObserverInterface implementation.
+
+  // For an output stream, this function should render and write |num_frames|
+  // of data in the stream's current data format to the |audio_data| buffer.
+  // Called on a real-time thread owned by AAudio.
+  aaudio_data_callback_result_t OnDataCallback(void* audio_data,
+                                               int32_t num_frames) override;
+  // AAudio calls this function if any error occurs on a callback thread.
+  // Called on a real-time thread owned by AAudio.
+  void OnErrorCallback(aaudio_result_t error) override;
+
+  // rtc::MessageHandler used for restart messages from the error-callback
+  // thread to the main (creating) thread.
+  void OnMessage(rtc::Message* msg) override;
+
+ private:
+  // Closes the existing stream and starts a new stream.
+  void HandleStreamDisconnected();
+
+  // Ensures that methods are called from the same thread as this object is
+  // created on.
+  rtc::ThreadChecker main_thread_checker_;
+
+  // Stores thread ID in first call to AAudioPlayer::OnDataCallback from a
+  // real-time thread owned by AAudio. Detached during construction of this
+  // object.
+  rtc::ThreadChecker thread_checker_aaudio_;
+
+  // The thread on which this object is created.
+  rtc::Thread* main_thread_;
+
+  // Wraps all AAudio resources. Contains an output stream using the default
+  // output audio device. Can be accessed on both the main thread and the
+  // real-time thread owned by AAudio. See separate AAudio documentation about
+  // thread safety.
+  AAudioWrapper aaudio_;
+
+  // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
+  // in chunks of 10ms. It then allows this data to be pulled at a finer or
+  // coarser granularity. I.e., by interacting with this class instead of
+  // directly with the AudioDeviceBuffer, one can ask for any number of audio
+  // samples.
+  // Example: the native buffer size can be 192 audio frames at a 48kHz sample
+  // rate. WebRTC delivers 480 audio frames per 10ms but AAudio asks for 192
+  // in each callback (once every 4ms). This class can then ask for 192, and
+  // the FineAudioBuffer will request new data from WebRTC only approximately
+  // every second callback, caching non-utilized audio in between.
+  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+  // Counts number of detected underrun events reported by AAudio.
+  int32_t underrun_count_ = 0;
+
+  // True only for the first data callback in each audio session.
+  bool first_data_callback_ = true;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class and set by AudioDeviceModule::Create().
+  AudioDeviceBuffer* audio_device_buffer_ RTC_GUARDED_BY(main_thread_checker_) =
+      nullptr;
+
+  bool initialized_ RTC_GUARDED_BY(main_thread_checker_) = false;
+  bool playing_ RTC_GUARDED_BY(main_thread_checker_) = false;
+
+  // Estimated latency between writing an audio frame to the output stream and
+  // the time that same frame is played out on the output audio device.
+  double latency_millis_ RTC_GUARDED_BY(thread_checker_aaudio_) = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_PLAYER_H_
diff --git a/modules/audio_device/android/aaudio_recorder.cc b/modules/audio_device/android/aaudio_recorder.cc
new file mode 100644
index 0000000..346707b
--- /dev/null
+++ b/modules/audio_device/android/aaudio_recorder.cc
@@ -0,0 +1,221 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/aaudio_recorder.h"
+
+#include "api/array_view.h"
+#include "modules/audio_device/android/audio_manager.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/timeutils.h"
+#include "system_wrappers/include/sleep.h"
+
+namespace webrtc {
+
+enum AudioDeviceMessageType : uint32_t {
+  kMessageInputStreamDisconnected,
+};
+
+AAudioRecorder::AAudioRecorder(AudioManager* audio_manager)
+    : main_thread_(rtc::Thread::Current()),
+      aaudio_(audio_manager, AAUDIO_DIRECTION_INPUT, this) {
+  RTC_LOG(INFO) << "ctor";
+  thread_checker_aaudio_.DetachFromThread();
+}
+
+AAudioRecorder::~AAudioRecorder() {
+  RTC_LOG(INFO) << "dtor";
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  Terminate();
+  RTC_LOG(INFO) << "detected owerflows: " << overflow_count_;
+}
+
+int AAudioRecorder::Init() {
+  RTC_LOG(INFO) << "Init";
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK_EQ(aaudio_.audio_parameters().channels(), 1u);
+  return 0;
+}
+
+int AAudioRecorder::Terminate() {
+  RTC_LOG(INFO) << "Terminate";
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  StopRecording();
+  return 0;
+}
+
+int AAudioRecorder::InitRecording() {
+  RTC_LOG(INFO) << "InitRecording";
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK(!recording_);
+  if (!aaudio_.Init()) {
+    return -1;
+  }
+  initialized_ = true;
+  return 0;
+}
+
+int AAudioRecorder::StartRecording() {
+  RTC_LOG(INFO) << "StartRecording";
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(initialized_);
+  RTC_DCHECK(!recording_);
+  if (fine_audio_buffer_) {
+    fine_audio_buffer_->ResetRecord();
+  }
+  if (!aaudio_.Start()) {
+    return -1;
+  }
+  overflow_count_ = aaudio_.xrun_count();
+  first_data_callback_ = true;
+  recording_ = true;
+  return 0;
+}
+
+int AAudioRecorder::StopRecording() {
+  RTC_LOG(INFO) << "StopRecording";
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (!initialized_ || !recording_) {
+    return 0;
+  }
+  if (!aaudio_.Stop()) {
+    return -1;
+  }
+  thread_checker_aaudio_.DetachFromThread();
+  initialized_ = false;
+  recording_ = false;
+  return 0;
+}
+
+void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+  RTC_LOG(INFO) << "AttachAudioBuffer";
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  audio_device_buffer_ = audioBuffer;
+  RTC_CHECK(audio_device_buffer_);
+  const AudioParameters audio_parameters = aaudio_.audio_parameters();
+  audio_device_buffer_->SetRecordingSampleRate(audio_parameters.sample_rate());
+  audio_device_buffer_->SetRecordingChannels(audio_parameters.channels());
+  // Create a modified audio buffer class which allows us to deliver any number
+  // of samples (and not only multiples of 10ms which WebRTC uses) to match the
+  // native AAudio buffer size.
+  const size_t capacity = 5 * audio_parameters.GetBytesPer10msBuffer();
+  fine_audio_buffer_.reset(new FineAudioBuffer(
+      audio_device_buffer_, audio_parameters.sample_rate(), capacity));
+}
+
+int AAudioRecorder::EnableBuiltInAEC(bool enable) {
+  RTC_LOG(INFO) << "EnableBuiltInAEC: " << enable;
+  RTC_LOG(LS_ERROR) << "Not implemented";
+  return -1;
+}
+
+int AAudioRecorder::EnableBuiltInAGC(bool enable) {
+  RTC_LOG(INFO) << "EnableBuiltInAGC: " << enable;
+  RTC_LOG(LS_ERROR) << "Not implemented";
+  return -1;
+}
+
+int AAudioRecorder::EnableBuiltInNS(bool enable) {
+  RTC_LOG(INFO) << "EnableBuiltInNS: " << enable;
+  RTC_LOG(LS_ERROR) << "Not implemented";
+  return -1;
+}
+
+void AAudioRecorder::OnErrorCallback(aaudio_result_t error) {
+  RTC_LOG(LS_ERROR) << "OnErrorCallback: " << AAudio_convertResultToText(error);
+  // RTC_DCHECK(thread_checker_aaudio_.CalledOnValidThread());
+  if (aaudio_.stream_state() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+    // The stream is disconnected and any attempt to use it will return
+    // AAUDIO_ERROR_DISCONNECTED.
+    RTC_LOG(WARNING) << "Input stream disconnected => restart is required";
+    // AAudio documentation states: "You should not close or reopen the stream
+    // from the callback, use another thread instead". A message is therefore
+    // sent to the main thread to do the restart operation.
+    RTC_DCHECK(main_thread_);
+    main_thread_->Post(RTC_FROM_HERE, this, kMessageInputStreamDisconnected);
+  }
+}
+
+// Read and process |num_frames| of data from the |audio_data| buffer.
+// TODO(henrika): possibly add trace here to be included in systrace.
+// See https://developer.android.com/studio/profile/systrace-commandline.html.
+aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
+    void* audio_data,
+    int32_t num_frames) {
+  // TODO(henrika): figure out why we sometimes hit this one.
+  // RTC_DCHECK(thread_checker_aaudio_.CalledOnValidThread());
+  // RTC_LOG(INFO) << "OnDataCallback: " << num_frames;
+  // Drain the input buffer at first callback to ensure that it does not
+  // contain any old data. Will also ensure that the lowest possible latency
+  // is obtained.
+  if (first_data_callback_) {
+    RTC_LOG(INFO) << "--- First input data callback: "
+                  << "device id=" << aaudio_.device_id();
+    aaudio_.ClearInputStream(audio_data, num_frames);
+    first_data_callback_ = false;
+  }
+  // Check if the overflow counter has increased and if so log a warning.
+  // TODO(henrika): possibly add a UMA stat or capacity extension.
+  const int32_t overflow_count = aaudio_.xrun_count();
+  if (overflow_count > overflow_count_) {
+    RTC_LOG(LS_ERROR) << "Overflow detected: " << overflow_count;
+    overflow_count_ = overflow_count;
+  }
+  // Estimate the time between when an audio frame was recorded by the input
+  // device and when it can be read from the input stream.
+  latency_millis_ = aaudio_.EstimateLatencyMillis();
+  // TODO(henrika): use for development only.
+  if (aaudio_.frames_read() % (1000 * aaudio_.frames_per_burst()) == 0) {
+    RTC_DLOG(INFO) << "input latency: " << latency_millis_
+                   << ", num_frames: " << num_frames;
+  }
+  // Copy recorded audio in |audio_data| to the WebRTC sink using the
+  // FineAudioBuffer object.
+  const size_t num_bytes =
+      sizeof(int16_t) * aaudio_.samples_per_frame() * num_frames;
+  fine_audio_buffer_->DeliverRecordedData(
+      rtc::ArrayView<const int8_t>(static_cast<const int8_t*>(audio_data),
+                                   num_bytes),
+      static_cast<int>(latency_millis_ + 0.5));
+
+  return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+void AAudioRecorder::OnMessage(rtc::Message* msg) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  switch (msg->message_id) {
+    case kMessageInputStreamDisconnected:
+      HandleStreamDisconnected();
+      break;
+    default:
+      RTC_LOG(LS_ERROR) << "Invalid message id: " << msg->message_id;
+      break;
+  }
+}
+
+void AAudioRecorder::HandleStreamDisconnected() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_LOG(INFO) << "HandleStreamDisconnected";
+  if (!initialized_ || !recording_) {
+    return;
+  }
+  // Perform a restart by first closing the disconnected stream and then start
+  // a new stream; this time using the new (preferred) audio input device.
+  // TODO(henrika): resolve the issue where one restart attempt leads to a
+  // long sequence of new calls to OnErrorCallback().
+  // See b/73148976 for details.
+  audio_device_buffer_->NativeAudioRecordingInterrupted();
+  StopRecording();
+  InitRecording();
+  StartRecording();
+}
+}  // namespace webrtc
diff --git a/modules/audio_device/android/aaudio_recorder.h b/modules/audio_device/android/aaudio_recorder.h
new file mode 100644
index 0000000..1b78a01
--- /dev/null
+++ b/modules/audio_device/android/aaudio_recorder.h
@@ -0,0 +1,128 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_RECORDER_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_RECORDER_H_
+
+#include <aaudio/AAudio.h>
+#include <memory>
+
+#include "modules/audio_device/android/aaudio_wrapper.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/messagehandler.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class FineAudioBuffer;
+class AudioManager;
+
+// Implements low-latency 16-bit mono PCM audio input support for Android
+// using the C based AAudio API.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread. Audio buffers
+// are delivered on a dedicated high-priority thread owned by AAudio.
+//
+// The existing design forces the user to call InitRecording() after
+// StopRecording() to be able to call StartRecording() again. This is in line
+// with how the Java-based implementation works.
+//
+// TODO(henrika): add comments about device changes and adaptive buffer
+// management.
+class AAudioRecorder : public AAudioObserverInterface,
+                       public rtc::MessageHandler {
+ public:
+  explicit AAudioRecorder(AudioManager* audio_manager);
+  ~AAudioRecorder();
+
+  int Init();
+  int Terminate();
+
+  int InitRecording();
+  bool RecordingIsInitialized() const { return initialized_; }
+
+  int StartRecording();
+  int StopRecording();
+  bool Recording() const { return recording_; }
+
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+  double latency_millis() const { return latency_millis_; }
+
+  // TODO(henrika): add support using AAudio APIs when available.
+  int EnableBuiltInAEC(bool enable);
+  int EnableBuiltInAGC(bool enable);
+  int EnableBuiltInNS(bool enable);
+
+ protected:
+  // AAudioObserverInterface implementation.
+
+  // For an input stream, this function should read |num_frames| of recorded
+  // data, in the stream's current data format, from the |audio_data| buffer.
+  // Called on a real-time thread owned by AAudio.
+  aaudio_data_callback_result_t OnDataCallback(void* audio_data,
+                                               int32_t num_frames) override;
+
+  // AAudio calls this function if any error occurs on a callback thread.
+  // Called on a real-time thread owned by AAudio.
+  void OnErrorCallback(aaudio_result_t error) override;
+
+  // rtc::MessageHandler used for restart messages.
+  void OnMessage(rtc::Message* msg) override;
+
+ private:
+  // Closes the existing stream and starts a new stream.
+  void HandleStreamDisconnected();
+
+  // Ensures that methods are called from the same thread as this object is
+  // created on.
+  rtc::ThreadChecker thread_checker_;
+
+  // Stores thread ID in first call to AAudioRecorder::OnDataCallback from a
+  // real-time thread owned by AAudio. Detached during construction of this
+  // object.
+  rtc::ThreadChecker thread_checker_aaudio_;
+
+  // The thread on which this object is created.
+  rtc::Thread* main_thread_;
+
+  // Wraps all AAudio resources. Contains an input stream using the default
+  // input audio device.
+  AAudioWrapper aaudio_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class and set by AudioDeviceModule::Create().
+  AudioDeviceBuffer* audio_device_buffer_ = nullptr;
+
+  bool initialized_ = false;
+  bool recording_ = false;
+
+  // Consumes audio of native buffer size and feeds the WebRTC layer with 10ms
+  // chunks of audio.
+  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+  // Counts number of detected overflow events reported by AAudio.
+  int32_t overflow_count_ = 0;
+
+  // Estimated time between when an audio frame was recorded by the input
+  // device and when it can be read from the input stream.
+  double latency_millis_ = 0;
+
+  // True only for the first data callback in each audio session.
+  bool first_data_callback_ = true;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_RECORDER_H_
diff --git a/modules/audio_device/android/aaudio_wrapper.cc b/modules/audio_device/android/aaudio_wrapper.cc
new file mode 100644
index 0000000..d95a9b4
--- /dev/null
+++ b/modules/audio_device/android/aaudio_wrapper.cc
@@ -0,0 +1,499 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/android/aaudio_wrapper.h"
+
+#include "modules/audio_device/android/audio_manager.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/timeutils.h"
+
+#define LOG_ON_ERROR(op)                                                      \
+  do {                                                                        \
+    aaudio_result_t result = (op);                                            \
+    if (result != AAUDIO_OK) {                                                \
+      RTC_LOG(LS_ERROR) << #op << ": " << AAudio_convertResultToText(result); \
+    }                                                                         \
+  } while (0)
+
+#define RETURN_ON_ERROR(op, ...)                                              \
+  do {                                                                        \
+    aaudio_result_t result = (op);                                            \
+    if (result != AAUDIO_OK) {                                                \
+      RTC_LOG(LS_ERROR) << #op << ": " << AAudio_convertResultToText(result); \
+      return __VA_ARGS__;                                                     \
+    }                                                                         \
+  } while (0)
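+
+// Example usage: RETURN_ON_ERROR(AAudioStream_requestStart(stream_), false)
+// logs the failed operation and makes the calling function return false if
+// the AAudio call does not return AAUDIO_OK.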
+
+namespace webrtc {
+
+namespace {
+
+const char* DirectionToString(aaudio_direction_t direction) {
+  switch (direction) {
+    case AAUDIO_DIRECTION_OUTPUT:
+      return "OUTPUT";
+    case AAUDIO_DIRECTION_INPUT:
+      return "INPUT";
+    default:
+      return "UNKNOWN";
+  }
+}
+
+const char* SharingModeToString(aaudio_sharing_mode_t mode) {
+  switch (mode) {
+    case AAUDIO_SHARING_MODE_EXCLUSIVE:
+      return "EXCLUSIVE";
+    case AAUDIO_SHARING_MODE_SHARED:
+      return "SHARED";
+    default:
+      return "UNKNOWN";
+  }
+}
+
+const char* PerformanceModeToString(aaudio_performance_mode_t mode) {
+  switch (mode) {
+    case AAUDIO_PERFORMANCE_MODE_NONE:
+      return "NONE";
+    case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
+      return "POWER_SAVING";
+    case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
+      return "LOW_LATENCY";
+    default:
+      return "UNKNOWN";
+  }
+}
+
+const char* FormatToString(int32_t id) {
+  switch (id) {
+    case AAUDIO_FORMAT_INVALID:
+      return "INVALID";
+    case AAUDIO_FORMAT_UNSPECIFIED:
+      return "UNSPECIFIED";
+    case AAUDIO_FORMAT_PCM_I16:
+      return "PCM_I16";
+    case AAUDIO_FORMAT_PCM_FLOAT:
+      return "FLOAT";
+    default:
+      return "UNKNOWN";
+  }
+}
+
+void ErrorCallback(AAudioStream* stream,
+                   void* user_data,
+                   aaudio_result_t error) {
+  RTC_DCHECK(user_data);
+  AAudioWrapper* aaudio_wrapper = reinterpret_cast<AAudioWrapper*>(user_data);
+  RTC_LOG(WARNING) << "ErrorCallback: "
+                   << DirectionToString(aaudio_wrapper->direction());
+  RTC_DCHECK(aaudio_wrapper->observer());
+  aaudio_wrapper->observer()->OnErrorCallback(error);
+}
+
+aaudio_data_callback_result_t DataCallback(AAudioStream* stream,
+                                           void* user_data,
+                                           void* audio_data,
+                                           int32_t num_frames) {
+  RTC_DCHECK(user_data);
+  RTC_DCHECK(audio_data);
+  AAudioWrapper* aaudio_wrapper = reinterpret_cast<AAudioWrapper*>(user_data);
+  RTC_DCHECK(aaudio_wrapper->observer());
+  return aaudio_wrapper->observer()->OnDataCallback(audio_data, num_frames);
+}
+
+// Wraps the stream builder object to ensure that it is released properly when
+// the stream builder goes out of scope.
+class ScopedStreamBuilder {
+ public:
+  ScopedStreamBuilder() {
+    LOG_ON_ERROR(AAudio_createStreamBuilder(&builder_));
+    RTC_DCHECK(builder_);
+  }
+  ~ScopedStreamBuilder() {
+    if (builder_) {
+      LOG_ON_ERROR(AAudioStreamBuilder_delete(builder_));
+    }
+  }
+
+  AAudioStreamBuilder* get() const { return builder_; }
+
+ private:
+  AAudioStreamBuilder* builder_ = nullptr;
+};
+
+}  // namespace
+
+AAudioWrapper::AAudioWrapper(AudioManager* audio_manager,
+                             aaudio_direction_t direction,
+                             AAudioObserverInterface* observer)
+    : direction_(direction), observer_(observer) {
+  RTC_LOG(INFO) << "ctor";
+  RTC_DCHECK(observer_);
+  if (direction_ == AAUDIO_DIRECTION_OUTPUT) {
+    audio_parameters_ = audio_manager->GetPlayoutAudioParameters();
+  } else {
+    audio_parameters_ = audio_manager->GetRecordAudioParameters();
+  }
+  aaudio_thread_checker_.DetachFromThread();
+  RTC_LOG(INFO) << audio_parameters_.ToString();
+}
+
+AAudioWrapper::~AAudioWrapper() {
+  RTC_LOG(INFO) << "dtor";
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(!stream_);
+}
+
+bool AAudioWrapper::Init() {
+  RTC_LOG(INFO) << "Init";
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  // Creates a stream builder which can be used to open an audio stream.
+  ScopedStreamBuilder builder;
+  // Configures the stream builder using audio parameters given at construction.
+  SetStreamConfiguration(builder.get());
+  // Opens a stream based on options in the stream builder.
+  if (!OpenStream(builder.get())) {
+    return false;
+  }
+  // Ensures that the opened stream could activate the requested settings.
+  if (!VerifyStreamConfiguration()) {
+    return false;
+  }
+  // Optimizes the buffer scheme for lowest possible latency and creates
+  // additional buffer logic to match the 10ms buffer size used in WebRTC.
+  if (!OptimizeBuffers()) {
+    return false;
+  }
+  LogStreamState();
+  return true;
+}
+
+bool AAudioWrapper::Start() {
+  RTC_LOG(INFO) << "Start";
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  // TODO(henrika): this state check might not be needed.
+  aaudio_stream_state_t current_state = AAudioStream_getState(stream_);
+  if (current_state != AAUDIO_STREAM_STATE_OPEN) {
+    RTC_LOG(LS_ERROR) << "Invalid state: "
+                      << AAudio_convertStreamStateToText(current_state);
+    return false;
+  }
+  // Asynchronous request for the stream to start.
+  RETURN_ON_ERROR(AAudioStream_requestStart(stream_), false);
+  LogStreamState();
+  return true;
+}
+
+bool AAudioWrapper::Stop() {
+  RTC_LOG(INFO) << "Stop: " << DirectionToString(direction());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  // Asynchronous request for the stream to stop.
+  RETURN_ON_ERROR(AAudioStream_requestStop(stream_), false);
+  CloseStream();
+  aaudio_thread_checker_.DetachFromThread();
+  return true;
+}
+
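+// For input streams the burst size is used as the estimate; for output
+// streams the estimate is derived from the AAudio timestamp model: given a
+// known (frame index, presentation time) pair from
+// AAudioStream_getTimestamp(), the presentation time of the next written
+// frame is extrapolated as
+//   t_present(next) = t_present(known) + (next_index - known_index) / rate,
+// and the latency estimate is t_present(next) - t_now.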
+double AAudioWrapper::EstimateLatencyMillis() const {
+  RTC_DCHECK(stream_);
+  double latency_millis = 0.0;
+  if (direction() == AAUDIO_DIRECTION_INPUT) {
+    // For input streams, the best guess we can make is to use the current
+    // burst size as the delay estimate.
+    latency_millis = static_cast<double>(frames_per_burst()) / sample_rate() *
+                     rtc::kNumMillisecsPerSec;
+  } else {
+    int64_t existing_frame_index;
+    int64_t existing_frame_presentation_time;
+    // Get the time at which a particular frame was presented to audio hardware.
+    aaudio_result_t result = AAudioStream_getTimestamp(
+        stream_, CLOCK_MONOTONIC, &existing_frame_index,
+        &existing_frame_presentation_time);
+    // Results are only valid when the stream is in AAUDIO_STREAM_STATE_STARTED.
+    if (result == AAUDIO_OK) {
+      // Get write index for next audio frame.
+      int64_t next_frame_index = frames_written();
+      // Number of frames between next frame and the existing frame.
+      int64_t frame_index_delta = next_frame_index - existing_frame_index;
+      // Assume the next frame will be written now.
+      int64_t next_frame_write_time = rtc::TimeNanos();
+      // Calculate time when next frame will be presented to the hardware taking
+      // sample rate into account.
+      int64_t frame_time_delta =
+          (frame_index_delta * rtc::kNumNanosecsPerSec) / sample_rate();
+      int64_t next_frame_presentation_time =
+          existing_frame_presentation_time + frame_time_delta;
+      // Derive a latency estimate given results above.
+      latency_millis = static_cast<double>(next_frame_presentation_time -
+                                           next_frame_write_time) /
+                       rtc::kNumNanosecsPerMillisec;
+    }
+  }
+  return latency_millis;
+}
+
+// Returns true if the buffer size was increased by one burst, or false if it
+// could not be increased.
+bool AAudioWrapper::IncreaseOutputBufferSize() {
+  RTC_LOG(INFO) << "IncreaseOutputBufferSize";
+  RTC_DCHECK(stream_);
+  RTC_DCHECK(aaudio_thread_checker_.CalledOnValidThread());
+  RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_OUTPUT);
+  aaudio_result_t buffer_size = AAudioStream_getBufferSizeInFrames(stream_);
+  // Try to increase the buffer size by one burst to reduce the risk of
+  // underruns.
+  buffer_size += frames_per_burst();
+  // Verify that the new buffer size is not larger than max capacity.
+  // TODO(henrika): keep track of case when we reach the capacity limit.
+  const int32_t max_buffer_size = buffer_capacity_in_frames();
+  if (buffer_size > max_buffer_size) {
+    RTC_LOG(LS_ERROR) << "Required buffer size (" << buffer_size
+                      << ") is higher than max: " << max_buffer_size;
+    return false;
+  }
+  RTC_LOG(INFO) << "Updating buffer size to: " << buffer_size
+                << " (max=" << max_buffer_size << ")";
+  buffer_size = AAudioStream_setBufferSizeInFrames(stream_, buffer_size);
+  if (buffer_size < 0) {
+    RTC_LOG(LS_ERROR) << "Failed to change buffer size: "
+                      << AAudio_convertResultToText(buffer_size);
+    return false;
+  }
+  RTC_LOG(INFO) << "Buffer size changed to: " << buffer_size;
+  return true;
+}
+
+void AAudioWrapper::ClearInputStream(void* audio_data, int32_t num_frames) {
+  RTC_LOG(INFO) << "ClearInputStream";
+  RTC_DCHECK(stream_);
+  RTC_DCHECK(aaudio_thread_checker_.CalledOnValidThread());
+  RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_INPUT);
+  aaudio_result_t cleared_frames = 0;
+  do {
+    cleared_frames = AAudioStream_read(stream_, audio_data, num_frames, 0);
+  } while (cleared_frames > 0);
+}
+
+AAudioObserverInterface* AAudioWrapper::observer() const {
+  return observer_;
+}
+
+AudioParameters AAudioWrapper::audio_parameters() const {
+  return audio_parameters_;
+}
+
+int32_t AAudioWrapper::samples_per_frame() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getSamplesPerFrame(stream_);
+}
+
+int32_t AAudioWrapper::buffer_size_in_frames() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getBufferSizeInFrames(stream_);
+}
+
+int32_t AAudioWrapper::buffer_capacity_in_frames() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getBufferCapacityInFrames(stream_);
+}
+
+int32_t AAudioWrapper::device_id() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getDeviceId(stream_);
+}
+
+int32_t AAudioWrapper::xrun_count() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getXRunCount(stream_);
+}
+
+int32_t AAudioWrapper::format() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getFormat(stream_);
+}
+
+int32_t AAudioWrapper::sample_rate() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getSampleRate(stream_);
+}
+
+int32_t AAudioWrapper::channel_count() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getChannelCount(stream_);
+}
+
+int32_t AAudioWrapper::frames_per_callback() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getFramesPerDataCallback(stream_);
+}
+
+aaudio_sharing_mode_t AAudioWrapper::sharing_mode() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getSharingMode(stream_);
+}
+
+aaudio_performance_mode_t AAudioWrapper::performance_mode() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getPerformanceMode(stream_);
+}
+
+aaudio_stream_state_t AAudioWrapper::stream_state() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getState(stream_);
+}
+
+int64_t AAudioWrapper::frames_written() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getFramesWritten(stream_);
+}
+
+int64_t AAudioWrapper::frames_read() const {
+  RTC_DCHECK(stream_);
+  return AAudioStream_getFramesRead(stream_);
+}
+
+void AAudioWrapper::SetStreamConfiguration(AAudioStreamBuilder* builder) {
+  RTC_LOG(INFO) << "SetStreamConfiguration";
+  RTC_DCHECK(builder);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  // Request usage of default primary output/input device.
+  // TODO(henrika): verify that default device follows Java APIs.
+  // https://developer.android.com/reference/android/media/AudioDeviceInfo.html.
+  AAudioStreamBuilder_setDeviceId(builder, AAUDIO_UNSPECIFIED);
+  // Use preferred sample rate given by the audio parameters.
+  AAudioStreamBuilder_setSampleRate(builder, audio_parameters().sample_rate());
+  // Use preferred channel configuration given by the audio parameters.
+  AAudioStreamBuilder_setChannelCount(builder, audio_parameters().channels());
+  // Always use 16-bit PCM audio sample format.
+  AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_I16);
+  // TODO(henrika): investigate effect of using AAUDIO_SHARING_MODE_EXCLUSIVE.
+  // Ask for exclusive mode since this will give us the lowest possible latency.
+  // If exclusive mode isn't available, shared mode will be used instead.
+  AAudioStreamBuilder_setSharingMode(builder, AAUDIO_SHARING_MODE_SHARED);
+  // Use the direction that was given at construction.
+  AAudioStreamBuilder_setDirection(builder, direction_);
+  // TODO(henrika): investigate performance using different performance modes.
+  AAudioStreamBuilder_setPerformanceMode(builder,
+                                         AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+  // Given that WebRTC applications require low latency, our audio stream uses
+  // an asynchronous callback function to transfer data to and from the
+  // application. AAudio executes the callback in a higher-priority thread that
+  // has better performance.
+  AAudioStreamBuilder_setDataCallback(builder, DataCallback, this);
+  // Request that AAudio calls this function if any error occurs on a callback
+  // thread.
+  AAudioStreamBuilder_setErrorCallback(builder, ErrorCallback, this);
+}
+
+bool AAudioWrapper::OpenStream(AAudioStreamBuilder* builder) {
+  RTC_LOG(INFO) << "OpenStream";
+  RTC_DCHECK(builder);
+  AAudioStream* stream = nullptr;
+  RETURN_ON_ERROR(AAudioStreamBuilder_openStream(builder, &stream), false);
+  stream_ = stream;
+  LogStreamConfiguration();
+  return true;
+}
+
+void AAudioWrapper::CloseStream() {
+  RTC_LOG(INFO) << "CloseStream";
+  RTC_DCHECK(stream_);
+  LOG_ON_ERROR(AAudioStream_close(stream_));
+  stream_ = nullptr;
+}
+
+void AAudioWrapper::LogStreamConfiguration() {
+  RTC_DCHECK(stream_);
+  char ss_buf[1024];
+  rtc::SimpleStringBuilder ss(ss_buf);
+  ss << "Stream Configuration: ";
+  ss << "sample rate=" << sample_rate() << ", channels=" << channel_count();
+  ss << ", samples per frame=" << samples_per_frame();
+  ss << ", format=" << FormatToString(format());
+  ss << ", sharing mode=" << SharingModeToString(sharing_mode());
+  ss << ", performance mode=" << PerformanceModeToString(performance_mode());
+  ss << ", direction=" << DirectionToString(direction());
+  ss << ", device id=" << AAudioStream_getDeviceId(stream_);
+  ss << ", frames per callback=" << frames_per_callback();
+  RTC_LOG(INFO) << ss.str();
+}
+
+void AAudioWrapper::LogStreamState() {
+  RTC_LOG(INFO) << "AAudio stream state: "
+                << AAudio_convertStreamStateToText(stream_state());
+}
+
+bool AAudioWrapper::VerifyStreamConfiguration() {
+  RTC_LOG(INFO) << "VerifyStreamConfiguration";
+  RTC_DCHECK(stream_);
+  // TODO(henrika): should we verify device ID as well?
+  if (AAudioStream_getSampleRate(stream_) != audio_parameters().sample_rate()) {
+    RTC_LOG(LS_ERROR) << "Stream unable to use requested sample rate";
+    return false;
+  }
+  if (AAudioStream_getChannelCount(stream_) !=
+      static_cast<int32_t>(audio_parameters().channels())) {
+    RTC_LOG(LS_ERROR) << "Stream unable to use requested channel count";
+    return false;
+  }
+  if (AAudioStream_getFormat(stream_) != AAUDIO_FORMAT_PCM_I16) {
+    RTC_LOG(LS_ERROR) << "Stream unable to use requested format";
+    return false;
+  }
+  if (AAudioStream_getSharingMode(stream_) != AAUDIO_SHARING_MODE_SHARED) {
+    RTC_LOG(LS_ERROR) << "Stream unable to use requested sharing mode";
+    return false;
+  }
+  if (AAudioStream_getPerformanceMode(stream_) !=
+      AAUDIO_PERFORMANCE_MODE_LOW_LATENCY) {
+    RTC_LOG(LS_ERROR) << "Stream unable to use requested performance mode";
+    return false;
+  }
+  if (AAudioStream_getDirection(stream_) != direction()) {
+    RTC_LOG(LS_ERROR) << "Stream direction could not be set";
+    return false;
+  }
+  if (AAudioStream_getSamplesPerFrame(stream_) !=
+      static_cast<int32_t>(audio_parameters().channels())) {
+    RTC_LOG(LS_ERROR) << "Invalid number of samples per frame";
+    return false;
+  }
+  return true;
+}
+
+bool AAudioWrapper::OptimizeBuffers() {
+  RTC_LOG(INFO) << "OptimizeBuffers";
+  RTC_DCHECK(stream_);
+  // Maximum number of frames that can be filled without blocking.
+  RTC_LOG(INFO) << "max buffer capacity in frames: "
+                << buffer_capacity_in_frames();
+  // Query the number of frames that the application should read or write at
+  // one time for optimal performance.
+  int32_t frames_per_burst = AAudioStream_getFramesPerBurst(stream_);
+  RTC_LOG(INFO) << "frames per burst for optimal performance: "
+                << frames_per_burst;
+  frames_per_burst_ = frames_per_burst;
+  if (direction() == AAUDIO_DIRECTION_INPUT) {
+    // There is no point in calling setBufferSizeInFrames() for input streams
+    // since it has no effect on the performance (latency in this case).
+    return true;
+  }
+  // Set the buffer size equal to the burst size to guarantee the lowest
+  // possible latency.
+  // This size might change for output streams if underruns are detected and
+  // automatic buffer adjustment is enabled.
+  AAudioStream_setBufferSizeInFrames(stream_, frames_per_burst);
+  int32_t buffer_size = AAudioStream_getBufferSizeInFrames(stream_);
+  if (buffer_size != frames_per_burst) {
+    RTC_LOG(LS_ERROR) << "Failed to use optimal buffer burst size";
+    return false;
+  }
+  // The buffer size now matches the burst size.
+  RTC_LOG(INFO) << "buffer burst size in frames: " << buffer_size;
+  return true;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_device/android/aaudio_wrapper.h b/modules/audio_device/android/aaudio_wrapper.h
new file mode 100644
index 0000000..4915092
--- /dev/null
+++ b/modules/audio_device/android/aaudio_wrapper.h
@@ -0,0 +1,127 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_WRAPPER_H_
+#define MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_WRAPPER_H_
+
+#include <aaudio/AAudio.h>
+
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+
+class AudioManager;
+
+// AAudio callback interface for audio transport to/from the AAudio stream.
+// The interface also contains an error callback method for notifications of
+// e.g. device changes.
+class AAudioObserverInterface {
+ public:
+  // Audio data will be passed in or out of this function depending on the
+  // direction of the audio stream. This callback function will be called on a
+  // real-time thread owned by AAudio.
+  virtual aaudio_data_callback_result_t OnDataCallback(void* audio_data,
+                                                       int32_t num_frames) = 0;
+  // AAudio will call this function if any error occurs on a callback thread.
+  // In response, this function could signal or launch another thread to reopen
+  // a stream on another device. Do not reopen the stream in this callback.
+  virtual void OnErrorCallback(aaudio_result_t error) = 0;
+
+ protected:
+  virtual ~AAudioObserverInterface() {}
+};
+
+// Utility class which wraps the C-based AAudio API into a more handy C++ class
+// where the underlying resources (AAudioStreamBuilder and AAudioStream) are
+// encapsulated. User must set the direction (in or out) at construction since
+// it defines the stream type and the direction of the data flow in the
+// AAudioObserverInterface.
+//
+// AAudio is a new Android C API introduced in the Android O (26) release.
+// It is designed for high-performance audio applications that require low
+// latency. Applications communicate with AAudio by reading and writing data
+// to streams.
+//
+// Each stream is attached to a single audio device, where each audio device
+// has a unique ID. The ID can be used to bind an audio stream to a specific
+// audio device but this implementation lets AAudio choose the default primary
+// device instead (device selection takes place in Java). A stream can only
+// move data in one direction. When a stream is opened, Android checks to
+// ensure that the audio device and stream direction agree.
+class AAudioWrapper {
+ public:
+  AAudioWrapper(AudioManager* audio_manager,
+                aaudio_direction_t direction,
+                AAudioObserverInterface* observer);
+  ~AAudioWrapper();
+
+  bool Init();
+  bool Start();
+  bool Stop();
+
+  // For output streams: estimates latency between writing an audio frame to
+  // the output stream and the time that same frame is played out on the output
+  // audio device.
+  // For input streams: estimates latency between reading an audio frame from
+  // the input stream and the time that same frame was recorded on the input
+  // audio device.
+  double EstimateLatencyMillis() const;
+
+  // Increases the internal buffer size for output streams by one burst size to
+  // reduce the risk of underruns. Can be used while a stream is active.
+  bool IncreaseOutputBufferSize();
+
+  // Drains the recording stream of any existing data by reading from it until
+  // it's empty. Can be used to clear out old data before starting a new audio
+  // session.
+  void ClearInputStream(void* audio_data, int32_t num_frames);
+
+  AAudioObserverInterface* observer() const;
+  AudioParameters audio_parameters() const;
+  int32_t samples_per_frame() const;
+  int32_t buffer_size_in_frames() const;
+  int32_t buffer_capacity_in_frames() const;
+  int32_t device_id() const;
+  int32_t xrun_count() const;
+  int32_t format() const;
+  int32_t sample_rate() const;
+  int32_t channel_count() const;
+  int32_t frames_per_callback() const;
+  aaudio_sharing_mode_t sharing_mode() const;
+  aaudio_performance_mode_t performance_mode() const;
+  aaudio_stream_state_t stream_state() const;
+  int64_t frames_written() const;
+  int64_t frames_read() const;
+  aaudio_direction_t direction() const { return direction_; }
+  AAudioStream* stream() const { return stream_; }
+  int32_t frames_per_burst() const { return frames_per_burst_; }
+
+ private:
+  void SetStreamConfiguration(AAudioStreamBuilder* builder);
+  bool OpenStream(AAudioStreamBuilder* builder);
+  void CloseStream();
+  void LogStreamConfiguration();
+  void LogStreamState();
+  bool VerifyStreamConfiguration();
+  bool OptimizeBuffers();
+
+  rtc::ThreadChecker thread_checker_;
+  rtc::ThreadChecker aaudio_thread_checker_;
+  AudioParameters audio_parameters_;
+  const aaudio_direction_t direction_;
+  AAudioObserverInterface* observer_ = nullptr;
+  AAudioStream* stream_ = nullptr;
+  int32_t frames_per_burst_ = 0;
+};
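+
+// Usage sketch (a minimal, hypothetical example; |observer| must implement
+// AAudioObserverInterface and |audio_manager| must outlive the wrapper):
+//
+//   AAudioWrapper aaudio(audio_manager, AAUDIO_DIRECTION_OUTPUT, observer);
+//   if (aaudio.Init() && aaudio.Start()) {
+//     // AAudio now invokes observer->OnDataCallback() on a real-time thread
+//     // until the stream is stopped.
+//     aaudio.Stop();
+//   }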
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_ANDROID_AAUDIO_WRAPPER_H_
diff --git a/modules/audio_device/android/audio_device_unittest.cc b/modules/audio_device/android/audio_device_unittest.cc
index 4a9262b..c297aaa6 100644
--- a/modules/audio_device/android/audio_device_unittest.cc
+++ b/modules/audio_device/android/audio_device_unittest.cc
@@ -405,7 +405,7 @@
                                       const int32_t clockDrift,
                                       const uint32_t currentMicLevel,
                                       const bool keyPressed,
-                                      uint32_t& newMicLevel) {
+                                      uint32_t& newMicLevel) {  // NOLINT
     EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
     rec_count_++;
     // Process the recorded audio stream if an AudioStreamInterface
@@ -424,7 +424,7 @@
                                const size_t nChannels,
                                const uint32_t samplesPerSec,
                                void* audioSamples,
-                               size_t& nSamplesOut,
+                               size_t& nSamplesOut,  // NOLINT
                                int64_t* elapsed_time_ms,
                                int64_t* ntp_time_ms) {
     EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
@@ -684,8 +684,11 @@
   const AudioDeviceModule::AudioLayer audio_layer = GetActiveAudioLayer();
   bool low_latency_output = audio_manager()->IsLowLatencyPlayoutSupported();
   bool low_latency_input = audio_manager()->IsLowLatencyRecordSupported();
+  bool aaudio = audio_manager()->IsAAudioSupported();
   AudioDeviceModule::AudioLayer expected_audio_layer;
-  if (low_latency_output && low_latency_input) {
+  if (aaudio) {
+    expected_audio_layer = AudioDeviceModule::kAndroidAAudioAudio;
+  } else if (low_latency_output && low_latency_input) {
     expected_audio_layer = AudioDeviceModule::kAndroidOpenSLESAudio;
   } else if (low_latency_output && !low_latency_input) {
     expected_audio_layer =
@@ -723,6 +726,40 @@
   EXPECT_EQ(expected_layer, active_layer);
 }
 
+// TODO(bugs.webrtc.org/8914)
+#if !defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
+  DISABLED_CorrectAudioLayerIsUsedForAAudioInBothDirections
+#else
+#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
+  CorrectAudioLayerIsUsedForAAudioInBothDirections
+#endif
+TEST_F(AudioDeviceTest,
+       MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections) {
+  AudioDeviceModule::AudioLayer expected_layer =
+      AudioDeviceModule::kAndroidAAudioAudio;
+  AudioDeviceModule::AudioLayer active_layer =
+      TestActiveAudioLayer(expected_layer);
+  EXPECT_EQ(expected_layer, active_layer);
+}
+
+// TODO(bugs.webrtc.org/8914)
+#if !defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
+  DISABLED_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
+#else
+#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
+  CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
+#endif
+TEST_F(AudioDeviceTest,
+       MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo) {
+  AudioDeviceModule::AudioLayer expected_layer =
+      AudioDeviceModule::kAndroidJavaInputAndAAudioOutputAudio;
+  AudioDeviceModule::AudioLayer active_layer =
+      TestActiveAudioLayer(expected_layer);
+  EXPECT_EQ(expected_layer, active_layer);
+}
+
 // The Android ADM supports two different delay reporting modes. One for the
 // low-latency output path (in combination with OpenSL ES), and one for the
 // high-latency output path (Java backends in both directions). These two tests
@@ -873,19 +910,15 @@
 
 // Start recording and verify that the native audio layer starts feeding real
 // audio samples via the RecordedDataIsAvailable callback.
+// TODO(henrika): investigate if it is possible to perform a sanity check of
+// delay estimates as well (argument #6).
 TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
   MockAudioTransportAndroid mock(kRecording);
   mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
-  EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(),
-                                            record_frames_per_10ms_buffer(),
-                                            kBytesPerSample,
-                                            record_channels(),
-                                            record_sample_rate(),
-                                            total_delay_ms(),
-                                            0,
-                                            0,
-                                            false,
-                                            _))
+  EXPECT_CALL(
+      mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
+                                    kBytesPerSample, record_channels(),
+                                    record_sample_rate(), _, 0, 0, false, _))
       .Times(AtLeast(kNumCallbacks));
 
   EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
@@ -907,16 +940,10 @@
                                      NotNull(),
                                      _, _, _))
       .Times(AtLeast(kNumCallbacks));
-  EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(),
-                                            record_frames_per_10ms_buffer(),
-                                            kBytesPerSample,
-                                            record_channels(),
-                                            record_sample_rate(),
-                                            total_delay_ms(),
-                                            0,
-                                            0,
-                                            false,
-                                            _))
+  EXPECT_CALL(
+      mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
+                                    kBytesPerSample, record_channels(),
+                                    record_sample_rate(), _, 0, 0, false, _))
       .Times(AtLeast(kNumCallbacks));
   EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
   StartPlayout();
diff --git a/modules/audio_device/android/audio_manager.cc b/modules/audio_device/android/audio_manager.cc
index 16995d0..63020bb 100644
--- a/modules/audio_device/android/audio_manager.cc
+++ b/modules/audio_device/android/audio_manager.cc
@@ -71,7 +71,7 @@
   RTC_LOG(INFO) << "ctor";
   RTC_CHECK(j_environment_);
   JNINativeMethod native_methods[] = {
-      {"nativeCacheAudioParameters", "(IIIZZZZZZIIJ)V",
+      {"nativeCacheAudioParameters", "(IIIZZZZZZZIIJ)V",
        reinterpret_cast<void*>(&webrtc::AudioManager::CacheAudioParameters)}};
   j_native_registration_ = j_environment_->RegisterNatives(
       "org/webrtc/voiceengine/WebRtcAudioManager", native_methods,
@@ -213,6 +213,15 @@
   return pro_audio_;
 }
 
+// TODO(henrika): improve comments...
+bool AudioManager::IsAAudioSupported() const {
+#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+  return a_audio_;
+#else
+  return false;
+#endif
+}
+
 bool AudioManager::IsStereoPlayoutSupported() const {
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return (playout_parameters_.channels() == 2);
@@ -238,6 +247,7 @@
                                                 jboolean low_latency_output,
                                                 jboolean low_latency_input,
                                                 jboolean pro_audio,
+                                                jboolean a_audio,
                                                 jint output_buffer_size,
                                                 jint input_buffer_size,
                                                 jlong native_audio_manager) {
@@ -246,7 +256,7 @@
   this_object->OnCacheAudioParameters(
       env, sample_rate, output_channels, input_channels, hardware_aec,
       hardware_agc, hardware_ns, low_latency_output, low_latency_input,
-      pro_audio, output_buffer_size, input_buffer_size);
+      pro_audio, a_audio, output_buffer_size, input_buffer_size);
 }
 
 void AudioManager::OnCacheAudioParameters(JNIEnv* env,
@@ -259,6 +269,7 @@
                                           jboolean low_latency_output,
                                           jboolean low_latency_input,
                                           jboolean pro_audio,
+                                          jboolean a_audio,
                                           jint output_buffer_size,
                                           jint input_buffer_size) {
   RTC_LOG(INFO)
@@ -269,6 +280,7 @@
       << ", low_latency_output: " << static_cast<bool>(low_latency_output)
       << ", low_latency_input: " << static_cast<bool>(low_latency_input)
       << ", pro_audio: " << static_cast<bool>(pro_audio)
+      << ", a_audio: " << static_cast<bool>(a_audio)
       << ", sample_rate: " << static_cast<int>(sample_rate)
       << ", output_channels: " << static_cast<int>(output_channels)
       << ", input_channels: " << static_cast<int>(input_channels)
@@ -281,6 +293,7 @@
   low_latency_playout_ = low_latency_output;
   low_latency_record_ = low_latency_input;
   pro_audio_ = pro_audio;
+  a_audio_ = a_audio;
   playout_parameters_.reset(sample_rate, static_cast<size_t>(output_channels),
                             static_cast<size_t>(output_buffer_size));
   record_parameters_.reset(sample_rate, static_cast<size_t>(input_channels),
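
The single added 'Z' in the JNI descriptor matches the new jboolean a_audio parameter; if the Java declaration and this string ever disagree, RegisterNatives() fails at startup. For reference, the updated descriptor decodes as follows (a comment-style sketch, not code in this change):

  // (IIIZZZZZZZIIJ)V
  //   I x3 -> jint     sample_rate, output_channels, input_channels
  //   Z x7 -> jboolean hardware_aec, hardware_agc, hardware_ns,
  //                    low_latency_output, low_latency_input, pro_audio,
  //                    a_audio
  //   I x2 -> jint     output_buffer_size, input_buffer_size
  //   J    -> jlong    native_audio_manager
  //   V    -> void (return type)
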
diff --git a/modules/audio_device/android/audio_manager.h b/modules/audio_device/android/audio_manager.h
index dd6b51f..3de991c 100644
--- a/modules/audio_device/android/audio_manager.h
+++ b/modules/audio_device/android/audio_manager.h
@@ -11,11 +11,11 @@
 #ifndef MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
 #define MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
 
-#include <memory>
-
 #include <jni.h>
 #include <SLES/OpenSLES.h>
 
+#include <memory>
+
 #include "modules/audio_device/android/audio_common.h"
 #include "modules/audio_device/android/opensles_common.h"
 #include "modules/audio_device/audio_device_config.h"
@@ -115,6 +115,9 @@
   // OpenSL ES.
   bool IsProAudioSupported() const;
 
+  // Returns true if the device supports AAudio.
+  bool IsAAudioSupported() const;
+
   // Returns the estimated total delay of this device. Unit is in milliseconds.
   // The value is set once at construction and never changes after that.
   // Possible values are webrtc::kLowLatencyModeDelayEstimateInMilliseconds and
@@ -136,6 +139,7 @@
                                            jboolean low_latency_output,
                                            jboolean low_latency_input,
                                            jboolean pro_audio,
+                                           jboolean a_audio,
                                            jint output_buffer_size,
                                            jint input_buffer_size,
                                            jlong native_audio_manager);
@@ -149,6 +153,7 @@
                               jboolean low_latency_output,
                               jboolean low_latency_input,
                               jboolean pro_audio,
+                              jboolean a_audio,
                               jint output_buffer_size,
                               jint input_buffer_size);
 
@@ -200,6 +205,9 @@
   // True if device supports the low-latency OpenSL ES pro-audio path.
   bool pro_audio_;
 
+  // True if device supports the low-latency AAudio audio path.
+  bool a_audio_;
+
   // The delay estimate can take one of two fixed values depending on if the
   // device supports low-latency output or not.
   int delay_estimate_in_milliseconds_;
diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
index 08979aa..37e528b 100644
--- a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
+++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
@@ -37,6 +37,10 @@
 
   private static final String TAG = "WebRtcAudioManager";
 
+  // TODO(bugs.webrtc.org/8914): disabled by default until AAudio support is
+  // complete; the goal is for this blacklist to be false on O MR1 and higher.
+  private static final boolean blacklistDeviceForAAudioUsage = true;
+
   // Use mono as default for both audio directions.
   private static boolean useStereoOutput = false;
   private static boolean useStereoInput = false;
@@ -156,6 +160,7 @@
   private boolean lowLatencyOutput;
   private boolean lowLatencyInput;
   private boolean proAudio;
+  private boolean aAudio;
   private int sampleRate;
   private int outputChannels;
   private int inputChannels;
@@ -175,8 +180,9 @@
     volumeLogger = new VolumeLogger(audioManager);
     storeAudioParameters();
     nativeCacheAudioParameters(sampleRate, outputChannels, inputChannels, hardwareAEC, hardwareAGC,
-        hardwareNS, lowLatencyOutput, lowLatencyInput, proAudio, outputBufferSize, inputBufferSize,
-        nativeAudioManager);
+        hardwareNS, lowLatencyOutput, lowLatencyInput, proAudio, aAudio, outputBufferSize,
+        inputBufferSize, nativeAudioManager);
+    WebRtcAudioUtils.logAudioState(TAG);
   }
 
   private boolean init() {
@@ -225,6 +231,7 @@
     lowLatencyOutput = isLowLatencyOutputSupported();
     lowLatencyInput = isLowLatencyInputSupported();
     proAudio = isProAudioSupported();
+    aAudio = isAAudioSupported();
     outputBufferSize = lowLatencyOutput ? getLowLatencyOutputFramesPerBuffer()
                                         : getMinOutputFrameSize(sampleRate, outputChannels);
     inputBufferSize = lowLatencyInput ? getLowLatencyInputFramesPerBuffer()
@@ -263,6 +270,15 @@
                PackageManager.FEATURE_AUDIO_PRO);
   }
 
+  // AAudio is supported on Android Oreo MR1 (API 27) and higher.
+  // TODO(bugs.webrtc.org/8914): currently disabled by default.
+  private boolean isAAudioSupported() {
+    if (blacklistDeviceForAAudioUsage) {
+      Logging.w(TAG, "AAudio support is currently disabled on all devices!");
+    }
+    return !blacklistDeviceForAAudioUsage && WebRtcAudioUtils.runningOnOreoMR1OrHigher();
+  }
+
   // Returns the native output sample rate for this device's output stream.
   private int getNativeOutputSampleRate() {
     // Override this if we're running on an old emulator image which only
@@ -361,6 +377,6 @@
 
   private native void nativeCacheAudioParameters(int sampleRate, int outputChannels,
       int inputChannels, boolean hardwareAEC, boolean hardwareAGC, boolean hardwareNS,
-      boolean lowLatencyOutput, boolean lowLatencyInput, boolean proAudio, int outputBufferSize,
-      int inputBufferSize, long nativeAudioManager);
+      boolean lowLatencyOutput, boolean lowLatencyInput, boolean proAudio, boolean aAudio,
+      int outputBufferSize, int inputBufferSize, long nativeAudioManager);
 }
diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
index f31e497..097f3be 100644
--- a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
+++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
@@ -191,6 +191,16 @@
     return Build.VERSION.SDK_INT >= Build.VERSION_CODES.N;
   }
 
+  public static boolean runningOnOreoOrHigher() {
+    // API Level 26.
+    return Build.VERSION.SDK_INT >= Build.VERSION_CODES.O;
+  }
+
+  public static boolean runningOnOreoMR1OrHigher() {
+    // API Level 27.
+    return Build.VERSION.SDK_INT >= Build.VERSION_CODES.O_MR1;
+  }
+
   // Helper method for building a string of thread information.
   public static String getThreadInfo() {
     return "@[name=" + Thread.currentThread().getName() + ", id=" + Thread.currentThread().getId()
diff --git a/modules/audio_device/android/opensles_player.cc b/modules/audio_device/android/opensles_player.cc
index 2732f99..d177c87 100644
--- a/modules/audio_device/android/opensles_player.cc
+++ b/modules/audio_device/android/opensles_player.cc
@@ -404,9 +404,12 @@
     RTC_DCHECK(thread_checker_opensles_.CalledOnValidThread());
     // Read audio data from the WebRTC source using the FineAudioBuffer object
     // to adjust for differences in buffer size between WebRTC (10ms) and native
-    // OpenSL ES.
-    fine_audio_buffer_->GetPlayoutData(rtc::ArrayView<SLint8>(
-        audio_ptr, audio_parameters_.GetBytesPerBuffer()));
+    // OpenSL ES. Use a hardcoded delay estimate since OpenSL ES does not
+    // support delay estimation.
+    fine_audio_buffer_->GetPlayoutData(
+        rtc::ArrayView<SLint8>(audio_ptr,
+                               audio_parameters_.GetBytesPerBuffer()),
+        25);
   }
   // Enqueue the decoded audio buffer for playback.
   SLresult err = (*simple_buffer_queue_)
diff --git a/modules/audio_device/android/opensles_recorder.cc b/modules/audio_device/android/opensles_recorder.cc
index 919486b..446ef98 100644
--- a/modules/audio_device/android/opensles_recorder.cc
+++ b/modules/audio_device/android/opensles_recorder.cc
@@ -379,7 +379,7 @@
   const int8_t* data =
       static_cast<const int8_t*>(audio_buffers_[buffer_index_].get());
   fine_audio_buffer_->DeliverRecordedData(
-      rtc::ArrayView<const int8_t>(data, size_in_bytes), 25, 25);
+      rtc::ArrayView<const int8_t>(data, size_in_bytes), 25);
   // Enqueue the utilized audio buffer and use it for recording again.
   EnqueueAudioBuffer();
 }
diff --git a/modules/audio_device/audio_device_buffer.cc b/modules/audio_device/audio_device_buffer.cc
index 892a3af..db2efdc 100644
--- a/modules/audio_device/audio_device_buffer.cc
+++ b/modules/audio_device/audio_device_buffer.cc
@@ -234,9 +234,13 @@
   return 0;
 }
 
-void AudioDeviceBuffer::NativeAudioInterrupted() {
+void AudioDeviceBuffer::NativeAudioPlayoutInterrupted() {
   RTC_DCHECK(main_thread_checker_.CalledOnValidThread());
   playout_thread_checker_.DetachFromThread();
+}
+
+void AudioDeviceBuffer::NativeAudioRecordingInterrupted() {
+  RTC_DCHECK(main_thread_checker_.CalledOnValidThread());
   recording_thread_checker_.DetachFromThread();
 }
 
diff --git a/modules/audio_device/audio_device_buffer.h b/modules/audio_device/audio_device_buffer.h
index e3f7c19..ce64fec 100644
--- a/modules/audio_device/audio_device_buffer.h
+++ b/modules/audio_device/audio_device_buffer.h
@@ -104,11 +104,12 @@
 
   int32_t SetTypingStatus(bool typing_status);
 
-  // Called on iOS where the native audio layer can be interrupted by other
-  // audio applications. This method can then be used to reset internal
-  // states and detach thread checkers to allow for a new audio session where
-  // native callbacks may come from a new set of I/O threads.
-  void NativeAudioInterrupted();
+  // Called on iOS and Android where the native audio layer can be interrupted
+  // by other audio applications. These methods can then be used to reset
+  // internal states and detach thread checkers to allow for new audio sessions
+  // where native callbacks may come from a new set of I/O threads.
+  void NativeAudioPlayoutInterrupted();
+  void NativeAudioRecordingInterrupted();
 
  private:
   // Starts/stops periodic logging of audio stats.
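
Splitting the old NativeAudioInterrupted() in two matters on Android because AAudio streams exist per direction and can be disconnected independently (e.g. on a routing change); only the affected side should detach its thread checker before its stream is restarted. A hypothetical handler with illustrative names, assuming the disconnect has already been posted to the main thread:

  // Runs on the main thread after the AAudio error callback has signaled
  // that the output stream was disconnected.
  void OnOutputStreamDisconnected(webrtc::AudioDeviceBuffer* adb) {
    // Reset only the playout side; recording callbacks keep their thread.
    adb->NativeAudioPlayoutInterrupted();
    // ... close and re-open the output stream here; subsequent playout
    // callbacks may arrive on a new real-time thread owned by AAudio.
  }
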
diff --git a/modules/audio_device/audio_device_impl.cc b/modules/audio_device/audio_device_impl.cc
index ff651d4..af3724d 100644
--- a/modules/audio_device/audio_device_impl.cc
+++ b/modules/audio_device/audio_device_impl.cc
@@ -20,10 +20,14 @@
 
 #if defined(_WIN32)
 #if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
-#include "audio_device_core_win.h"
+#include "modules/audio_device/win/audio_device_core_win.h"
 #endif
 #elif defined(WEBRTC_ANDROID)
 #include <stdlib.h>
+#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+#include "modules/audio_device/android/aaudio_player.h"
+#include "modules/audio_device/android/aaudio_recorder.h"
+#endif
 #include "modules/audio_device/android/audio_device_template.h"
 #include "modules/audio_device/android/audio_manager.h"
 #include "modules/audio_device/android/audio_record_jni.h"
@@ -32,15 +36,15 @@
 #include "modules/audio_device/android/opensles_recorder.h"
 #elif defined(WEBRTC_LINUX)
 #if defined(LINUX_ALSA)
-#include "audio_device_alsa_linux.h"
+#include "modules/audio_device/linux/audio_device_alsa_linux.h"
 #endif
 #if defined(LINUX_PULSE)
-#include "audio_device_pulse_linux.h"
+#include "modules/audio_device/linux/audio_device_pulse_linux.h"
 #endif
 #elif defined(WEBRTC_IOS)
-#include "audio_device_ios.h"
+#include "modules/audio_device/ios/audio_device_ios.h"
 #elif defined(WEBRTC_MAC)
-#include "audio_device_mac.h"
+#include "modules/audio_device/mac/audio_device_mac.h"
 #endif
 #if defined(WEBRTC_DUMMY_FILE_DEVICES)
 #include "modules/audio_device/dummy/file_audio_device_factory.h"
@@ -52,14 +56,14 @@
   {                         \
     if (!initialized_) {    \
       return -1;            \
-    };                      \
+    }                       \
   }
 
 #define CHECKinitialized__BOOL() \
   {                              \
     if (!initialized_) {         \
       return false;              \
-    };                           \
+    }                            \
   }
 
 namespace webrtc {
@@ -170,8 +174,11 @@
   audio_manager_android_.reset(new AudioManager());
   // Select best possible combination of audio layers.
   if (audio_layer == kPlatformDefaultAudio) {
-    if (audio_manager_android_->IsLowLatencyPlayoutSupported() &&
-        audio_manager_android_->IsLowLatencyRecordSupported()) {
+    if (audio_manager_android_->IsAAudioSupported()) {
+      // Use of AAudio for both playout and recording has highest priority.
+      audio_layer = kAndroidAAudioAudio;
+    } else if (audio_manager_android_->IsLowLatencyPlayoutSupported() &&
+               audio_manager_android_->IsLowLatencyRecordSupported()) {
       // Use OpenSL ES for both playout and recording.
       audio_layer = kAndroidOpenSLESAudio;
     } else if (audio_manager_android_->IsLowLatencyPlayoutSupported() &&
@@ -201,8 +208,20 @@
     // time support for HW AEC using the AudioRecord Java API.
     audio_device_.reset(new AudioDeviceTemplate<AudioRecordJni, OpenSLESPlayer>(
         audio_layer, audio_manager));
+  } else if (audio_layer == kAndroidAAudioAudio) {
+#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+    // AAudio based audio for both input and output.
+    audio_device_.reset(new AudioDeviceTemplate<AAudioRecorder, AAudioPlayer>(
+        audio_layer, audio_manager));
+#endif
+  } else if (audio_layer == kAndroidJavaInputAndAAudioOutputAudio) {
+#if defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
+    // Java audio for input and AAudio for output (i.e. mixed APIs).
+    audio_device_.reset(new AudioDeviceTemplate<AudioRecordJni, AAudioPlayer>(
+        audio_layer, audio_manager));
+#endif
   } else {
-    // Invalid audio layer.
+    RTC_LOG(LS_ERROR) << "The requested audio layer is not supported";
     audio_device_.reset(nullptr);
   }
 // END #if defined(WEBRTC_ANDROID)
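
With kPlatformDefaultAudio the selection order is now AAudio (both directions) > OpenSL ES (both directions) > OpenSL ES output with Java input > Java (both directions). A client can still force a specific combination; a minimal sketch, assuming an AAudio-enabled build (the exact Create() factory signature has varied between WebRTC versions, so treat this as illustrative):

  rtc::scoped_refptr<webrtc::AudioDeviceModule> adm =
      webrtc::AudioDeviceModule::Create(
          0, webrtc::AudioDeviceModule::kAndroidAAudioAudio);
  if (adm->Init() != 0) {
    // The layer is rejected when AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO is not
    // defined at build time.
    RTC_LOG(LS_ERROR) << "AAudio ADM could not be initialized";
  }
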
diff --git a/modules/audio_device/fine_audio_buffer.cc b/modules/audio_device/fine_audio_buffer.cc
index 7683834..c1f1285 100644
--- a/modules/audio_device/fine_audio_buffer.cc
+++ b/modules/audio_device/fine_audio_buffer.cc
@@ -29,7 +29,7 @@
       bytes_per_10_ms_(samples_per_10_ms_ * sizeof(int16_t)),
       playout_buffer_(0, capacity),
       record_buffer_(0, capacity) {
-  RTC_LOG(INFO) << "samples_per_10_ms_:" << samples_per_10_ms_;
+  RTC_LOG(INFO) << "samples_per_10_ms_: " << samples_per_10_ms_;
 }
 
 FineAudioBuffer::~FineAudioBuffer() {}
@@ -42,7 +42,8 @@
   record_buffer_.Clear();
 }
 
-void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int8_t> audio_buffer) {
+void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int8_t> audio_buffer,
+                                     int playout_delay_ms) {
   // Ask WebRTC for new data in chunks of 10ms until we have enough to
   // fulfill the request. It is possible that the buffer already contains
   // enough samples from the last round.
@@ -67,11 +68,12 @@
   memmove(playout_buffer_.data(), playout_buffer_.data() + num_bytes,
           playout_buffer_.size() - num_bytes);
   playout_buffer_.SetSize(playout_buffer_.size() - num_bytes);
+  // Cache the playout latency for use in DeliverRecordedData().
+  playout_delay_ms_ = playout_delay_ms;
 }
 
 void FineAudioBuffer::DeliverRecordedData(
     rtc::ArrayView<const int8_t> audio_buffer,
-    int playout_delay_ms,
     int record_delay_ms) {
   // Always append new data and grow the buffer if needed.
   record_buffer_.AppendData(audio_buffer.data(), audio_buffer.size());
@@ -81,7 +83,7 @@
   while (record_buffer_.size() >= bytes_per_10_ms_) {
     device_buffer_->SetRecordedBuffer(record_buffer_.data(),
                                       samples_per_10_ms_);
-    device_buffer_->SetVQEData(playout_delay_ms, record_delay_ms);
+    device_buffer_->SetVQEData(playout_delay_ms_, record_delay_ms);
     device_buffer_->DeliverRecordedData();
     memmove(record_buffer_.data(), record_buffer_.data() + bytes_per_10_ms_,
             record_buffer_.size() - bytes_per_10_ms_);
diff --git a/modules/audio_device/fine_audio_buffer.h b/modules/audio_device/fine_audio_buffer.h
index 6bd047e..fb80e98 100644
--- a/modules/audio_device/fine_audio_buffer.h
+++ b/modules/audio_device/fine_audio_buffer.h
@@ -49,19 +49,22 @@
   // Copies audio samples into |audio_buffer| where number of requested
   // elements is specified by |audio_buffer.size()|. The producer will always
   // fill up the audio buffer and if no audio exists, the buffer will contain
-  // silence instead.
-  void GetPlayoutData(rtc::ArrayView<int8_t> audio_buffer);
+  // silence instead. The provided delay estimate in |playout_delay_ms|
+  // should contain an estimate of the latency between when an audio frame
+  // is read from WebRTC and when it is played out on the speaker.
+  void GetPlayoutData(rtc::ArrayView<int8_t> audio_buffer,
+                      int playout_delay_ms);
 
   // Consumes the audio data in |audio_buffer| and sends it to the WebRTC layer
-  // in chunks of 10ms. The provided delay estimates in |playout_delay_ms| and
-  // |record_delay_ms| are given to the AEC in the audio processing module.
+  // in chunks of 10ms. The sum of the provided delay estimate in
+  // |record_delay_ms| and the latest |playout_delay_ms| passed to
+  // GetPlayoutData() is given to the AEC in the audio processing module.
   // They can be fixed values on most platforms and they are ignored if an
   // external (hardware/built-in) AEC is used.
   // Example: buffer size is 5ms => call #1 stores 5ms of data, call #2 stores
   // 5ms of data and sends a total of 10ms to WebRTC and clears the internal
   // cache. Call #3 restarts the scheme above.
   void DeliverRecordedData(rtc::ArrayView<const int8_t> audio_buffer,
-                           int playout_delay_ms,
                            int record_delay_ms);
 
  private:
@@ -84,6 +87,8 @@
   // Storage for input samples that are about to be delivered to the WebRTC
   // ADB or remains from the last successful delivery of a 10ms audio buffer.
   rtc::BufferT<int8_t> record_buffer_;
+  // Contains latest delay estimate given to GetPlayoutData().
+  int playout_delay_ms_ = 0;
 };
 
 }  // namespace webrtc
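
The net effect is that each side now reports only its own latency: the playout path caches |playout_delay_ms| and the record path forwards it, together with |record_delay_ms|, to SetVQEData(). A sketch of the intended calling pattern from a native layer with dynamic delay reporting (buffer pointers and delay values are illustrative):

  // Playout callback: supply the current output latency; it is cached
  // inside the FineAudioBuffer instance.
  fine_audio_buffer_->GetPlayoutData(
      rtc::ArrayView<int8_t>(dest, size_in_bytes), current_playout_delay_ms);

  // Record callback: supply only the input latency; the most recently
  // cached playout delay is passed alongside it to SetVQEData().
  fine_audio_buffer_->DeliverRecordedData(
      rtc::ArrayView<const int8_t>(source, size_in_bytes),
      current_record_delay_ms);
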
diff --git a/modules/audio_device/fine_audio_buffer_unittest.cc b/modules/audio_device/fine_audio_buffer_unittest.cc
index f493e51..d6bf542 100644
--- a/modules/audio_device/fine_audio_buffer_unittest.cc
+++ b/modules/audio_device/fine_audio_buffer_unittest.cc
@@ -124,11 +124,11 @@
 
   for (int i = 0; i < kNumberOfFrames; ++i) {
     fine_buffer.GetPlayoutData(
-        rtc::ArrayView<int8_t>(out_buffer.get(), kFrameSizeBytes));
+        rtc::ArrayView<int8_t>(out_buffer.get(), kFrameSizeBytes), 0);
     EXPECT_TRUE(VerifyBuffer(out_buffer.get(), i, kFrameSizeBytes));
     UpdateInputBuffer(in_buffer.get(), i, kFrameSizeBytes);
     fine_buffer.DeliverRecordedData(
-        rtc::ArrayView<const int8_t>(in_buffer.get(), kFrameSizeBytes), 0, 0);
+        rtc::ArrayView<const int8_t>(in_buffer.get(), kFrameSizeBytes), 0);
   }
 }
 
diff --git a/modules/audio_device/include/audio_device.h b/modules/audio_device/include/audio_device.h
index 5e88b88..a894df4 100644
--- a/modules/audio_device/include/audio_device.h
+++ b/modules/audio_device/include/audio_device.h
@@ -34,7 +34,9 @@
     kAndroidJavaAudio = 5,
     kAndroidOpenSLESAudio = 6,
     kAndroidJavaInputAndOpenSLESOutputAudio = 7,
-    kDummyAudio = 8
+    kAndroidAAudioAudio = 8,
+    kAndroidJavaInputAndAAudioOutputAudio = 9,
+    kDummyAudio = 10
   };
 
   enum WindowsDeviceType {
diff --git a/modules/audio_device/include/audio_device_defines.h b/modules/audio_device/include/audio_device_defines.h
index 04119f6..bb9a0db 100644
--- a/modules/audio_device/include/audio_device_defines.h
+++ b/modules/audio_device/include/audio_device_defines.h
@@ -12,9 +12,11 @@
 #define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFINES_H_
 
 #include <stddef.h>
+#include <string>
 
 #include "rtc_base/checks.h"
 #include "rtc_base/deprecation.h"
+#include "rtc_base/strings/string_builder.h"
 #include "typedefs.h"  // NOLINT(build/include)
 
 namespace webrtc {
@@ -41,16 +43,16 @@
                                           const int32_t clockDrift,
                                           const uint32_t currentMicLevel,
                                           const bool keyPressed,
-                                          uint32_t& newMicLevel) = 0;
+                                          uint32_t& newMicLevel) = 0;  // NOLINT
 
   virtual int32_t NeedMorePlayData(const size_t nSamples,
                                    const size_t nBytesPerSample,
                                    const size_t nChannels,
                                    const uint32_t samplesPerSec,
                                    void* audioSamples,
-                                   size_t& nSamplesOut,
+                                   size_t& nSamplesOut,  // NOLINT
                                    int64_t* elapsed_time_ms,
-                                   int64_t* ntp_time_ms) = 0;
+                                   int64_t* ntp_time_ms) = 0;  // NOLINT
 
   // Method to push the captured audio data to the specific VoE channel.
   // The data will not undergo audio processing.
@@ -143,6 +145,19 @@
       return 0.0;
     return static_cast<double>(frames_per_buffer_) / (sample_rate_);
   }
+  std::string ToString() const {
+    char ss_buf[1024];
+    rtc::SimpleStringBuilder ss(ss_buf);
+    ss << "AudioParameters: ";
+    ss << "sample_rate=" << sample_rate() << ", channels=" << channels();
+    ss << ", frames_per_buffer=" << frames_per_buffer();
+    ss << ", frames_per_10ms_buffer=" << frames_per_10ms_buffer();
+    ss << ", bytes_per_frame=" << GetBytesPerFrame();
+    ss << ", bytes_per_buffer=" << GetBytesPerBuffer();
+    ss << ", bytes_per_10ms_buffer=" << GetBytesPer10msBuffer();
+    ss << ", size_in_ms=" << GetBufferSizeInMilliseconds();
+    return ss.str();
+  }
 
  private:
   int sample_rate_;
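
ToString() writes into a fixed 1024-byte stack buffer via rtc::SimpleStringBuilder, so it is cheap enough for one-line logging; a typical (illustrative) call site:

  RTC_LOG(INFO) << "playout: " << playout_parameters_.ToString();
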
diff --git a/modules/audio_device/ios/audio_device_ios.mm b/modules/audio_device/ios/audio_device_ios.mm
index 5dd816a..aa55126 100644
--- a/modules/audio_device/ios/audio_device_ios.mm
+++ b/modules/audio_device/ios/audio_device_ios.mm
@@ -394,8 +394,7 @@
   // Get a pointer to the recorded audio and send it to the WebRTC ADB.
   // Use the FineAudioBuffer instance to convert between native buffer size
   // and the 10ms buffer size used by WebRTC.
-  fine_audio_buffer_->DeliverRecordedData(
-      record_audio_buffer_, kFixedPlayoutDelayEstimate, kFixedRecordDelayEstimate);
+  fine_audio_buffer_->DeliverRecordedData(record_audio_buffer_, kFixedRecordDelayEstimate);
   return noErr;
 }
 
@@ -455,7 +454,8 @@
   // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
   // the native I/O audio unit) and copy the result to the audio buffer in the
   // |io_data| destination.
-  fine_audio_buffer_->GetPlayoutData(rtc::ArrayView<int8_t>(destination, size_in_bytes));
+  fine_audio_buffer_->GetPlayoutData(rtc::ArrayView<int8_t>(destination, size_in_bytes),
+                                     kFixedPlayoutDelayEstimate);
   return noErr;
 }
 
@@ -500,7 +500,8 @@
       io_thread_checker_.DetachFromThread();
       // The audio device buffer must also be informed about the interrupted
       // state so it can detach its thread checkers as well.
-      audio_device_buffer_->NativeAudioInterrupted();
+      audio_device_buffer_->NativeAudioPlayoutInterrupted();
+      audio_device_buffer_->NativeAudioRecordingInterrupted();
     }
   }
   is_interrupted_ = true;
diff --git a/webrtc.gni b/webrtc.gni
index f48fce7..6878ae2 100644
--- a/webrtc.gni
+++ b/webrtc.gni
@@ -81,6 +81,11 @@
   # Enable to use the Mozilla internal settings.
   build_with_mozilla = false
 
+  # Enable use of Android AAudio, which requires Android SDK 26 or above and
+  # NDK r16 or above.
+  rtc_enable_android_aaudio = false
+
+  # TODO(henrika): can this flag be removed?
   rtc_enable_android_opensl = false
 
   # Link-Time Optimizations.
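
Since rtc_enable_android_aaudio defaults to false, the new AAudio sources are not compiled and AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO stays undefined unless the flag is set explicitly in the GN args, for example:

  gn gen out/Android --args='target_os="android" rtc_enable_android_aaudio=true'
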