Revert "Remove old audio device implementation."

This reverts commit 0cfa4cba5cae5e942f5d8e0e4e93b94982d0bfc3.

Reason for revert: the audio_device_ios_objc target was removed but is still referenced by iPhone Meetins:Meeting_build_test, which now fails to build.

Original change's description:
> Remove old audio device implementation.
> 
> The iOS ADM implementation now lives in sdk/objc/native/api/audio_device_module.{h,mm}.
> 
> Bug: webrtc:10514
> Change-Id: Ib0b162027b5680ebc40d621a57f1155f08e7a057
> Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/131326
> Commit-Queue: Kári Helgason <kthelgason@webrtc.org>
> Reviewed-by: Henrik Andreasson <henrika@webrtc.org>
> Reviewed-by: Niels Moller <nisse@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#27488}

TBR=henrika@webrtc.org,nisse@webrtc.org,kthelgason@webrtc.org

Change-Id: I5be10b3d17403a79ea30afc255cde01171bc9f5b
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: webrtc:10514
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/131960
Reviewed-by: Jeroen de Borst <jeroendb@webrtc.org>
Commit-Queue: Jeroen de Borst <jeroendb@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27492}
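
For context, the restored implementation is reached through the platform-default
audio layer. A minimal caller-side sketch (hypothetical client code, assuming the
AudioDeviceModule::Create(AudioLayer) factory of this era; not part of the change
itself):

    // On WEBRTC_IOS builds, kPlatformDefaultAudio routes to the restored
    // AudioDeviceIOS, as shown in the audio_device_impl.cc hunk below.
    #include "modules/audio_device/include/audio_device.h"

    rtc::scoped_refptr<webrtc::AudioDeviceModule> adm =
        webrtc::AudioDeviceModule::Create(
            webrtc::AudioDeviceModule::kPlatformDefaultAudio);
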
diff --git a/modules/audio_device/BUILD.gn b/modules/audio_device/BUILD.gn
index e915320..02e54c0 100644
--- a/modules/audio_device/BUILD.gn
+++ b/modules/audio_device/BUILD.gn
@@ -45,6 +45,47 @@
   ]
 }
 
+if (rtc_include_internal_audio_device && is_ios) {
+  rtc_source_set("audio_device_ios_objc") {
+    visibility = [
+      ":audio_device_impl",
+      ":audio_device_ios_objc_unittests",
+    ]
+    sources = [
+      "ios/audio_device_ios.h",
+      "ios/audio_device_ios.mm",
+      "ios/audio_device_not_implemented_ios.mm",
+      "ios/audio_session_observer.h",
+      "ios/objc/RTCAudioSession.h",
+      "ios/objc/RTCAudioSessionConfiguration.h",
+      "ios/objc/RTCAudioSessionDelegateAdapter.h",
+      "ios/objc/RTCAudioSessionDelegateAdapter.mm",
+      "ios/voice_processing_audio_unit.h",
+      "ios/voice_processing_audio_unit.mm",
+    ]
+    libs = [
+      "AudioToolbox.framework",
+      "AVFoundation.framework",
+      "Foundation.framework",
+      "UIKit.framework",
+    ]
+    deps = [
+      ":audio_device_api",
+      ":audio_device_buffer",
+      ":audio_device_generic",
+      "../../api:array_view",
+      "../../rtc_base",
+      "../../rtc_base:checks",
+      "../../rtc_base:gtest_prod",
+      "../../rtc_base/system:fallthrough",
+      "../../sdk:audio_device",
+      "../../sdk:audio_objc",
+      "../../sdk:base_objc",
+      "../../system_wrappers:metrics",
+    ]
+  }
+}
+
 rtc_source_set("audio_device_api") {
   visibility = [ "*" ]
   sources = [
@@ -183,7 +224,7 @@
     "//third_party/abseil-cpp/absl/memory",
   ]
   if (rtc_include_internal_audio_device && is_ios) {
-    deps += [ "../../sdk:audio_device" ]
+    deps += [ ":audio_device_ios_objc" ]
   }
 
   sources = [
@@ -356,6 +397,32 @@
 }
 
 if (rtc_include_tests) {
+  # TODO(kthelgason): Reenable these tests on simulator.
+  # See bugs.webrtc.org/7812
+  if (rtc_include_internal_audio_device && is_ios && !use_ios_simulator) {
+    rtc_source_set("audio_device_ios_objc_unittests") {
+      testonly = true
+      visibility = [ ":*" ]
+      sources = [
+        "ios/audio_device_unittest_ios.mm",
+      ]
+      deps = [
+        ":audio_device",
+        ":audio_device_buffer",
+        ":audio_device_impl",
+        ":audio_device_ios_objc",
+        ":mock_audio_device",
+        "../../api:scoped_refptr",
+        "../../rtc_base:rtc_base_approved",
+        "../../sdk:audio_objc",
+        "../../system_wrappers",
+        "../../test:fileutils",
+        "../../test:test_support",
+        "//third_party/ocmock",
+      ]
+    }
+  }
+
   rtc_source_set("audio_device_unittests") {
     testonly = true
 
diff --git a/modules/audio_device/DEPS b/modules/audio_device/DEPS
index fc5eed7..f74767a 100644
--- a/modules/audio_device/DEPS
+++ b/modules/audio_device/DEPS
@@ -7,7 +7,28 @@
   "ensure_initialized\.cc": [
     "+base/android",
   ],
-  "audio_device_impl\.cc": [
+  "audio_device_ios\.h": [
+    "+sdk/objc",
+  ],
+  "audio_device_ios\.mm": [
+    "+sdk/objc",
+  ],
+  "audio_device_unittest_ios\.mm": [
+    "+sdk/objc",
+  ],
+  "RTCAudioSession\.h": [
+    "+sdk/objc",
+  ],
+  "RTCAudioSessionConfiguration\.h": [
+    "+sdk/objc",
+  ],
+  "RTCAudioSessionDelegateAdapter\.h": [
+    "+sdk/objc",
+  ],
+  "RTCAudioSessionDelegateAdapter\.mm": [
+    "+sdk/objc",
+  ],
+  "voice_processing_audio_unit\.mm": [
     "+sdk/objc",
   ],
 }
diff --git a/modules/audio_device/audio_device_impl.cc b/modules/audio_device/audio_device_impl.cc
index f7efced..7b08a5a 100644
--- a/modules/audio_device/audio_device_impl.cc
+++ b/modules/audio_device/audio_device_impl.cc
@@ -45,7 +45,7 @@
 #include "modules/audio_device/linux/audio_device_pulse_linux.h"
 #endif
 #elif defined(WEBRTC_IOS)
-#include "sdk/objc/native/src/audio/audio_device_ios.h"
+#include "modules/audio_device/ios/audio_device_ios.h"
 #elif defined(WEBRTC_MAC)
 #include "modules/audio_device/mac/audio_device_mac.h"
 #endif
@@ -287,7 +287,7 @@
 // iOS ADM implementation.
 #if defined(WEBRTC_IOS)
   if (audio_layer == kPlatformDefaultAudio) {
-    audio_device_.reset(new ios_adm::AudioDeviceIOS());
+    audio_device_.reset(new AudioDeviceIOS());
     RTC_LOG(INFO) << "iPhone Audio APIs will be utilized.";
   }
 // END #if defined(WEBRTC_IOS)
diff --git a/modules/audio_device/ios/audio_device_ios.h b/modules/audio_device/ios/audio_device_ios.h
new file mode 100644
index 0000000..e90bb44
--- /dev/null
+++ b/modules/audio_device/ios/audio_device_ios.h
@@ -0,0 +1,296 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
+#define MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
+
+#include <memory>
+
+#include "modules/audio_device/audio_device_generic.h"
+#include "modules/audio_device/ios/audio_session_observer.h"
+#include "modules/audio_device/ios/voice_processing_audio_unit.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/gtest_prod_util.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/thread_checker.h"
+#include "sdk/objc/base/RTCMacros.h"
+
+RTC_FWD_DECL_OBJC_CLASS(RTCAudioSessionDelegateAdapter);
+
+namespace webrtc {
+
+class FineAudioBuffer;
+
+// Implements full duplex 16-bit mono PCM audio support for iOS using a
+// Voice-Processing (VP) I/O audio unit in Core Audio. The VP I/O audio unit
+// supports audio echo cancellation. It also adds automatic gain control,
+// adjustment of voice-processing quality and muting.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All supported public methods must also be called on the same thread.
+// A thread checker will RTC_DCHECK if any supported method is called on an
+// invalid thread.
+//
+// Recorded audio will be delivered on a real-time internal I/O thread in the
+// audio unit. The audio unit will also ask for audio data to play out on this
+// same thread.
+class AudioDeviceIOS : public AudioDeviceGeneric,
+                       public AudioSessionObserver,
+                       public VoiceProcessingAudioUnitObserver,
+                       public rtc::MessageHandler {
+ public:
+  AudioDeviceIOS();
+  ~AudioDeviceIOS();
+
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
+
+  InitStatus Init() override;
+  int32_t Terminate() override;
+  bool Initialized() const override;
+
+  int32_t InitPlayout() override;
+  bool PlayoutIsInitialized() const override;
+
+  int32_t InitRecording() override;
+  bool RecordingIsInitialized() const override;
+
+  int32_t StartPlayout() override;
+  int32_t StopPlayout() override;
+  bool Playing() const override { return playing_; }
+
+  int32_t StartRecording() override;
+  int32_t StopRecording() override;
+  bool Recording() const override { return recording_; }
+
+  // These methods return hard-coded delay values and not dynamic delay
+  // estimates. The reason is that iOS supports a built-in AEC and the WebRTC
+  // AEC will always be disabled in the Libjingle layer to avoid running two
+  // AEC implementations at the same time. It also saves resources to avoid
+  // updating these delay values continuously.
+  // TODO(henrika): it would be possible to mark these two methods as not
+  // implemented since they are only called for A/V-sync purposes today and
+  // A/V-sync is not supported on iOS. However, we avoid adding error messages
+  // to the log by using these dummy implementations instead.
+  int32_t PlayoutDelay(uint16_t& delayMS) const override;
+
+  // Native audio parameters stored during construction.
+  // These methods are unique for the iOS implementation.
+  int GetPlayoutAudioParameters(AudioParameters* params) const override;
+  int GetRecordAudioParameters(AudioParameters* params) const override;
+
+  // These methods are currently not fully implemented on iOS:
+
+  // See audio_device_not_implemented.cc for trivial implementations.
+  int32_t ActiveAudioLayer(
+      AudioDeviceModule::AudioLayer& audioLayer) const override;
+  int32_t PlayoutIsAvailable(bool& available) override;
+  int32_t RecordingIsAvailable(bool& available) override;
+  int16_t PlayoutDevices() override;
+  int16_t RecordingDevices() override;
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize]) override;
+  int32_t RecordingDeviceName(uint16_t index,
+                              char name[kAdmMaxDeviceNameSize],
+                              char guid[kAdmMaxGuidSize]) override;
+  int32_t SetPlayoutDevice(uint16_t index) override;
+  int32_t SetPlayoutDevice(
+      AudioDeviceModule::WindowsDeviceType device) override;
+  int32_t SetRecordingDevice(uint16_t index) override;
+  int32_t SetRecordingDevice(
+      AudioDeviceModule::WindowsDeviceType device) override;
+  int32_t InitSpeaker() override;
+  bool SpeakerIsInitialized() const override;
+  int32_t InitMicrophone() override;
+  bool MicrophoneIsInitialized() const override;
+  int32_t SpeakerVolumeIsAvailable(bool& available) override;
+  int32_t SetSpeakerVolume(uint32_t volume) override;
+  int32_t SpeakerVolume(uint32_t& volume) const override;
+  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
+  int32_t MinSpeakerVolume(uint32_t& minVolume) const override;
+  int32_t MicrophoneVolumeIsAvailable(bool& available) override;
+  int32_t SetMicrophoneVolume(uint32_t volume) override;
+  int32_t MicrophoneVolume(uint32_t& volume) const override;
+  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
+  int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;
+  int32_t MicrophoneMuteIsAvailable(bool& available) override;
+  int32_t SetMicrophoneMute(bool enable) override;
+  int32_t MicrophoneMute(bool& enabled) const override;
+  int32_t SpeakerMuteIsAvailable(bool& available) override;
+  int32_t SetSpeakerMute(bool enable) override;
+  int32_t SpeakerMute(bool& enabled) const override;
+  int32_t StereoPlayoutIsAvailable(bool& available) override;
+  int32_t SetStereoPlayout(bool enable) override;
+  int32_t StereoPlayout(bool& enabled) const override;
+  int32_t StereoRecordingIsAvailable(bool& available) override;
+  int32_t SetStereoRecording(bool enable) override;
+  int32_t StereoRecording(bool& enabled) const override;
+
+  // AudioSessionObserver methods. May be called from any thread.
+  void OnInterruptionBegin() override;
+  void OnInterruptionEnd() override;
+  void OnValidRouteChange() override;
+  void OnCanPlayOrRecordChange(bool can_play_or_record) override;
+  void OnChangedOutputVolume() override;
+
+  // VoiceProcessingAudioUnitObserver methods.
+  OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+                                 const AudioTimeStamp* time_stamp,
+                                 UInt32 bus_number,
+                                 UInt32 num_frames,
+                                 AudioBufferList* io_data) override;
+  OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
+                            const AudioTimeStamp* time_stamp,
+                            UInt32 bus_number,
+                            UInt32 num_frames,
+                            AudioBufferList* io_data) override;
+
+  // Handles messages posted to |thread_|.
+  void OnMessage(rtc::Message* msg) override;
+
+ private:
+  // Called by the relevant AudioSessionObserver methods on |thread_|.
+  void HandleInterruptionBegin();
+  void HandleInterruptionEnd();
+  void HandleValidRouteChange();
+  void HandleCanPlayOrRecordChange(bool can_play_or_record);
+  void HandleSampleRateChange(float sample_rate);
+  void HandlePlayoutGlitchDetected();
+  void HandleOutputVolumeChange();
+
+  // Uses current |playout_parameters_| and |record_parameters_| to inform the
+  // audio device buffer (ADB) about our internal audio parameters.
+  void UpdateAudioDeviceBuffer();
+
+  // Since the preferred audio parameters are only hints to the OS, the actual
+  // values may be different once the AVAudioSession has been activated.
+  // This method asks for the current hardware parameters and takes action
+  // if they differ from what we initially asked for. It also
+  // defines |playout_parameters_| and |record_parameters_|.
+  void SetupAudioBuffersForActiveAudioSession();
+
+  // Creates the audio unit.
+  bool CreateAudioUnit();
+
+  // Updates the audio unit state based on its current state.
+  void UpdateAudioUnit(bool can_play_or_record);
+
+  // Configures the audio session for WebRTC.
+  bool ConfigureAudioSession();
+  // Unconfigures the audio session.
+  void UnconfigureAudioSession();
+
+  // Activates our audio session, creates and initializes the voice-processing
+  // audio unit and verifies that we got the preferred native audio parameters.
+  bool InitPlayOrRecord();
+
+  // Closes and deletes the voice-processing I/O unit.
+  void ShutdownPlayOrRecord();
+
+  // Resets thread-checkers before a call is restarted.
+  void PrepareForNewStart();
+
+  // Ensures that methods are called from the same thread as this object is
+  // created on.
+  rtc::ThreadChecker thread_checker_;
+
+  // Native I/O audio thread checker.
+  rtc::ThreadChecker io_thread_checker_;
+
+  // Thread that this object is created on.
+  rtc::Thread* thread_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class and created inside AudioDeviceModule::Create().
+  // The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance
+  // and therefore outlives this object.
+  AudioDeviceBuffer* audio_device_buffer_;
+
+  // Contains audio parameters (sample rate, #channels, buffer size etc.) for
+  // the playout and recording sides. These structures are set in two steps:
+  // first, native sample rate and #channels are defined in Init(). Next, the
+  // audio session is activated and we verify that the preferred parameters
+  // were granted by the OS. At this stage it is also possible to add a third
+  // component to the parameters; the native I/O buffer duration.
+  // An RTC_CHECK will be hit if we for some reason fail to open an audio session
+  // using the specified parameters.
+  AudioParameters playout_parameters_;
+  AudioParameters record_parameters_;
+
+  // The AudioUnit used to play and record audio.
+  std::unique_ptr<VoiceProcessingAudioUnit> audio_unit_;
+
+  // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
+  // in chunks of 10ms. It then allows for this data to be pulled at
+  // a finer or coarser granularity. That is, by interacting with this class
+  // instead of directly with the AudioDeviceBuffer, one can ask for any
+  // number of audio data samples. It also supports a similar scheme for the
+  // recording side.
+  // Example: native buffer size can be 128 audio frames at 16kHz sample rate.
+  // WebRTC will provide 160 audio frames per 10ms but iOS asks for 128
+  // in each callback (one every 8ms). This class can then ask for 128 and the
+  // FineAudioBuffer will ask WebRTC for new data only when needed and also
+  // cache non-utilized audio between callbacks. On the recording side, iOS
+  // can provide audio data frames of size 128 and these are accumulated until
+  // enough data to supply one 10ms call exists. This 10ms chunk is then sent
+  // to WebRTC and the remaining part is stored.
+  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+  // Temporary storage for recorded data. AudioUnitRender() renders into this
+  // array as soon as a frame of the desired buffer size has been recorded.
+  // On real iOS devices, the size will be fixed and set once. For iOS
+  // simulators, the size can vary from callback to callback and the size
+  // will be changed dynamically to account for this behavior.
+  rtc::BufferT<int16_t> record_audio_buffer_;
+
+  // Set to 1 when recording is active and 0 otherwise.
+  volatile int recording_;
+
+  // Set to 1 when playout is active and 0 otherwise.
+  volatile int playing_;
+
+  // Set to true after successful call to Init(), false otherwise.
+  bool initialized_ RTC_GUARDED_BY(thread_checker_);
+
+  // Set to true after successful call to InitRecording() or InitPlayout(),
+  // false otherwise.
+  bool audio_is_initialized_;
+
+  // Set to true if audio session is interrupted, false otherwise.
+  bool is_interrupted_;
+
+  // Audio interruption observer instance.
+  RTCAudioSessionDelegateAdapter* audio_session_observer_
+      RTC_GUARDED_BY(thread_checker_);
+
+  // Set to true if we've activated the audio session.
+  bool has_configured_session_ RTC_GUARDED_BY(thread_checker_);
+
+  // Counts number of detected audio glitches on the playout side.
+  int64_t num_detected_playout_glitches_ RTC_GUARDED_BY(thread_checker_);
+  int64_t last_playout_time_ RTC_GUARDED_BY(io_thread_checker_);
+
+  // Counts number of playout callbacks per call.
+  // The value is updated on the native I/O thread and later read on the
+  // creating thread (see thread_checker_) but at this stage no audio is
+  // active. Hence, it is a "thread safe" design and no lock is needed.
+  int64_t num_playout_callbacks_;
+
+  // Contains the time for when the last output volume change was detected.
+  int64_t last_output_volume_change_time_ RTC_GUARDED_BY(thread_checker_);
+
+  // Exposes private members for testing purposes only.
+  FRIEND_TEST_ALL_PREFIXES(AudioDeviceTest, testInterruptedAudioSession);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
diff --git a/modules/audio_device/ios/audio_device_ios.mm b/modules/audio_device/ios/audio_device_ios.mm
new file mode 100644
index 0000000..8fa34d1
--- /dev/null
+++ b/modules/audio_device/ios/audio_device_ios.mm
@@ -0,0 +1,908 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <AVFoundation/AVFoundation.h>
+#import <Foundation/Foundation.h>
+
+#include "modules/audio_device/ios/audio_device_ios.h"
+
+#include <cmath>
+
+#include "api/array_view.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/atomic_ops.h"
+#include "rtc_base/bind.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/critical_section.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/time_utils.h"
+#include "sdk/objc/native/src/audio/helpers.h"
+#include "system_wrappers/include/metrics.h"
+
+#import "modules/audio_device/ios/objc/RTCAudioSessionDelegateAdapter.h"
+#import "sdk/objc/base/RTCLogging.h"
+#import "sdk/objc/components/audio/RTCAudioSession+Private.h"
+#import "sdk/objc/components/audio/RTCAudioSession.h"
+#import "sdk/objc/components/audio/RTCAudioSessionConfiguration.h"
+
+namespace webrtc {
+
+#define LOGI() RTC_LOG(LS_INFO) << "AudioDeviceIOS::"
+
+#define LOG_AND_RETURN_IF_ERROR(error, message)    \
+  do {                                             \
+    OSStatus err = error;                          \
+    if (err) {                                     \
+      RTC_LOG(LS_ERROR) << message << ": " << err; \
+      return false;                                \
+    }                                              \
+  } while (0)
+
+#define LOG_IF_ERROR(error, message)               \
+  do {                                             \
+    OSStatus err = error;                          \
+    if (err) {                                     \
+      RTC_LOG(LS_ERROR) << message << ": " << err; \
+    }                                              \
+  } while (0)
+
+// Hardcoded delay estimates based on real measurements.
+// TODO(henrika): these values are not used in combination with built-in AEC.
+// Can most likely be removed.
+const UInt16 kFixedPlayoutDelayEstimate = 30;
+const UInt16 kFixedRecordDelayEstimate = 30;
+
+enum AudioDeviceMessageType : uint32_t {
+  kMessageTypeInterruptionBegin,
+  kMessageTypeInterruptionEnd,
+  kMessageTypeValidRouteChange,
+  kMessageTypeCanPlayOrRecordChange,
+  kMessageTypePlayoutGlitchDetected,
+  kMessageOutputVolumeChange,
+};
+
+using ios::CheckAndLogError;
+
+#if !defined(NDEBUG)
+// Returns true when the code runs on a device simulator.
+static bool DeviceIsSimulator() {
+  return ios::GetDeviceName() == "x86_64";
+}
+
+// Helper method that logs essential device information strings.
+static void LogDeviceInfo() {
+  RTC_LOG(LS_INFO) << "LogDeviceInfo";
+  @autoreleasepool {
+    RTC_LOG(LS_INFO) << " system name: " << ios::GetSystemName();
+    RTC_LOG(LS_INFO) << " system version: " << ios::GetSystemVersionAsString();
+    RTC_LOG(LS_INFO) << " device type: " << ios::GetDeviceType();
+    RTC_LOG(LS_INFO) << " device name: " << ios::GetDeviceName();
+    RTC_LOG(LS_INFO) << " process name: " << ios::GetProcessName();
+    RTC_LOG(LS_INFO) << " process ID: " << ios::GetProcessID();
+    RTC_LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString();
+    RTC_LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount();
+    RTC_LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled();
+#if TARGET_IPHONE_SIMULATOR
+    RTC_LOG(LS_INFO) << " TARGET_IPHONE_SIMULATOR is defined";
+#endif
+    RTC_LOG(LS_INFO) << " DeviceIsSimulator: " << DeviceIsSimulator();
+  }
+}
+#endif  // !defined(NDEBUG)
+
+AudioDeviceIOS::AudioDeviceIOS()
+    : audio_device_buffer_(nullptr),
+      audio_unit_(nullptr),
+      recording_(0),
+      playing_(0),
+      initialized_(false),
+      audio_is_initialized_(false),
+      is_interrupted_(false),
+      has_configured_session_(false),
+      num_detected_playout_glitches_(0),
+      last_playout_time_(0),
+      num_playout_callbacks_(0),
+      last_output_volume_change_time_(0) {
+  LOGI() << "ctor" << ios::GetCurrentThreadDescription();
+  io_thread_checker_.DetachFromThread();
+  thread_ = rtc::Thread::Current();
+  audio_session_observer_ = [[RTCAudioSessionDelegateAdapter alloc] initWithObserver:this];
+}
+
+AudioDeviceIOS::~AudioDeviceIOS() {
+  LOGI() << "~dtor" << ios::GetCurrentThreadDescription();
+  audio_session_observer_ = nil;
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  Terminate();
+}
+
+void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+  LOGI() << "AttachAudioBuffer";
+  RTC_DCHECK(audioBuffer);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  audio_device_buffer_ = audioBuffer;
+}
+
+AudioDeviceGeneric::InitStatus AudioDeviceIOS::Init() {
+  LOGI() << "Init";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (initialized_) {
+    return InitStatus::OK;
+  }
+#if !defined(NDEBUG)
+  LogDeviceInfo();
+#endif
+  // Store the preferred sample rate and preferred number of channels here
+  // already. They have not been set and confirmed yet, since configureForWebRTC
+  // is not called until audio is about to start. However, it makes sense to
+  // store the parameters now and verify them at a later stage.
+  RTCAudioSessionConfiguration* config = [RTCAudioSessionConfiguration webRTCConfiguration];
+  playout_parameters_.reset(config.sampleRate, config.outputNumberOfChannels);
+  record_parameters_.reset(config.sampleRate, config.inputNumberOfChannels);
+  // Ensure that the audio device buffer (ADB) knows about the internal audio
+  // parameters. Note that, even if we are unable to get a mono audio session,
+  // we will always tell the I/O audio unit to do a channel format conversion
+  // to guarantee mono on the "input side" of the audio unit.
+  UpdateAudioDeviceBuffer();
+  initialized_ = true;
+  return InitStatus::OK;
+}
+
+int32_t AudioDeviceIOS::Terminate() {
+  LOGI() << "Terminate";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!initialized_) {
+    return 0;
+  }
+  StopPlayout();
+  StopRecording();
+  initialized_ = false;
+  return 0;
+}
+
+bool AudioDeviceIOS::Initialized() const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return initialized_;
+}
+
+int32_t AudioDeviceIOS::InitPlayout() {
+  LOGI() << "InitPlayout";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_DCHECK(initialized_);
+  RTC_DCHECK(!audio_is_initialized_);
+  RTC_DCHECK(!playing_);
+  if (!audio_is_initialized_) {
+    if (!InitPlayOrRecord()) {
+      RTC_LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitPlayout!";
+      return -1;
+    }
+  }
+  audio_is_initialized_ = true;
+  return 0;
+}
+
+bool AudioDeviceIOS::PlayoutIsInitialized() const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return audio_is_initialized_;
+}
+
+bool AudioDeviceIOS::RecordingIsInitialized() const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return audio_is_initialized_;
+}
+
+int32_t AudioDeviceIOS::InitRecording() {
+  LOGI() << "InitRecording";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_DCHECK(initialized_);
+  RTC_DCHECK(!audio_is_initialized_);
+  RTC_DCHECK(!recording_);
+  if (!audio_is_initialized_) {
+    if (!InitPlayOrRecord()) {
+      RTC_LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitRecording!";
+      return -1;
+    }
+  }
+  audio_is_initialized_ = true;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::StartPlayout() {
+  LOGI() << "StartPlayout";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_DCHECK(audio_is_initialized_);
+  RTC_DCHECK(!playing_);
+  RTC_DCHECK(audio_unit_);
+  if (fine_audio_buffer_) {
+    fine_audio_buffer_->ResetPlayout();
+  }
+  if (!recording_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
+    if (!audio_unit_->Start()) {
+      RTCLogError(@"StartPlayout failed to start audio unit.");
+      return -1;
+    }
+    RTC_LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
+  }
+  rtc::AtomicOps::ReleaseStore(&playing_, 1);
+  num_playout_callbacks_ = 0;
+  num_detected_playout_glitches_ = 0;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::StopPlayout() {
+  LOGI() << "StopPlayout";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!audio_is_initialized_ || !playing_) {
+    return 0;
+  }
+  if (!recording_) {
+    ShutdownPlayOrRecord();
+    audio_is_initialized_ = false;
+  }
+  rtc::AtomicOps::ReleaseStore(&playing_, 0);
+
+  // Derive average number of calls to OnGetPlayoutData() between detected
+  // audio glitches and add the result to a histogram.
+  int average_number_of_playout_callbacks_between_glitches = 100000;
+  RTC_DCHECK_GE(num_playout_callbacks_, num_detected_playout_glitches_);
+  if (num_detected_playout_glitches_ > 0) {
+    average_number_of_playout_callbacks_between_glitches =
+        num_playout_callbacks_ / num_detected_playout_glitches_;
+  }
+  RTC_HISTOGRAM_COUNTS_100000("WebRTC.Audio.AveragePlayoutCallbacksBetweenGlitches",
+                              average_number_of_playout_callbacks_between_glitches);
+  RTCLog(@"Average number of playout callbacks between glitches: %d",
+         average_number_of_playout_callbacks_between_glitches);
+  return 0;
+}
+
+int32_t AudioDeviceIOS::StartRecording() {
+  LOGI() << "StartRecording";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_DCHECK(audio_is_initialized_);
+  RTC_DCHECK(!recording_);
+  RTC_DCHECK(audio_unit_);
+  if (fine_audio_buffer_) {
+    fine_audio_buffer_->ResetRecord();
+  }
+  if (!playing_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
+    if (!audio_unit_->Start()) {
+      RTCLogError(@"StartRecording failed to start audio unit.");
+      return -1;
+    }
+    RTC_LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
+  }
+  rtc::AtomicOps::ReleaseStore(&recording_, 1);
+  return 0;
+}
+
+int32_t AudioDeviceIOS::StopRecording() {
+  LOGI() << "StopRecording";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!audio_is_initialized_ || !recording_) {
+    return 0;
+  }
+  if (!playing_) {
+    ShutdownPlayOrRecord();
+    audio_is_initialized_ = false;
+  }
+  rtc::AtomicOps::ReleaseStore(&recording_, 0);
+  return 0;
+}
+
+int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
+  delayMS = kFixedPlayoutDelayEstimate;
+  return 0;
+}
+
+int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const {
+  LOGI() << "GetPlayoutAudioParameters";
+  RTC_DCHECK(playout_parameters_.is_valid());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  *params = playout_parameters_;
+  return 0;
+}
+
+int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
+  LOGI() << "GetRecordAudioParameters";
+  RTC_DCHECK(record_parameters_.is_valid());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  *params = record_parameters_;
+  return 0;
+}
+
+void AudioDeviceIOS::OnInterruptionBegin() {
+  RTC_DCHECK(thread_);
+  LOGI() << "OnInterruptionBegin";
+  thread_->Post(RTC_FROM_HERE, this, kMessageTypeInterruptionBegin);
+}
+
+void AudioDeviceIOS::OnInterruptionEnd() {
+  RTC_DCHECK(thread_);
+  LOGI() << "OnInterruptionEnd";
+  thread_->Post(RTC_FROM_HERE, this, kMessageTypeInterruptionEnd);
+}
+
+void AudioDeviceIOS::OnValidRouteChange() {
+  RTC_DCHECK(thread_);
+  thread_->Post(RTC_FROM_HERE, this, kMessageTypeValidRouteChange);
+}
+
+void AudioDeviceIOS::OnCanPlayOrRecordChange(bool can_play_or_record) {
+  RTC_DCHECK(thread_);
+  thread_->Post(RTC_FROM_HERE,
+                this,
+                kMessageTypeCanPlayOrRecordChange,
+                new rtc::TypedMessageData<bool>(can_play_or_record));
+}
+
+void AudioDeviceIOS::OnChangedOutputVolume() {
+  RTC_DCHECK(thread_);
+  thread_->Post(RTC_FROM_HERE, this, kMessageOutputVolumeChange);
+}
+
+OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+                                               const AudioTimeStamp* time_stamp,
+                                               UInt32 bus_number,
+                                               UInt32 num_frames,
+                                               AudioBufferList* /* io_data */) {
+  RTC_DCHECK_RUN_ON(&io_thread_checker_);
+  OSStatus result = noErr;
+  // Simply return if recording is not enabled.
+  if (!rtc::AtomicOps::AcquireLoad(&recording_)) return result;
+
+  // Set the size of our own audio buffer and clear it first to avoid copying
+  // in combination with potential reallocations.
+  // On real iOS devices, the size will only be set once (at first callback).
+  record_audio_buffer_.Clear();
+  record_audio_buffer_.SetSize(num_frames);
+
+  // Allocate AudioBuffers to be used as storage for the received audio.
+  // The AudioBufferList structure works as a placeholder for the
+  // AudioBuffer structure, which holds a pointer to the actual data buffer
+  // in |record_audio_buffer_|. Recorded audio will be rendered into this memory
+  // at each input callback when calling AudioUnitRender().
+  AudioBufferList audio_buffer_list;
+  audio_buffer_list.mNumberBuffers = 1;
+  AudioBuffer* audio_buffer = &audio_buffer_list.mBuffers[0];
+  audio_buffer->mNumberChannels = record_parameters_.channels();
+  audio_buffer->mDataByteSize =
+      record_audio_buffer_.size() * VoiceProcessingAudioUnit::kBytesPerSample;
+  audio_buffer->mData = reinterpret_cast<int8_t*>(record_audio_buffer_.data());
+
+  // Obtain the recorded audio samples by initiating a rendering cycle.
+  // Since it happens on the input bus, the |io_data| parameter is a reference
+  // to the preallocated audio buffer list that the audio unit renders into.
+  // We can make the audio unit provide a buffer instead in io_data, but we
+  // currently just use our own.
+  // TODO(henrika): should error handling be improved?
+  result = audio_unit_->Render(flags, time_stamp, bus_number, num_frames, &audio_buffer_list);
+  if (result != noErr) {
+    RTCLogError(@"Failed to render audio.");
+    return result;
+  }
+
+  // Get a pointer to the recorded audio and send it to the WebRTC ADB.
+  // Use the FineAudioBuffer instance to convert between native buffer size
+  // and the 10ms buffer size used by WebRTC.
+  fine_audio_buffer_->DeliverRecordedData(record_audio_buffer_, kFixedRecordDelayEstimate);
+  return noErr;
+}
+
+OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
+                                          const AudioTimeStamp* time_stamp,
+                                          UInt32 bus_number,
+                                          UInt32 num_frames,
+                                          AudioBufferList* io_data) {
+  RTC_DCHECK_RUN_ON(&io_thread_checker_);
+  // Verify 16-bit, noninterleaved mono PCM signal format.
+  RTC_DCHECK_EQ(1, io_data->mNumberBuffers);
+  AudioBuffer* audio_buffer = &io_data->mBuffers[0];
+  RTC_DCHECK_EQ(1, audio_buffer->mNumberChannels);
+
+  // Produce silence and give the audio unit a hint about it if playout is
+  // not activated.
+  if (!rtc::AtomicOps::AcquireLoad(&playing_)) {
+    const size_t size_in_bytes = audio_buffer->mDataByteSize;
+    RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, num_frames);
+    *flags |= kAudioUnitRenderAction_OutputIsSilence;
+    memset(static_cast<int8_t*>(audio_buffer->mData), 0, size_in_bytes);
+    return noErr;
+  }
+
+  // Measure time since last call to OnGetPlayoutData() and see if it is larger
+  // than a well-defined threshold which depends on the current IO buffer size.
+  // If so, we have an indication of a glitch in the output audio since the
+  // core audio layer will most likely run dry in this state.
+  ++num_playout_callbacks_;
+  const int64_t now_time = rtc::TimeMillis();
+  if (time_stamp->mSampleTime != num_frames) {
+    const int64_t delta_time = now_time - last_playout_time_;
+    const int glitch_threshold = 1.6 * playout_parameters_.GetBufferSizeInMilliseconds();
+    if (delta_time > glitch_threshold) {
+      RTCLogWarning(@"Possible playout audio glitch detected.\n"
+                     "  Time since last OnGetPlayoutData was %lld ms.\n",
+                    delta_time);
+      // Exclude extreme delta values since they most likely do not correspond
+      // to a real glitch. Instead, the most probable cause is that a headset
+      // has been plugged in or out. There are more direct ways to detect
+      // audio device changes (see HandleValidRouteChange()) but experiments
+      // show that using them leads to more complex implementations.
+      // TODO(henrika): more tests might be needed to come up with an even
+      // better upper limit.
+      if (glitch_threshold < 120 && delta_time > 120) {
+        RTCLog(@"Glitch warning is ignored. Probably caused by device switch.");
+      } else {
+        thread_->Post(RTC_FROM_HERE, this, kMessageTypePlayoutGlitchDetected);
+      }
+    }
+  }
+  last_playout_time_ = now_time;
+
+  // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
+  // the native I/O audio unit) and copy the result to the audio buffer in the
+  // |io_data| destination.
+  fine_audio_buffer_->GetPlayoutData(
+      rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_buffer->mData), num_frames),
+      kFixedPlayoutDelayEstimate);
+  return noErr;
+}
+
+void AudioDeviceIOS::OnMessage(rtc::Message* msg) {
+  switch (msg->message_id) {
+    case kMessageTypeInterruptionBegin:
+      HandleInterruptionBegin();
+      break;
+    case kMessageTypeInterruptionEnd:
+      HandleInterruptionEnd();
+      break;
+    case kMessageTypeValidRouteChange:
+      HandleValidRouteChange();
+      break;
+    case kMessageTypeCanPlayOrRecordChange: {
+      rtc::TypedMessageData<bool>* data = static_cast<rtc::TypedMessageData<bool>*>(msg->pdata);
+      HandleCanPlayOrRecordChange(data->data());
+      delete data;
+      break;
+    }
+    case kMessageTypePlayoutGlitchDetected:
+      HandlePlayoutGlitchDetected();
+      break;
+    case kMessageOutputVolumeChange:
+      HandleOutputVolumeChange();
+      break;
+  }
+}
+
+void AudioDeviceIOS::HandleInterruptionBegin() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCLog(@"Interruption begin. IsInterrupted changed from %d to 1.", is_interrupted_);
+  if (audio_unit_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
+    RTCLog(@"Stopping the audio unit due to interruption begin.");
+    if (!audio_unit_->Stop()) {
+      RTCLogError(@"Failed to stop the audio unit for interruption begin.");
+    } else {
+      PrepareForNewStart();
+    }
+  }
+  is_interrupted_ = true;
+}
+
+void AudioDeviceIOS::HandleInterruptionEnd() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCLog(@"Interruption ended. IsInterrupted changed from %d to 0. "
+          "Updating audio unit state.",
+         is_interrupted_);
+  is_interrupted_ = false;
+  UpdateAudioUnit([RTCAudioSession sharedInstance].canPlayOrRecord);
+}
+
+void AudioDeviceIOS::HandleValidRouteChange() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  RTCLog(@"%@", session);
+  HandleSampleRateChange(session.sampleRate);
+}
+
+void AudioDeviceIOS::HandleCanPlayOrRecordChange(bool can_play_or_record) {
+  RTCLog(@"Handling CanPlayOrRecord change to: %d", can_play_or_record);
+  UpdateAudioUnit(can_play_or_record);
+}
+
+void AudioDeviceIOS::HandleSampleRateChange(float sample_rate) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCLog(@"Handling sample rate change to %f.", sample_rate);
+
+  // Don't do anything if we're interrupted.
+  if (is_interrupted_) {
+    RTCLog(@"Ignoring sample rate change to %f due to interruption.", sample_rate);
+    return;
+  }
+
+  // If we don't have an audio unit yet, or the audio unit is uninitialized,
+  // there is no work to do.
+  if (!audio_unit_ || audio_unit_->GetState() < VoiceProcessingAudioUnit::kInitialized) {
+    return;
+  }
+
+  // The audio unit is already initialized or started.
+  // Check to see if the sample rate or buffer size has changed.
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  const double session_sample_rate = session.sampleRate;
+  const NSTimeInterval session_buffer_duration = session.IOBufferDuration;
+  const size_t session_frames_per_buffer =
+      static_cast<size_t>(session_sample_rate * session_buffer_duration + .5);
+  const double current_sample_rate = playout_parameters_.sample_rate();
+  const size_t current_frames_per_buffer = playout_parameters_.frames_per_buffer();
+  RTCLog(@"Handling playout sample rate change to: %f\n"
+          "  Session sample rate: %f frames_per_buffer: %lu\n"
+          "  ADM sample rate: %f frames_per_buffer: %lu",
+         sample_rate,
+         session_sample_rate,
+         (unsigned long)session_frames_per_buffer,
+         current_sample_rate,
+         (unsigned long)current_frames_per_buffer);
+
+  // Sample rate and buffer size are the same, no work to do.
+  if (std::abs(current_sample_rate - session_sample_rate) <= DBL_EPSILON &&
+      current_frames_per_buffer == session_frames_per_buffer) {
+    RTCLog(@"Ignoring sample rate change since audio parameters are intact.");
+    return;
+  }
+
+  // Extra sanity check to ensure that the new sample rate is valid.
+  if (session_sample_rate <= 0.0) {
+    RTCLogError(@"Sample rate is invalid: %f", session_sample_rate);
+    return;
+  }
+
+  // We need to adjust our format and buffer sizes.
+  // The stream format is about to be changed and it requires that we first
+  // stop and uninitialize the audio unit to deallocate its resources.
+  RTCLog(@"Stopping and uninitializing audio unit to adjust buffers.");
+  bool restart_audio_unit = false;
+  if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
+    audio_unit_->Stop();
+    restart_audio_unit = true;
+    PrepareForNewStart();
+  }
+  if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
+    audio_unit_->Uninitialize();
+  }
+
+  // Allocate new buffers given the new stream format.
+  SetupAudioBuffersForActiveAudioSession();
+
+  // Initialize the audio unit again with the new sample rate.
+  RTC_DCHECK_EQ(playout_parameters_.sample_rate(), session_sample_rate);
+  if (!audio_unit_->Initialize(session_sample_rate)) {
+    RTCLogError(@"Failed to initialize the audio unit with sample rate: %f", session_sample_rate);
+    return;
+  }
+
+  // Restart the audio unit if it was already running.
+  if (restart_audio_unit && !audio_unit_->Start()) {
+    RTCLogError(@"Failed to start audio unit with sample rate: %f", session_sample_rate);
+    return;
+  }
+  RTCLog(@"Successfully handled sample rate change.");
+}
+
+void AudioDeviceIOS::HandlePlayoutGlitchDetected() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  // Don't update metrics if we're interrupted since a "glitch" is expected
+  // in this state.
+  if (is_interrupted_) {
+    RTCLog(@"Ignoring audio glitch due to interruption.");
+    return;
+  }
+  // Avoid doing glitch detection for two seconds after a volume change
+  // has been detected, to reduce the risk of false alarms.
+  if (last_output_volume_change_time_ > 0 &&
+      rtc::TimeSince(last_output_volume_change_time_) < 2000) {
+    RTCLog(@"Ignoring audio glitch due to recent output volume change.");
+    return;
+  }
+  num_detected_playout_glitches_++;
+  RTCLog(@"Number of detected playout glitches: %lld", num_detected_playout_glitches_);
+
+  int64_t glitch_count = num_detected_playout_glitches_;
+  dispatch_async(dispatch_get_main_queue(), ^{
+    RTCAudioSession* session = [RTCAudioSession sharedInstance];
+    [session notifyDidDetectPlayoutGlitch:glitch_count];
+  });
+}
+
+void AudioDeviceIOS::HandleOutputVolumeChange() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCLog(@"Output volume change detected.");
+  // Store time of this detection so it can be used to defer detection of
+  // glitches too close in time to this event.
+  last_output_volume_change_time_ = rtc::TimeMillis();
+}
+
+void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
+  LOGI() << "UpdateAudioDevicebuffer";
+  // AttachAudioBuffer() is called at construction by the main class but check
+  // just in case.
+  RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
+  RTC_DCHECK_GT(playout_parameters_.sample_rate(), 0);
+  RTC_DCHECK_GT(record_parameters_.sample_rate(), 0);
+  RTC_DCHECK_EQ(playout_parameters_.channels(), 1);
+  RTC_DCHECK_EQ(record_parameters_.channels(), 1);
+  // Inform the audio device buffer (ADB) about the new audio format.
+  audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
+  audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
+  audio_device_buffer_->SetRecordingSampleRate(record_parameters_.sample_rate());
+  audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
+}
+
+void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
+  LOGI() << "SetupAudioBuffersForActiveAudioSession";
+  // Verify the current values once the audio session has been activated.
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  double sample_rate = session.sampleRate;
+  NSTimeInterval io_buffer_duration = session.IOBufferDuration;
+  RTCLog(@"%@", session);
+
+  // Log a warning message for the case when we are unable to set the preferred
+  // hardware sample rate but continue and use the non-ideal sample rate after
+  // reinitializing the audio parameters. Most BT headsets only support 8kHz or
+  // 16kHz.
+  RTCAudioSessionConfiguration* webRTCConfig = [RTCAudioSessionConfiguration webRTCConfiguration];
+  if (sample_rate != webRTCConfig.sampleRate) {
+    RTC_LOG(LS_WARNING) << "Unable to set the preferred sample rate";
+  }
+
+  // Crash reports indicate that, in rare cases, the reported sample rate can
+  // be less than or equal to zero. If that happens and a valid sample rate
+  // has already been set during initialization, the best guess we can do is
+  // to reuse the current sample rate.
+  if (sample_rate <= DBL_EPSILON && playout_parameters_.sample_rate() > 0) {
+    RTCLogError(@"Reported rate is invalid: %f. "
+                 "Using %d as sample rate instead.",
+                sample_rate, playout_parameters_.sample_rate());
+    sample_rate = playout_parameters_.sample_rate();
+  }
+
+  // At this stage, we also know the exact IO buffer duration and can add
+  // that info to the existing audio parameters where it is converted into
+  // number of audio frames.
+  // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
+  // Hence, 128 is the size we expect to see in upcoming render callbacks.
+  playout_parameters_.reset(sample_rate, playout_parameters_.channels(), io_buffer_duration);
+  RTC_DCHECK(playout_parameters_.is_complete());
+  record_parameters_.reset(sample_rate, record_parameters_.channels(), io_buffer_duration);
+  RTC_DCHECK(record_parameters_.is_complete());
+  RTC_LOG(LS_INFO) << " frames per I/O buffer: " << playout_parameters_.frames_per_buffer();
+  RTC_LOG(LS_INFO) << " bytes per I/O buffer: " << playout_parameters_.GetBytesPerBuffer();
+  RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(), record_parameters_.GetBytesPerBuffer());
+
+  // Update the ADB parameters since the sample rate might have changed.
+  UpdateAudioDeviceBuffer();
+
+  // Create a modified audio buffer class which allows us to ask for,
+  // or deliver, any number of samples (and not only multiples of 10ms) to match
+  // the native audio unit buffer size.
+  RTC_DCHECK(audio_device_buffer_);
+  fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_));
+}
+
+bool AudioDeviceIOS::CreateAudioUnit() {
+  RTC_DCHECK(!audio_unit_);
+
+  audio_unit_.reset(new VoiceProcessingAudioUnit(this));
+  if (!audio_unit_->Init()) {
+    audio_unit_.reset();
+    return false;
+  }
+
+  return true;
+}
+
+void AudioDeviceIOS::UpdateAudioUnit(bool can_play_or_record) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCLog(@"Updating audio unit state. CanPlayOrRecord=%d IsInterrupted=%d",
+         can_play_or_record,
+         is_interrupted_);
+
+  if (is_interrupted_) {
+    RTCLog(@"Ignoring audio unit update due to interruption.");
+    return;
+  }
+
+  // If we're not initialized there is nothing to do; the audio unit will be
+  // created and initialized during the next call to InitPlayOrRecord().
+  if (!audio_is_initialized_) return;
+
+  // If we're initialized, we must have an audio unit.
+  RTC_DCHECK(audio_unit_);
+
+  bool should_initialize_audio_unit = false;
+  bool should_uninitialize_audio_unit = false;
+  bool should_start_audio_unit = false;
+  bool should_stop_audio_unit = false;
+
+  switch (audio_unit_->GetState()) {
+    case VoiceProcessingAudioUnit::kInitRequired:
+      RTCLog(@"VPAU state: InitRequired");
+      RTC_NOTREACHED();
+      break;
+    case VoiceProcessingAudioUnit::kUninitialized:
+      RTCLog(@"VPAU state: Uninitialized");
+      should_initialize_audio_unit = can_play_or_record;
+      should_start_audio_unit = should_initialize_audio_unit && (playing_ || recording_);
+      break;
+    case VoiceProcessingAudioUnit::kInitialized:
+      RTCLog(@"VPAU state: Initialized");
+      should_start_audio_unit = can_play_or_record && (playing_ || recording_);
+      should_uninitialize_audio_unit = !can_play_or_record;
+      break;
+    case VoiceProcessingAudioUnit::kStarted:
+      RTCLog(@"VPAU state: Started");
+      RTC_DCHECK(playing_ || recording_);
+      should_stop_audio_unit = !can_play_or_record;
+      should_uninitialize_audio_unit = should_stop_audio_unit;
+      break;
+  }
+
+  if (should_initialize_audio_unit) {
+    RTCLog(@"Initializing audio unit for UpdateAudioUnit");
+    ConfigureAudioSession();
+    SetupAudioBuffersForActiveAudioSession();
+    if (!audio_unit_->Initialize(playout_parameters_.sample_rate())) {
+      RTCLogError(@"Failed to initialize audio unit.");
+      return;
+    }
+  }
+
+  if (should_start_audio_unit) {
+    RTCLog(@"Starting audio unit for UpdateAudioUnit");
+    // Log session settings before trying to start audio streaming.
+    RTCAudioSession* session = [RTCAudioSession sharedInstance];
+    RTCLog(@"%@", session);
+    if (!audio_unit_->Start()) {
+      RTCLogError(@"Failed to start audio unit.");
+      return;
+    }
+  }
+
+  if (should_stop_audio_unit) {
+    RTCLog(@"Stopping audio unit for UpdateAudioUnit");
+    if (!audio_unit_->Stop()) {
+      RTCLogError(@"Failed to stop audio unit.");
+      return;
+    }
+  }
+
+  if (should_uninitialize_audio_unit) {
+    RTCLog(@"Uninitializing audio unit for UpdateAudioUnit");
+    audio_unit_->Uninitialize();
+    UnconfigureAudioSession();
+  }
+}
+
+bool AudioDeviceIOS::ConfigureAudioSession() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCLog(@"Configuring audio session.");
+  if (has_configured_session_) {
+    RTCLogWarning(@"Audio session already configured.");
+    return false;
+  }
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  [session lockForConfiguration];
+  bool success = [session configureWebRTCSession:nil];
+  [session unlockForConfiguration];
+  if (success) {
+    has_configured_session_ = true;
+    RTCLog(@"Configured audio session.");
+  } else {
+    RTCLog(@"Failed to configure audio session.");
+  }
+  return success;
+}
+
+void AudioDeviceIOS::UnconfigureAudioSession() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCLog(@"Unconfiguring audio session.");
+  if (!has_configured_session_) {
+    RTCLogWarning(@"Audio session already unconfigured.");
+    return;
+  }
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  [session lockForConfiguration];
+  [session unconfigureWebRTCSession:nil];
+  [session unlockForConfiguration];
+  has_configured_session_ = false;
+  RTCLog(@"Unconfigured audio session.");
+}
+
+bool AudioDeviceIOS::InitPlayOrRecord() {
+  LOGI() << "InitPlayOrRecord";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  // There should be no audio unit at this point.
+  if (!CreateAudioUnit()) {
+    return false;
+  }
+
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  // Subscribe to audio session events.
+  [session pushDelegate:audio_session_observer_];
+  is_interrupted_ = session.isInterrupted ? true : false;
+
+  // Lock the session to make configuration changes.
+  [session lockForConfiguration];
+  NSError* error = nil;
+  if (![session beginWebRTCSession:&error]) {
+    [session unlockForConfiguration];
+    RTCLogError(@"Failed to begin WebRTC session: %@", error.localizedDescription);
+    audio_unit_.reset();
+    return false;
+  }
+
+  // If we are ready to play or record, and if the audio session can be
+  // configured, then initialize the audio unit.
+  if (session.canPlayOrRecord) {
+    if (!ConfigureAudioSession()) {
+      // One possible reason for failure is if an attempt was made to use the
+      // audio session during or after a Media Services failure.
+      // See AVAudioSessionErrorCodeMediaServicesFailed for details.
+      [session unlockForConfiguration];
+      audio_unit_.reset();
+      return false;
+    }
+    SetupAudioBuffersForActiveAudioSession();
+    audio_unit_->Initialize(playout_parameters_.sample_rate());
+  }
+
+  // Release the lock.
+  [session unlockForConfiguration];
+  return true;
+}
+
+void AudioDeviceIOS::ShutdownPlayOrRecord() {
+  LOGI() << "ShutdownPlayOrRecord";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  // Stop the audio unit to prevent any additional audio callbacks.
+  audio_unit_->Stop();
+
+  // Close and delete the voice-processing I/O unit.
+  audio_unit_.reset();
+
+  // Detach thread checker for the AURemoteIO::IOThread to ensure that the
+  // next session uses a fresh thread id.
+  io_thread_checker_.DetachFromThread();
+
+  // Remove audio session notification observers.
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  [session removeDelegate:audio_session_observer_];
+
+  // All I/O should be stopped or paused prior to deactivating the audio
+  // session, hence we deactivate it as the last action.
+  [session lockForConfiguration];
+  UnconfigureAudioSession();
+  [session endWebRTCSession:nil];
+  [session unlockForConfiguration];
+}
+
+void AudioDeviceIOS::PrepareForNewStart() {
+  LOGI() << "PrepareForNewStart";
+  // The audio unit has been stopped and preparations are needed for an upcoming
+  // restart. It will result in audio callbacks from a new native I/O thread
+  // which means that we must detach thread checkers here to be prepared for an
+  // upcoming new audio stream.
+  io_thread_checker_.DetachFromThread();
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_device/ios/audio_device_not_implemented_ios.mm b/modules/audio_device/ios/audio_device_not_implemented_ios.mm
new file mode 100644
index 0000000..2e99aea
--- /dev/null
+++ b/modules/audio_device/ios/audio_device_not_implemented_ios.mm
@@ -0,0 +1,205 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_device/ios/audio_device_ios.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+int32_t AudioDeviceIOS::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const {
+  audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
+  return 0;
+}
+
+int16_t AudioDeviceIOS::PlayoutDevices() {
+  // TODO(henrika): improve.
+  RTC_LOG_F(LS_WARNING) << "Not implemented";
+  return (int16_t)1;
+}
+
+int16_t AudioDeviceIOS::RecordingDevices() {
+  // TODO(henrika): improve.
+  RTC_LOG_F(LS_WARNING) << "Not implemented";
+  return (int16_t)1;
+}
+
+int32_t AudioDeviceIOS::InitSpeaker() {
+  return 0;
+}
+
+bool AudioDeviceIOS::SpeakerIsInitialized() const {
+  return true;
+}
+
+int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MinSpeakerVolume(uint32_t& minVolume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) {
+  RTC_LOG_F(LS_WARNING) << "Not implemented";
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::InitMicrophone() {
+  return 0;
+}
+
+bool AudioDeviceIOS::MicrophoneIsInitialized() const {
+  return true;
+}
+
+int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetStereoRecording(bool enable) {
+  RTC_LOG_F(LS_WARNING) << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const {
+  enabled = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) {
+  RTC_LOG_F(LS_WARNING) << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const {
+  enabled = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::PlayoutDeviceName(uint16_t index,
+                                          char name[kAdmMaxDeviceNameSize],
+                                          char guid[kAdmMaxGuidSize]) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::RecordingDeviceName(uint16_t index,
+                                            char name[kAdmMaxDeviceNameSize],
+                                            char guid[kAdmMaxGuidSize]) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
+  RTC_LOG_F(LS_WARNING) << "Not implemented";
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
+  available = true;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
+  available = true;
+  return 0;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_device/ios/audio_device_unittest_ios.mm b/modules/audio_device/ios/audio_device_unittest_ios.mm
new file mode 100644
index 0000000..74cfcc2
--- /dev/null
+++ b/modules/audio_device/ios/audio_device_unittest_ios.mm
@@ -0,0 +1,877 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <limits>
+#include <list>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "api/scoped_refptr.h"
+#include "modules/audio_device/audio_device_impl.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/include/mock_audio_transport.h"
+#include "modules/audio_device/ios/audio_device_ios.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/critical_section.h"
+#include "rtc_base/event.h"
+#include "rtc_base/format_macros.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/testsupport/file_utils.h"
+
+#import "sdk/objc/components/audio/RTCAudioSession+Private.h"
+#import "sdk/objc/components/audio/RTCAudioSession.h"
+
+using std::cout;
+using std::endl;
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Gt;
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::NotNull;
+using ::testing::Return;
+
+// #define ENABLE_DEBUG_PRINTF
+#ifdef ENABLE_DEBUG_PRINTF
+#define PRINTD(...) fprintf(stderr, __VA_ARGS__);
+#else
+#define PRINTD(...) ((void)0)
+#endif
+#define PRINT(...) fprintf(stderr, __VA_ARGS__);
+
+namespace webrtc {
+
+// Number of callbacks (input or output) the test waits for before we set
+// an event indicating that the test was OK.
+static const size_t kNumCallbacks = 10;
+// Max amount of time we wait for an event to be set while counting callbacks.
+static const int kTestTimeOutInMilliseconds = 10 * 1000;
+// Number of bits per PCM audio sample.
+static const size_t kBitsPerSample = 16;
+// Number of bytes per PCM audio sample.
+static const size_t kBytesPerSample = kBitsPerSample / 8;
+// Average number of audio callbacks per second assuming 10ms packet size.
+static const size_t kNumCallbacksPerSecond = 100;
+// Play out a test file during this time (unit is in seconds).
+static const int kFilePlayTimeInSec = 15;
+// Run the full-duplex test during this time (unit is in seconds).
+// Note that the first |kNumIgnoreFirstCallbacks| callbacks are ignored.
+static const int kFullDuplexTimeInSec = 10;
+// Wait for the callback sequence to stabilize by ignoring this amount of the
+// initial callbacks (avoids initial FIFO access).
+// Only used in the RunPlayoutAndRecordingInFullDuplex test.
+static const size_t kNumIgnoreFirstCallbacks = 50;
+// Sets the number of impulses per second in the latency test.
+// TODO(henrika): fine tune this setting for iOS.
+static const int kImpulseFrequencyInHz = 1;
+// Length of round-trip latency measurements. The number of transmitted
+// impulses is kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1
+// (i.e. four with the current settings).
+// TODO(henrika): fine tune this setting for iOS.
+static const int kMeasureLatencyTimeInSec = 5;
+// Utilized in round-trip latency measurements to avoid capturing noise samples.
+// TODO(henrika): fine tune this setting for iOS.
+static const int kImpulseThreshold = 50;
+static const char kTag[] = "[..........] ";
+
+enum TransportType {
+  kPlayout = 0x1,
+  kRecording = 0x2,
+};
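+
+// Full-duplex tests combine these as a bitmask, e.g.
+// MockAudioTransportIOS(kPlayout | kRecording); see the tests below.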
+
+// Interface for processing the audio stream. Real implementations can e.g.
+// run audio in loopback, read audio from a file or perform latency
+// measurements.
+class AudioStreamInterface {
+ public:
+  virtual void Write(const void* source, size_t num_frames) = 0;
+  virtual void Read(void* destination, size_t num_frames) = 0;
+
+ protected:
+  virtual ~AudioStreamInterface() {}
+};
+
+// Reads audio samples from a PCM file where the file is stored in memory at
+// construction.
+class FileAudioStream : public AudioStreamInterface {
+ public:
+  FileAudioStream(size_t num_callbacks,
+                  const std::string& file_name,
+                  int sample_rate)
+      : file_size_in_bytes_(0), sample_rate_(sample_rate), file_pos_(0) {
+    file_size_in_bytes_ = test::GetFileSize(file_name);
+    EXPECT_GE(file_size_in_callbacks(), num_callbacks)
+        << "Size of test file is not large enough to last during the test.";
+    const size_t num_16bit_samples = file_size_in_bytes_ / kBytesPerSample;
+    file_.reset(new int16_t[num_16bit_samples]);
+    FILE* audio_file = fopen(file_name.c_str(), "rb");
+    EXPECT_NE(audio_file, nullptr);
+    size_t num_samples_read =
+        fread(file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
+    EXPECT_EQ(num_samples_read, num_16bit_samples);
+    fclose(audio_file);
+  }
+
+  // AudioStreamInterface::Write() is not implemented.
+  void Write(const void* source, size_t num_frames) override {}
+
+  // Read samples from file stored in memory (at construction) and copy
+  // |num_frames| (<=> 10ms) to the |destination| byte buffer.
+  void Read(void* destination, size_t num_frames) override {
+    memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
+           num_frames * sizeof(int16_t));
+    file_pos_ += num_frames;
+  }
+
+  int file_size_in_seconds() const {
+    return static_cast<int>(
+        file_size_in_bytes_ / (kBytesPerSample * sample_rate_));
+  }
+  size_t file_size_in_callbacks() const {
+    return file_size_in_seconds() * kNumCallbacksPerSecond;
+  }
+
+ private:
+  size_t file_size_in_bytes_;
+  int sample_rate_;
+  std::unique_ptr<int16_t[]> file_;
+  size_t file_pos_;
+};
+
+// Simple first in first out (FIFO) class that wraps a list of 16-bit audio
+// buffers of fixed size and allows Write and Read operations. The idea is to
+// store recorded audio buffers (using Write) and then read (using Read) these
+// stored buffers with as short a delay as possible when the audio layer needs
+// data to play out. The number of buffers in the FIFO will stabilize under
+// normal conditions since there will be a balance between Write and Read calls.
+// The container is a std::list and access is protected with a lock since each
+// side (playout and recording) is driven by its own thread.
+class FifoAudioStream : public AudioStreamInterface {
+ public:
+  explicit FifoAudioStream(size_t frames_per_buffer)
+      : frames_per_buffer_(frames_per_buffer),
+        bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
+        fifo_(new AudioBufferList),
+        largest_size_(0),
+        total_written_elements_(0),
+        write_count_(0) {
+    EXPECT_NE(fifo_.get(), nullptr);
+  }
+
+  ~FifoAudioStream() { Flush(); }
+
+  // Allocate new memory, copy |num_frames| samples from |source| into memory
+  // and add a pointer to the memory block to the end of the list.
+  // Increases the size of the FIFO by one element.
+  void Write(const void* source, size_t num_frames) override {
+    ASSERT_EQ(num_frames, frames_per_buffer_);
+    PRINTD("+");
+    if (write_count_++ < kNumIgnoreFirstCallbacks) {
+      return;
+    }
+    int16_t* memory = new int16_t[frames_per_buffer_];
+    memcpy(static_cast<int16_t*>(&memory[0]), source, bytes_per_buffer_);
+    rtc::CritScope lock(&lock_);
+    fifo_->push_back(memory);
+    const size_t size = fifo_->size();
+    if (size > largest_size_) {
+      largest_size_ = size;
+      PRINTD("(%" PRIuS ")", largest_size_);
+    }
+    total_written_elements_ += size;
+  }
+
+  // Read the pointer to the data buffer at the front of the list, copy
+  // |num_frames| of stored data into |destination| and free the memory.
+  // Decreases the size of the FIFO by one element.
+  void Read(void* destination, size_t num_frames) override {
+    ASSERT_EQ(num_frames, frames_per_buffer_);
+    PRINTD("-");
+    rtc::CritScope lock(&lock_);
+    if (fifo_->empty()) {
+      memset(destination, 0, bytes_per_buffer_);
+    } else {
+      int16_t* memory = fifo_->front();
+      fifo_->pop_front();
+      memcpy(destination, static_cast<int16_t*>(&memory[0]), bytes_per_buffer_);
+      delete[] memory;
+    }
+  }
+
+  size_t size() const { return fifo_->size(); }
+
+  size_t largest_size() const { return largest_size_; }
+
+  size_t average_size() const {
+    return (total_written_elements_ == 0)
+               ? 0
+               : static_cast<size_t>(
+                     0.5 + static_cast<float>(total_written_elements_) /
+                               (write_count_ - kNumIgnoreFirstCallbacks));
+  }
+
+ private:
+  void Flush() {
+    for (auto it = fifo_->begin(); it != fifo_->end(); ++it) {
+      delete[] *it;
+    }
+    fifo_->clear();
+  }
+
+  using AudioBufferList = std::list<int16_t*>;
+  rtc::CriticalSection lock_;
+  const size_t frames_per_buffer_;
+  const size_t bytes_per_buffer_;
+  std::unique_ptr<AudioBufferList> fifo_;
+  size_t largest_size_;
+  size_t total_written_elements_;
+  size_t write_count_;
+};
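+
+// A minimal usage sketch (illustrative values; 480 frames <=> 10 ms at
+// 48 kHz): the recording side stores each 10 ms buffer and the playout side
+// drains one, so the FIFO stays near one buffer when the callback sequence
+// is balanced:
+//
+//   FifoAudioStream fifo(480);
+//   fifo.Write(recorded_buffer, 480);  // Driven by the recording thread.
+//   fifo.Read(playout_buffer, 480);    // Driven by the playout thread.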
+
+// Inserts periodic impulses and measures the latency between the time of
+// transmission and time of receiving the same impulse.
+// Usage requires special hardware called an Audio Loopback Dongle.
+// See http://source.android.com/devices/audio/loopback.html for details.
+class LatencyMeasuringAudioStream : public AudioStreamInterface {
+ public:
+  explicit LatencyMeasuringAudioStream(size_t frames_per_buffer)
+      : frames_per_buffer_(frames_per_buffer),
+        bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
+        play_count_(0),
+        rec_count_(0),
+        pulse_time_(0) {}
+
+  // Insert periodic impulses in first two samples of |destination|.
+  void Read(void* destination, size_t num_frames) override {
+    ASSERT_EQ(num_frames, frames_per_buffer_);
+    if (play_count_ == 0) {
+      PRINT("[");
+    }
+    play_count_++;
+    memset(destination, 0, bytes_per_buffer_);
+    if (play_count_ % (kNumCallbacksPerSecond / kImpulseFrequencyInHz) == 0) {
+      if (pulse_time_ == 0) {
+        pulse_time_ = rtc::TimeMillis();
+      }
+      PRINT(".");
+      const int16_t impulse = std::numeric_limits<int16_t>::max();
+      int16_t* ptr16 = static_cast<int16_t*>(destination);
+      for (size_t i = 0; i < 2; ++i) {
+        ptr16[i] = impulse;
+      }
+    }
+  }
+
+  // Detect received impulses in |source|, derive time between transmission and
+  // detection and add the calculated delay to list of latencies.
+  void Write(const void* source, size_t num_frames) override {
+    ASSERT_EQ(num_frames, frames_per_buffer_);
+    rec_count_++;
+    if (pulse_time_ == 0) {
+      // Avoid detecting a new impulse response until a new impulse has
+      // been transmitted (which sets |pulse_time_| to a value larger than
+      // zero).
+      return;
+    }
+    const int16_t* ptr16 = static_cast<const int16_t*>(source);
+    std::vector<int16_t> vec(ptr16, ptr16 + num_frames);
+    // Find max value in the audio buffer.
+    int max = *std::max_element(vec.begin(), vec.end());
+    // Find index (element position in vector) of the max element.
+    int index_of_max =
+        std::distance(vec.begin(), std::find(vec.begin(), vec.end(), max));
+    if (max > kImpulseThreshold) {
+      PRINTD("(%d,%d)", max, index_of_max);
+      int64_t now_time = rtc::TimeMillis();
+      int extra_delay = IndexToMilliseconds(static_cast<double>(index_of_max));
+      PRINTD("[%d]", static_cast<int>(now_time - pulse_time_));
+      PRINTD("[%d]", extra_delay);
+      // Total latency is the difference between transmit time and detection
+      // time plus the extra delay within the buffer in which we detected the
+      // received impulse. It is transmitted at sample 0 but can be received
+      // at sample N where N > 0. The term |extra_delay| accounts for N and it
+      // is a value between 0 and 10ms.
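+      // Worked example with illustrative numbers: an impulse transmitted at
+      // pulse_time_ = 1000 ms and detected at now_time = 1180 ms at sample
+      // index 240 of a 480-frame (10 ms) buffer yields extra_delay =
+      // IndexToMilliseconds(240) = 5 ms and a total latency of 185 ms.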
+      latencies_.push_back(now_time - pulse_time_ + extra_delay);
+      pulse_time_ = 0;
+    } else {
+      PRINTD("-");
+    }
+  }
+
+  size_t num_latency_values() const { return latencies_.size(); }
+
+  int min_latency() const {
+    if (latencies_.empty())
+      return 0;
+    return *std::min_element(latencies_.begin(), latencies_.end());
+  }
+
+  int max_latency() const {
+    if (latencies_.empty())
+      return 0;
+    return *std::max_element(latencies_.begin(), latencies_.end());
+  }
+
+  int average_latency() const {
+    if (latencies_.empty())
+      return 0;
+    return 0.5 +
+           static_cast<double>(
+               std::accumulate(latencies_.begin(), latencies_.end(), 0)) /
+               latencies_.size();
+  }
+
+  void PrintResults() const {
+    PRINT("] ");
+    for (auto it = latencies_.begin(); it != latencies_.end(); ++it) {
+      PRINT("%d ", *it);
+    }
+    PRINT("\n");
+    PRINT("%s[min, max, avg]=[%d, %d, %d] ms\n", kTag, min_latency(),
+          max_latency(), average_latency());
+  }
+
+  int IndexToMilliseconds(double index) const {
+    return 10.0 * (index / frames_per_buffer_) + 0.5;
+  }
+
+ private:
+  const size_t frames_per_buffer_;
+  const size_t bytes_per_buffer_;
+  size_t play_count_;
+  size_t rec_count_;
+  int64_t pulse_time_;
+  std::vector<int> latencies_;
+};
+
+// Mocks the AudioTransport object and proxies actions for the two callbacks
+// (RecordedDataIsAvailable and NeedMorePlayData) to different implementations
+// of AudioStreamInterface.
+class MockAudioTransportIOS : public test::MockAudioTransport {
+ public:
+  explicit MockAudioTransportIOS(int type)
+      : test_is_done_(nullptr),
+        num_callbacks_(0),
+        type_(type),
+        play_count_(0),
+        rec_count_(0),
+        audio_stream_(nullptr) {}
+
+  virtual ~MockAudioTransportIOS() {}
+
+  // Set default actions of the mock object. We are delegating to fake
+  // implementations (of AudioStreamInterface) here.
+  void HandleCallbacks(rtc::Event* test_is_done,
+                       AudioStreamInterface* audio_stream,
+                       size_t num_callbacks) {
+    test_is_done_ = test_is_done;
+    audio_stream_ = audio_stream;
+    num_callbacks_ = num_callbacks;
+    if (play_mode()) {
+      ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
+          .WillByDefault(
+              Invoke(this, &MockAudioTransportIOS::RealNeedMorePlayData));
+    }
+    if (rec_mode()) {
+      ON_CALL(*this, RecordedDataIsAvailable(_, _, _, _, _, _, _, _, _, _))
+          .WillByDefault(Invoke(
+              this, &MockAudioTransportIOS::RealRecordedDataIsAvailable));
+    }
+  }
+
+  int32_t RealRecordedDataIsAvailable(const void* audioSamples,
+                                      const size_t nSamples,
+                                      const size_t nBytesPerSample,
+                                      const size_t nChannels,
+                                      const uint32_t samplesPerSec,
+                                      const uint32_t totalDelayMS,
+                                      const int32_t clockDrift,
+                                      const uint32_t currentMicLevel,
+                                      const bool keyPressed,
+                                      uint32_t& newMicLevel) {
+    EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
+    rec_count_++;
+    // Process the recorded audio stream if an AudioStreamInterface
+    // implementation exists.
+    if (audio_stream_) {
+      audio_stream_->Write(audioSamples, nSamples);
+    }
+    if (ReceivedEnoughCallbacks()) {
+      if (test_is_done_) {
+        test_is_done_->Set();
+      }
+    }
+    return 0;
+  }
+
+  int32_t RealNeedMorePlayData(const size_t nSamples,
+                               const size_t nBytesPerSample,
+                               const size_t nChannels,
+                               const uint32_t samplesPerSec,
+                               void* audioSamples,
+                               size_t& nSamplesOut,
+                               int64_t* elapsed_time_ms,
+                               int64_t* ntp_time_ms) {
+    EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
+    play_count_++;
+    nSamplesOut = nSamples;
+    // Read (possibly processed) audio stream samples to be played out if an
+    // AudioStreamInterface implementation exists.
+    if (audio_stream_) {
+      audio_stream_->Read(audioSamples, nSamples);
+    } else {
+      memset(audioSamples, 0, nSamples * nBytesPerSample);
+    }
+    if (ReceivedEnoughCallbacks()) {
+      if (test_is_done_) {
+        test_is_done_->Set();
+      }
+    }
+    return 0;
+  }
+
+  bool ReceivedEnoughCallbacks() {
+    bool recording_done = false;
+    if (rec_mode())
+      recording_done = rec_count_ >= num_callbacks_;
+    else
+      recording_done = true;
+
+    bool playout_done = false;
+    if (play_mode())
+      playout_done = play_count_ >= num_callbacks_;
+    else
+      playout_done = true;
+
+    return recording_done && playout_done;
+  }
+
+  bool play_mode() const { return type_ & kPlayout; }
+  bool rec_mode() const { return type_ & kRecording; }
+
+ private:
+  rtc::Event* test_is_done_;
+  size_t num_callbacks_;
+  int type_;
+  size_t play_count_;
+  size_t rec_count_;
+  AudioStreamInterface* audio_stream_;
+};
+
+// AudioDeviceTest test fixture.
+class AudioDeviceTest : public ::testing::Test {
+ protected:
+  AudioDeviceTest() {
+    old_sev_ = rtc::LogMessage::GetLogToDebug();
+    // Set a suitable logging level here (rtc::LS_INFO is already fairly
+    // verbose). See webrtc/rtc_base/logging.h for the complete list of
+    // options.
+    rtc::LogMessage::LogToDebug(rtc::LS_INFO);
+    // Add extra logging fields here (timestamps and thread id).
+    // rtc::LogMessage::LogTimestamps();
+    rtc::LogMessage::LogThreads();
+    // Creates an audio device using a default audio layer.
+    audio_device_ = CreateAudioDevice(AudioDeviceModule::kPlatformDefaultAudio);
+    EXPECT_NE(audio_device_.get(), nullptr);
+    EXPECT_EQ(0, audio_device_->Init());
+    EXPECT_EQ(0,
+              audio_device()->GetPlayoutAudioParameters(&playout_parameters_));
+    EXPECT_EQ(0, audio_device()->GetRecordAudioParameters(&record_parameters_));
+  }
+  virtual ~AudioDeviceTest() {
+    EXPECT_EQ(0, audio_device_->Terminate());
+    rtc::LogMessage::LogToDebug(old_sev_);
+  }
+
+  int playout_sample_rate() const { return playout_parameters_.sample_rate(); }
+  int record_sample_rate() const { return record_parameters_.sample_rate(); }
+  int playout_channels() const { return playout_parameters_.channels(); }
+  int record_channels() const { return record_parameters_.channels(); }
+  size_t playout_frames_per_10ms_buffer() const {
+    return playout_parameters_.frames_per_10ms_buffer();
+  }
+  size_t record_frames_per_10ms_buffer() const {
+    return record_parameters_.frames_per_10ms_buffer();
+  }
+
+  rtc::scoped_refptr<AudioDeviceModule> audio_device() const {
+    return audio_device_;
+  }
+
+  AudioDeviceModuleImpl* audio_device_impl() const {
+    return static_cast<AudioDeviceModuleImpl*>(audio_device_.get());
+  }
+
+  AudioDeviceBuffer* audio_device_buffer() const {
+    return audio_device_impl()->GetAudioDeviceBuffer();
+  }
+
+  rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice(
+      AudioDeviceModule::AudioLayer audio_layer) {
+    rtc::scoped_refptr<AudioDeviceModule> module(
+        AudioDeviceModule::Create(audio_layer));
+    return module;
+  }
+
+  // Returns a file name relative to the resource root given a sample rate,
+  // e.g. "audio_device/audio_short48.pcm" for 48000 Hz.
+  std::string GetFileName(int sample_rate) {
+    EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100 ||
+                sample_rate == 16000);
+    char fname[64];
+    snprintf(fname, sizeof(fname), "audio_device/audio_short%d",
+             sample_rate / 1000);
+    std::string file_name(webrtc::test::ResourcePath(fname, "pcm"));
+    EXPECT_TRUE(test::FileExists(file_name));
+#ifdef ENABLE_DEBUG_PRINTF
+    PRINTD("file name: %s\n", file_name.c_str());
+    const size_t bytes = test::GetFileSize(file_name);
+    PRINTD("file size: %" PRIuS " [bytes]\n", bytes);
+    PRINTD("file size: %" PRIuS " [samples]\n", bytes / kBytesPerSample);
+    const int seconds =
+        static_cast<int>(bytes / (sample_rate * kBytesPerSample));
+    PRINTD("file size: %d [secs]\n", seconds);
+    PRINTD("file size: %" PRIuS " [callbacks]\n",
+           seconds * kNumCallbacksPerSecond);
+#endif
+    return file_name;
+  }
+
+  void StartPlayout() {
+    EXPECT_FALSE(audio_device()->Playing());
+    EXPECT_EQ(0, audio_device()->InitPlayout());
+    EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
+    EXPECT_EQ(0, audio_device()->StartPlayout());
+    EXPECT_TRUE(audio_device()->Playing());
+  }
+
+  void StopPlayout() {
+    EXPECT_EQ(0, audio_device()->StopPlayout());
+    EXPECT_FALSE(audio_device()->Playing());
+  }
+
+  void StartRecording() {
+    EXPECT_FALSE(audio_device()->Recording());
+    EXPECT_EQ(0, audio_device()->InitRecording());
+    EXPECT_TRUE(audio_device()->RecordingIsInitialized());
+    EXPECT_EQ(0, audio_device()->StartRecording());
+    EXPECT_TRUE(audio_device()->Recording());
+  }
+
+  void StopRecording() {
+    EXPECT_EQ(0, audio_device()->StopRecording());
+    EXPECT_FALSE(audio_device()->Recording());
+  }
+
+  rtc::Event test_is_done_;
+  rtc::scoped_refptr<AudioDeviceModule> audio_device_;
+  AudioParameters playout_parameters_;
+  AudioParameters record_parameters_;
+  rtc::LoggingSeverity old_sev_;
+};
+
+TEST_F(AudioDeviceTest, ConstructDestruct) {
+  // Using the test fixture to create and destruct the audio device module.
+}
+
+TEST_F(AudioDeviceTest, InitTerminate) {
+  // Initialization is part of the test fixture.
+  EXPECT_TRUE(audio_device()->Initialized());
+  EXPECT_EQ(0, audio_device()->Terminate());
+  EXPECT_FALSE(audio_device()->Initialized());
+}
+
+// Tests that playout can be initiated, started and stopped. No audio callback
+// is registered in this test.
+// Failing when running on real iOS devices: bugs.webrtc.org/6889.
+TEST_F(AudioDeviceTest, DISABLED_StartStopPlayout) {
+  StartPlayout();
+  StopPlayout();
+  StartPlayout();
+  StopPlayout();
+}
+
+// Tests that recording can be initiated, started and stopped. No audio callback
+// is registered in this test.
+// Can sometimes fail when running on real devices: bugs.webrtc.org/7888.
+TEST_F(AudioDeviceTest, DISABLED_StartStopRecording) {
+  StartRecording();
+  StopRecording();
+  StartRecording();
+  StopRecording();
+}
+
+// Verify that calling StopPlayout() will leave us in an uninitialized state
+// which will require a new call to InitPlayout(). This test does not call
+// StartPlayout() while being uninitialized since doing so will hit a
+// RTC_DCHECK.
+TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
+  EXPECT_EQ(0, audio_device()->InitPlayout());
+  EXPECT_EQ(0, audio_device()->StartPlayout());
+  EXPECT_EQ(0, audio_device()->StopPlayout());
+  EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+}
+
+// Verify that we can create two ADMs and start playing on the second ADM.
+// Only the first active instance shall activate an audio session and the
+// last active instance shall deactivate the audio session. The test does not
+// explicitly verify correct audio session calls but instead focuses on
+// ensuring that audio starts for both ADMs.
+// Failing when running on real iOS devices: bugs.webrtc.org/6889.
+TEST_F(AudioDeviceTest, DISABLED_StartPlayoutOnTwoInstances) {
+  // Create and initialize a second/extra ADM instance. The default ADM is
+  // created by the test harness.
+  rtc::scoped_refptr<AudioDeviceModule> second_audio_device =
+      CreateAudioDevice(AudioDeviceModule::kPlatformDefaultAudio);
+  EXPECT_NE(second_audio_device.get(), nullptr);
+  EXPECT_EQ(0, second_audio_device->Init());
+
+  // Start playout for the default ADM but don't wait here. Instead use the
+  // upcoming second stream for that. We set the same expectation on number
+  // of callbacks as for the second stream.
+  NiceMock<MockAudioTransportIOS> mock(kPlayout);
+  mock.HandleCallbacks(nullptr, nullptr, 0);
+  EXPECT_CALL(
+      mock, NeedMorePlayData(playout_frames_per_10ms_buffer(), kBytesPerSample,
+                             playout_channels(), playout_sample_rate(),
+                             NotNull(), _, _, _))
+      .Times(AtLeast(kNumCallbacks));
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartPlayout();
+
+  // Initialize playout for the second ADM. If all is OK, the second ADM shall
+  // reuse the audio session activated when the first ADM started playing.
+  // This call will also ensure that we avoid a problem related to initializing
+  // two different audio unit instances back to back (see webrtc:5166 for
+  // details).
+  EXPECT_EQ(0, second_audio_device->InitPlayout());
+  EXPECT_TRUE(second_audio_device->PlayoutIsInitialized());
+
+  // Start playout for the second ADM and verify that it starts as intended.
+  // Passing this test ensures that initialization of the second audio unit
+  // has been done successfully and that there is no conflict with the already
+  // playing first ADM.
+  MockAudioTransportIOS mock2(kPlayout);
+  mock2.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
+  EXPECT_CALL(
+      mock2, NeedMorePlayData(playout_frames_per_10ms_buffer(), kBytesPerSample,
+                              playout_channels(), playout_sample_rate(),
+                              NotNull(), _, _, _))
+      .Times(AtLeast(kNumCallbacks));
+  EXPECT_EQ(0, second_audio_device->RegisterAudioCallback(&mock2));
+  EXPECT_EQ(0, second_audio_device->StartPlayout());
+  EXPECT_TRUE(second_audio_device->Playing());
+  test_is_done_.Wait(kTestTimeOutInMilliseconds);
+  EXPECT_EQ(0, second_audio_device->StopPlayout());
+  EXPECT_FALSE(second_audio_device->Playing());
+  EXPECT_FALSE(second_audio_device->PlayoutIsInitialized());
+
+  EXPECT_EQ(0, second_audio_device->Terminate());
+}
+
+// Start playout and verify that the native audio layer starts asking for real
+// audio samples to play out using the NeedMorePlayData callback.
+TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
+  MockAudioTransportIOS mock(kPlayout);
+  mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
+  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
+                                     kBytesPerSample, playout_channels(),
+                                     playout_sample_rate(), NotNull(), _, _, _))
+      .Times(AtLeast(kNumCallbacks));
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartPlayout();
+  test_is_done_.Wait(kTestTimeOutInMilliseconds);
+  StopPlayout();
+}
+
+// Start recording and verify that the native audio layer starts feeding real
+// audio samples via the RecordedDataIsAvailable callback.
+TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
+  MockAudioTransportIOS mock(kRecording);
+  mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
+  EXPECT_CALL(mock,
+              RecordedDataIsAvailable(
+                  NotNull(), record_frames_per_10ms_buffer(), kBytesPerSample,
+                  record_channels(), record_sample_rate(),
+                  _,  // TODO(henrika): fix delay
+                  0, 0, false, _)).Times(AtLeast(kNumCallbacks));
+
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartRecording();
+  test_is_done_.Wait(kTestTimeOutInMilliseconds);
+  StopRecording();
+}
+
+// Start playout and recording (full-duplex audio) and verify that audio is
+// active in both directions.
+TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
+  MockAudioTransportIOS mock(kPlayout | kRecording);
+  mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
+  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
+                                     kBytesPerSample, playout_channels(),
+                                     playout_sample_rate(), NotNull(), _, _, _))
+      .Times(AtLeast(kNumCallbacks));
+  EXPECT_CALL(mock,
+              RecordedDataIsAvailable(
+                  NotNull(), record_frames_per_10ms_buffer(), kBytesPerSample,
+                  record_channels(), record_sample_rate(),
+                  _,  // TODO(henrika): fix delay
+                  0, 0, false, _)).Times(AtLeast(kNumCallbacks));
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartPlayout();
+  StartRecording();
+  test_is_done_.Wait(kTestTimeOutInMilliseconds);
+  StopRecording();
+  StopPlayout();
+}
+
+// Start playout and read audio from an external PCM file when the audio layer
+// asks for data to play out. Real audio is played out in this test but it does
+// not contain any explicit verification that the audio quality is perfect.
+TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
+  // TODO(henrika): extend test when mono output is supported.
+  EXPECT_EQ(1, playout_channels());
+  NiceMock<MockAudioTransportIOS> mock(kPlayout);
+  const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
+  std::string file_name = GetFileName(playout_sample_rate());
+  std::unique_ptr<FileAudioStream> file_audio_stream(
+      new FileAudioStream(num_callbacks, file_name, playout_sample_rate()));
+  mock.HandleCallbacks(&test_is_done_, file_audio_stream.get(), num_callbacks);
+  // SetMaxPlayoutVolume();
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartPlayout();
+  test_is_done_.Wait(kTestTimeOutInMilliseconds);
+  StopPlayout();
+}
+
+TEST_F(AudioDeviceTest, Devices) {
+  // Device enumeration is not supported. Verify fixed values only.
+  EXPECT_EQ(1, audio_device()->PlayoutDevices());
+  EXPECT_EQ(1, audio_device()->RecordingDevices());
+}
+
+// Start playout and recording and store recorded data in an intermediate FIFO
+// buffer from which the playout side then reads its samples in the same order
+// as they were stored. Under ideal circumstances, a callback sequence would
+// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
+// means 'packet played'. Under such conditions, the FIFO would only contain
+// one packet on average. However, under more realistic conditions, the size
+// of the FIFO will vary more due to an unbalance between the two sides.
+// This test tries to verify that the device maintains a balanced callback-
+// sequence by running in loopback for ten seconds while measuring the size
+// (max and average) of the FIFO. The size of the FIFO is increased by the
+// recording side and decreased by the playout side.
+// TODO(henrika): tune the final test parameters after running tests on several
+// different devices.
+TEST_F(AudioDeviceTest, RunPlayoutAndRecordingInFullDuplex) {
+  EXPECT_EQ(record_channels(), playout_channels());
+  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
+  NiceMock<MockAudioTransportIOS> mock(kPlayout | kRecording);
+  std::unique_ptr<FifoAudioStream> fifo_audio_stream(
+      new FifoAudioStream(playout_frames_per_10ms_buffer()));
+  mock.HandleCallbacks(
+      &test_is_done_, fifo_audio_stream.get(), kFullDuplexTimeInSec * kNumCallbacksPerSecond);
+  // SetMaxPlayoutVolume();
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartRecording();
+  StartPlayout();
+  test_is_done_.Wait(std::max(kTestTimeOutInMilliseconds, 1000 * kFullDuplexTimeInSec));
+  StopPlayout();
+  StopRecording();
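+  // With 10 ms buffers, these bounds correspond to an average of at most
+  // ~100 ms and a peak of at most ~200 ms of audio buffered in the FIFO.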
+  EXPECT_LE(fifo_audio_stream->average_size(), 10u);
+  EXPECT_LE(fifo_audio_stream->largest_size(), 20u);
+}
+
+// Measures loopback latency and reports the min, max and average values for
+// a full duplex audio session.
+// The latency is measured like so:
+// - Insert impulses periodically on the output side.
+// - Detect the impulses on the input side.
+// - Measure the time difference between the transmit time and receive time.
+// - Store time differences in a vector and calculate min, max and average.
+// This test requires a special hardware called Audio Loopback Dongle.
+// See http://source.android.com/devices/audio/loopback.html for details.
+TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
+  EXPECT_EQ(record_channels(), playout_channels());
+  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
+  NiceMock<MockAudioTransportIOS> mock(kPlayout | kRecording);
+  std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
+      new LatencyMeasuringAudioStream(playout_frames_per_10ms_buffer()));
+  mock.HandleCallbacks(&test_is_done_,
+                       latency_audio_stream.get(),
+                       kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  // SetMaxPlayoutVolume();
+  // DisableBuiltInAECIfAvailable();
+  StartRecording();
+  StartPlayout();
+  test_is_done_.Wait(std::max(kTestTimeOutInMilliseconds, 1000 * kMeasureLatencyTimeInSec));
+  StopPlayout();
+  StopRecording();
+  // Verify that the correct number of transmitted impulses are detected.
+  EXPECT_EQ(latency_audio_stream->num_latency_values(),
+            static_cast<size_t>(
+                kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1));
+  latency_audio_stream->PrintResults();
+}
+
+// Verifies that the AudioDeviceIOS is_interrupted_ flag is reset correctly
+// after an iOS AVAudioSessionInterruptionTypeEnded notification event.
+// AudioDeviceIOS listens to RTCAudioSession interrupted notifications by:
+// - In AudioDeviceIOS.InitPlayOrRecord registers its audio_session_observer_
+//   callback with RTCAudioSession's delegate list.
+// - When RTCAudioSession receives an iOS audio interrupted notification, it
+//   passes the notification to callbacks in its delegate list which sets
+//   AudioDeviceIOS's is_interrupted_ flag to true.
+// - When AudioDeviceIOS.ShutdownPlayOrRecord is called, its
+//   audio_session_observer_ callback is removed from RTCAudioSession's
+//   delegate list.
+//   So if RTCAudioSession receives an iOS end audio interruption notification,
+//   AudioDeviceIOS is not notified as its callback is not in RTCAudioSession's
+//   delegate list. This causes AudioDeviceIOS's is_interrupted_ flag to be in
+//   the wrong (true) state and the audio session will ignore audio changes.
+// As RTCAudioSession keeps its own interrupted state, the fix is to initialize
+// AudioDeviceIOS's is_interrupted_ flag to RTCAudioSession's isInterrupted
+// flag in AudioDeviceIOS.InitPlayOrRecord.
+TEST_F(AudioDeviceTest, testInterruptedAudioSession) {
+  RTCAudioSession *session = [RTCAudioSession sharedInstance];
+  std::unique_ptr<webrtc::AudioDeviceIOS> audio_device;
+  audio_device.reset(new webrtc::AudioDeviceIOS());
+  std::unique_ptr<webrtc::AudioDeviceBuffer> audio_buffer;
+  audio_buffer.reset(new webrtc::AudioDeviceBuffer());
+  audio_device->AttachAudioBuffer(audio_buffer.get());
+  audio_device->Init();
+  audio_device->InitPlayout();
+  // Force interruption.
+  [session notifyDidBeginInterruption];
+
+  // Wait for notification to propagate.
+  rtc::MessageQueueManager::ProcessAllMessageQueuesForTesting();
+  EXPECT_TRUE(audio_device->is_interrupted_);
+
+  // Force it for testing.
+  audio_device->playing_ = false;
+  audio_device->ShutdownPlayOrRecord();
+  // Force it for testing.
+  audio_device->audio_is_initialized_ = false;
+
+  [session notifyDidEndInterruptionWithShouldResumeSession:YES];
+  // Wait for notification to propagate.
+  rtc::MessageQueueManager::ProcessAllMessageQueuesForTesting();
+  EXPECT_TRUE(audio_device->is_interrupted_);
+
+  audio_device->Init();
+  audio_device->InitPlayout();
+  EXPECT_FALSE(audio_device->is_interrupted_);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_device/ios/audio_session_observer.h b/modules/audio_device/ios/audio_session_observer.h
new file mode 100644
index 0000000..c79cdd14
--- /dev/null
+++ b/modules/audio_device/ios/audio_session_observer.h
@@ -0,0 +1,42 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_IOS_AUDIO_SESSION_OBSERVER_H_
+#define MODULES_AUDIO_DEVICE_IOS_AUDIO_SESSION_OBSERVER_H_
+
+#include "rtc_base/async_invoker.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+
+// Observer interface for listening to AVAudioSession events.
+class AudioSessionObserver {
+ public:
+  // Called when audio session interruption begins.
+  virtual void OnInterruptionBegin() = 0;
+
+  // Called when audio session interruption ends.
+  virtual void OnInterruptionEnd() = 0;
+
+  // Called when audio route changes.
+  virtual void OnValidRouteChange() = 0;
+
+  // Called when the ability to play or record changes.
+  virtual void OnCanPlayOrRecordChange(bool can_play_or_record) = 0;
+
+  virtual void OnChangedOutputVolume() = 0;
+
+ protected:
+  virtual ~AudioSessionObserver() {}
+};
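+
+// AudioDeviceIOS implements this interface in this module. For illustration,
+// a minimal custom observer could look like this (LoggingAudioSessionObserver
+// is a hypothetical name used only as a sketch):
+//
+//   class LoggingAudioSessionObserver : public AudioSessionObserver {
+//    public:
+//     void OnInterruptionBegin() override { RTC_LOG(LS_INFO) << "begin"; }
+//     void OnInterruptionEnd() override { RTC_LOG(LS_INFO) << "end"; }
+//     void OnValidRouteChange() override {}
+//     void OnCanPlayOrRecordChange(bool can_play_or_record) override {}
+//     void OnChangedOutputVolume() override {}
+//   };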
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_IOS_AUDIO_SESSION_OBSERVER_H_
diff --git a/modules/audio_device/ios/objc/RTCAudioSession.h b/modules/audio_device/ios/objc/RTCAudioSession.h
new file mode 100644
index 0000000..23abc3d
--- /dev/null
+++ b/modules/audio_device/ios/objc/RTCAudioSession.h
@@ -0,0 +1,11 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "sdk/objc/components/audio/RTCAudioSession.h"
diff --git a/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h b/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h
new file mode 100644
index 0000000..2584053
--- /dev/null
+++ b/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h
@@ -0,0 +1,11 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "sdk/objc/components/audio/RTCAudioSessionConfiguration.h"
diff --git a/modules/audio_device/ios/objc/RTCAudioSessionDelegateAdapter.h b/modules/audio_device/ios/objc/RTCAudioSessionDelegateAdapter.h
new file mode 100644
index 0000000..54f4c26
--- /dev/null
+++ b/modules/audio_device/ios/objc/RTCAudioSessionDelegateAdapter.h
@@ -0,0 +1,29 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "sdk/objc/components/audio/RTCAudioSession.h"
+
+namespace webrtc {
+class AudioSessionObserver;
+}
+
+/** Adapter that forwards RTCAudioSessionDelegate calls to the appropriate
+ *  methods on the AudioSessionObserver.
+ */
+@interface RTCAudioSessionDelegateAdapter : NSObject <RTCAudioSessionDelegate>
+
+- (instancetype)init NS_UNAVAILABLE;
+
+/** |observer| is a raw pointer and should be kept alive
+ *  for this object's lifetime.
+ */
+- (instancetype)initWithObserver:(webrtc::AudioSessionObserver *)observer NS_DESIGNATED_INITIALIZER;
+
+@end
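+
+// A minimal wiring sketch, mirroring what AudioDeviceIOS does during
+// InitPlayOrRecord()/ShutdownPlayOrRecord():
+//
+//   RTCAudioSessionDelegateAdapter *adapter =
+//       [[RTCAudioSessionDelegateAdapter alloc] initWithObserver:observer];
+//   [[RTCAudioSession sharedInstance] addDelegate:adapter];
+//   // ... later, on shutdown:
+//   [[RTCAudioSession sharedInstance] removeDelegate:adapter];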
diff --git a/modules/audio_device/ios/objc/RTCAudioSessionDelegateAdapter.mm b/modules/audio_device/ios/objc/RTCAudioSessionDelegateAdapter.mm
new file mode 100644
index 0000000..818e077
--- /dev/null
+++ b/modules/audio_device/ios/objc/RTCAudioSessionDelegateAdapter.mm
@@ -0,0 +1,89 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "modules/audio_device/ios/objc/RTCAudioSessionDelegateAdapter.h"
+
+#include "modules/audio_device/ios/audio_session_observer.h"
+
+#import "sdk/objc/base/RTCLogging.h"
+
+@implementation RTCAudioSessionDelegateAdapter {
+  webrtc::AudioSessionObserver *_observer;
+}
+
+- (instancetype)initWithObserver:(webrtc::AudioSessionObserver *)observer {
+  NSParameterAssert(observer);
+  if (self = [super init]) {
+    _observer = observer;
+  }
+  return self;
+}
+
+#pragma mark - RTCAudioSessionDelegate
+
+- (void)audioSessionDidBeginInterruption:(RTCAudioSession *)session {
+  _observer->OnInterruptionBegin();
+}
+
+- (void)audioSessionDidEndInterruption:(RTCAudioSession *)session
+                   shouldResumeSession:(BOOL)shouldResumeSession {
+  _observer->OnInterruptionEnd();
+}
+
+- (void)audioSessionDidChangeRoute:(RTCAudioSession *)session
+           reason:(AVAudioSessionRouteChangeReason)reason
+    previousRoute:(AVAudioSessionRouteDescription *)previousRoute {
+  switch (reason) {
+    case AVAudioSessionRouteChangeReasonUnknown:
+    case AVAudioSessionRouteChangeReasonNewDeviceAvailable:
+    case AVAudioSessionRouteChangeReasonOldDeviceUnavailable:
+    case AVAudioSessionRouteChangeReasonCategoryChange:
+      // It turns out that we see a category change (at least in iOS 9.2)
+      // when switching from a BT device to e.g. the speaker using the
+      // iOS Control Center, so we must check if the sample rate has
+      // changed and, if it has, restart the audio unit.
+    case AVAudioSessionRouteChangeReasonOverride:
+    case AVAudioSessionRouteChangeReasonWakeFromSleep:
+    case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory:
+      _observer->OnValidRouteChange();
+      break;
+    case AVAudioSessionRouteChangeReasonRouteConfigurationChange:
+      // The set of input and output ports has not changed, but their
+      // configuration has, e.g., a port’s selected data source has
+      // changed. Ignore this type of route change since we are focusing
+      // on detecting headset changes.
+      RTCLog(@"Ignoring RouteConfigurationChange");
+      break;
+  }
+}
+
+- (void)audioSessionMediaServerTerminated:(RTCAudioSession *)session {
+}
+
+- (void)audioSessionMediaServerReset:(RTCAudioSession *)session {
+}
+
+- (void)audioSession:(RTCAudioSession *)session
+    didChangeCanPlayOrRecord:(BOOL)canPlayOrRecord {
+  _observer->OnCanPlayOrRecordChange(canPlayOrRecord);
+}
+
+- (void)audioSessionDidStartPlayOrRecord:(RTCAudioSession *)session {
+}
+
+- (void)audioSessionDidStopPlayOrRecord:(RTCAudioSession *)session {
+}
+
+- (void)audioSession:(RTCAudioSession *)audioSession
+    didChangeOutputVolume:(float)outputVolume {
+  _observer->OnChangedOutputVolume();
+}
+
+@end
diff --git a/modules/audio_device/ios/voice_processing_audio_unit.h b/modules/audio_device/ios/voice_processing_audio_unit.h
new file mode 100644
index 0000000..3105c12
--- /dev/null
+++ b/modules/audio_device/ios/voice_processing_audio_unit.h
@@ -0,0 +1,137 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_IOS_VOICE_PROCESSING_AUDIO_UNIT_H_
+#define MODULES_AUDIO_DEVICE_IOS_VOICE_PROCESSING_AUDIO_UNIT_H_
+
+#include <AudioUnit/AudioUnit.h>
+
+namespace webrtc {
+
+class VoiceProcessingAudioUnitObserver {
+ public:
+  // Callback function called on a real-time priority I/O thread from the audio
+  // unit. This method is used to signal that recorded audio is available.
+  virtual OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+                                         const AudioTimeStamp* time_stamp,
+                                         UInt32 bus_number,
+                                         UInt32 num_frames,
+                                         AudioBufferList* io_data) = 0;
+
+  // Callback function called on a real-time priority I/O thread from the audio
+  // unit. This method is used to provide audio samples to the audio unit.
+  virtual OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* io_action_flags,
+                                    const AudioTimeStamp* time_stamp,
+                                    UInt32 bus_number,
+                                    UInt32 num_frames,
+                                    AudioBufferList* io_data) = 0;
+
+ protected:
+  ~VoiceProcessingAudioUnitObserver() {}
+};
+
+// Convenience class to abstract away the management of a Voice Processing
+// I/O Audio Unit. The Voice Processing I/O unit has the same characteristics
+// as the Remote I/O unit (supports full duplex low-latency audio input and
+// output) and adds AEC for two-way duplex communication. It also adds AGC,
+// adjustment of voice-processing quality, and muting. Hence, it is ideal for
+// VoIP applications.
+class VoiceProcessingAudioUnit {
+ public:
+  explicit VoiceProcessingAudioUnit(VoiceProcessingAudioUnitObserver* observer);
+  ~VoiceProcessingAudioUnit();
+
+  // TODO(tkchin): enum for state and state checking.
+  enum State : int32_t {
+    // Init() should be called.
+    kInitRequired,
+    // Audio unit created but not initialized.
+    kUninitialized,
+    // Initialized but not started. Equivalent to stopped.
+    kInitialized,
+    // Initialized and started.
+    kStarted,
+  };
+
+  // Number of bytes per audio sample for 16-bit signed integer representation.
+  static const UInt32 kBytesPerSample;
+
+  // Initializes this class by creating the underlying audio unit instance.
+  // Creates a Voice-Processing I/O unit and configures it for full-duplex
+  // audio. The stream format is selected to avoid internal resampling
+  // and to match the 10ms callback rate of WebRTC as well as possible.
+  // Does not initialize the audio unit.
+  bool Init();
+
+  VoiceProcessingAudioUnit::State GetState() const;
+
+  // Initializes the underlying audio unit with the given sample rate.
+  bool Initialize(Float64 sample_rate);
+
+  // Starts the underlying audio unit.
+  bool Start();
+
+  // Stops the underlying audio unit.
+  bool Stop();
+
+  // Uninitializes the underlying audio unit.
+  bool Uninitialize();
+
+  // Calls render on the underlying audio unit.
+  OSStatus Render(AudioUnitRenderActionFlags* flags,
+                  const AudioTimeStamp* time_stamp,
+                  UInt32 output_bus_number,
+                  UInt32 num_frames,
+                  AudioBufferList* io_data);
+
+ private:
+  // The C API used to set callbacks requires static functions. When these are
+  // called, they will invoke the relevant instance method by casting
+  // in_ref_con to VoiceProcessingAudioUnit*.
+  static OSStatus OnGetPlayoutData(void* in_ref_con,
+                                   AudioUnitRenderActionFlags* flags,
+                                   const AudioTimeStamp* time_stamp,
+                                   UInt32 bus_number,
+                                   UInt32 num_frames,
+                                   AudioBufferList* io_data);
+  static OSStatus OnDeliverRecordedData(void* in_ref_con,
+                                        AudioUnitRenderActionFlags* flags,
+                                        const AudioTimeStamp* time_stamp,
+                                        UInt32 bus_number,
+                                        UInt32 num_frames,
+                                        AudioBufferList* io_data);
+
+  // Notifies observer that samples are needed for playback.
+  OSStatus NotifyGetPlayoutData(AudioUnitRenderActionFlags* flags,
+                                const AudioTimeStamp* time_stamp,
+                                UInt32 bus_number,
+                                UInt32 num_frames,
+                                AudioBufferList* io_data);
+  // Notifies observer that recorded samples are available for render.
+  OSStatus NotifyDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+                                     const AudioTimeStamp* time_stamp,
+                                     UInt32 bus_number,
+                                     UInt32 num_frames,
+                                     AudioBufferList* io_data);
+
+  // Returns the predetermined format with a specific sample rate. See
+  // implementation file for details on format.
+  AudioStreamBasicDescription GetFormat(Float64 sample_rate) const;
+
+  // Deletes the underlying audio unit.
+  void DisposeAudioUnit();
+
+  VoiceProcessingAudioUnitObserver* observer_;
+  AudioUnit vpio_unit_;
+  VoiceProcessingAudioUnit::State state_;
+};
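+
+// A minimal lifecycle sketch following the State enum above (observer is any
+// VoiceProcessingAudioUnitObserver implementation):
+//
+//   VoiceProcessingAudioUnit audio_unit(&observer);
+//   if (audio_unit.Init() &&              // kInitRequired -> kUninitialized
+//       audio_unit.Initialize(48000.0)) { // kUninitialized -> kInitialized
+//     audio_unit.Start();                 // kInitialized -> kStarted
+//     // ... audio I/O callbacks are delivered to the observer ...
+//     audio_unit.Stop();                  // kStarted -> kInitialized
+//     audio_unit.Uninitialize();          // kInitialized -> kUninitialized
+//   }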
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_IOS_VOICE_PROCESSING_AUDIO_UNIT_H_
diff --git a/modules/audio_device/ios/voice_processing_audio_unit.mm b/modules/audio_device/ios/voice_processing_audio_unit.mm
new file mode 100644
index 0000000..41477d1
--- /dev/null
+++ b/modules/audio_device/ios/voice_processing_audio_unit.mm
@@ -0,0 +1,468 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "modules/audio_device/ios/voice_processing_audio_unit.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/system/fallthrough.h"
+#include "system_wrappers/include/metrics.h"
+
+#import "sdk/objc/base//RTCLogging.h"
+#import "sdk/objc/components/audio/RTCAudioSessionConfiguration.h"
+
+#if !defined(NDEBUG)
+static void LogStreamDescription(AudioStreamBasicDescription description) {
+  char formatIdString[5];
+  UInt32 formatId = CFSwapInt32HostToBig(description.mFormatID);
+  bcopy(&formatId, formatIdString, 4);
+  formatIdString[4] = '\0';
+  RTCLog(@"AudioStreamBasicDescription: {\n"
+          "  mSampleRate: %.2f\n"
+          "  formatIDString: %s\n"
+          "  mFormatFlags: 0x%X\n"
+          "  mBytesPerPacket: %u\n"
+          "  mFramesPerPacket: %u\n"
+          "  mBytesPerFrame: %u\n"
+          "  mChannelsPerFrame: %u\n"
+          "  mBitsPerChannel: %u\n"
+          "  mReserved: %u\n}",
+         description.mSampleRate, formatIdString,
+         static_cast<unsigned int>(description.mFormatFlags),
+         static_cast<unsigned int>(description.mBytesPerPacket),
+         static_cast<unsigned int>(description.mFramesPerPacket),
+         static_cast<unsigned int>(description.mBytesPerFrame),
+         static_cast<unsigned int>(description.mChannelsPerFrame),
+         static_cast<unsigned int>(description.mBitsPerChannel),
+         static_cast<unsigned int>(description.mReserved));
+}
+#endif
+
+namespace webrtc {
+
+// Calls to AudioUnitInitialize() can fail if called back-to-back on different
+// ADM instances. A fall-back solution is to allow multiple sequential calls
+// with a small delay between each. This constant sets the max number of
+// allowed initialization attempts.
+static const int kMaxNumberOfAudioUnitInitializeAttempts = 5;
+// A VP I/O unit's bus 1 connects to input hardware (microphone).
+static const AudioUnitElement kInputBus = 1;
+// A VP I/O unit's bus 0 connects to output hardware (speaker).
+static const AudioUnitElement kOutputBus = 0;
+
+// Returns the automatic gain control (AGC) state on the processed microphone
+// signal. Should be on by default for Voice Processing audio units.
+static OSStatus GetAGCState(AudioUnit audio_unit, UInt32* enabled) {
+  RTC_DCHECK(audio_unit);
+  UInt32 size = sizeof(*enabled);
+  OSStatus result = AudioUnitGetProperty(audio_unit,
+                                         kAUVoiceIOProperty_VoiceProcessingEnableAGC,
+                                         kAudioUnitScope_Global,
+                                         kInputBus,
+                                         enabled,
+                                         &size);
+  // Only log the AGC state when the property was read successfully; |enabled|
+  // is uninitialized otherwise.
+  if (result == noErr) {
+    RTCLog(@"VPIO unit AGC: %u", static_cast<unsigned int>(*enabled));
+  }
+  return result;
+}
+
+VoiceProcessingAudioUnit::VoiceProcessingAudioUnit(
+    VoiceProcessingAudioUnitObserver* observer)
+    : observer_(observer), vpio_unit_(nullptr), state_(kInitRequired) {
+  RTC_DCHECK(observer);
+}
+
+VoiceProcessingAudioUnit::~VoiceProcessingAudioUnit() {
+  DisposeAudioUnit();
+}
+
+const UInt32 VoiceProcessingAudioUnit::kBytesPerSample = 2;
+
+bool VoiceProcessingAudioUnit::Init() {
+  RTC_DCHECK_EQ(state_, kInitRequired);
+
+  // Create an audio component description to identify the Voice Processing
+  // I/O audio unit.
+  AudioComponentDescription vpio_unit_description;
+  vpio_unit_description.componentType = kAudioUnitType_Output;
+  vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
+  vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple;
+  vpio_unit_description.componentFlags = 0;
+  vpio_unit_description.componentFlagsMask = 0;
+
+  // Obtain an audio unit instance given the description.
+  AudioComponent found_vpio_unit_ref =
+      AudioComponentFindNext(nullptr, &vpio_unit_description);
+  if (found_vpio_unit_ref == nullptr) {
+    RTCLogError(@"Failed to find a Voice Processing I/O audio component.");
+    return false;
+  }
+
+  // Create a Voice Processing I/O audio unit.
+  OSStatus result =
+      AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_);
+  if (result != noErr) {
+    vpio_unit_ = nullptr;
+    RTCLogError(@"AudioComponentInstanceNew failed. Error=%ld.", (long)result);
+    return false;
+  }
+
+  // Enable input on the input scope of the input element.
+  UInt32 enable_input = 1;
+  result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
+                                kAudioUnitScope_Input, kInputBus, &enable_input,
+                                sizeof(enable_input));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    RTCLogError(@"Failed to enable input on input scope of input element. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Enable output on the output scope of the output element.
+  UInt32 enable_output = 1;
+  result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
+                                kAudioUnitScope_Output, kOutputBus,
+                                &enable_output, sizeof(enable_output));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    RTCLogError(@"Failed to enable output on output scope of output element. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Specify the callback function that provides audio samples to the audio
+  // unit.
+  AURenderCallbackStruct render_callback;
+  render_callback.inputProc = OnGetPlayoutData;
+  render_callback.inputProcRefCon = this;
+  result = AudioUnitSetProperty(
+      vpio_unit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input,
+      kOutputBus, &render_callback, sizeof(render_callback));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    RTCLogError(@"Failed to specify the render callback on the output bus. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Disable AU buffer allocation for the recorder, we allocate our own.
+  // TODO(henrika): not sure that it actually saves resource to make this call.
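+  // With buffer allocation disabled on the input element, the caller of
+  // AudioUnitRender() must supply its own AudioBufferList (see Render()).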
+  UInt32 flag = 0;
+  result = AudioUnitSetProperty(
+      vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer,
+      kAudioUnitScope_Output, kInputBus, &flag, sizeof(flag));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    RTCLogError(@"Failed to disable buffer allocation on the input bus. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Specify the callback to be called by the I/O thread to us when input audio
+  // is available. The recorded samples can then be obtained by calling the
+  // AudioUnitRender() method.
+  AURenderCallbackStruct input_callback;
+  input_callback.inputProc = OnDeliverRecordedData;
+  input_callback.inputProcRefCon = this;
+  result = AudioUnitSetProperty(vpio_unit_,
+                                kAudioOutputUnitProperty_SetInputCallback,
+                                kAudioUnitScope_Global, kInputBus,
+                                &input_callback, sizeof(input_callback));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    RTCLogError(@"Failed to specify the input callback on the input bus. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  state_ = kUninitialized;
+  return true;
+}
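+
+// Example (hypothetical caller, for illustration only): the expected life
+// cycle of the unit and the corresponding state transitions are
+//
+//   VoiceProcessingAudioUnit unit(observer);
+//   unit.Init();                   // kInitRequired  -> kUninitialized
+//   unit.Initialize(sample_rate);  // kUninitialized -> kInitialized
+//   unit.Start();                  // kInitialized   -> kStarted
+//   ...
+//   unit.Stop();                   // kStarted       -> kInitialized
+//   unit.Uninitialize();           // kInitialized   -> kUninitialized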
+
+VoiceProcessingAudioUnit::State VoiceProcessingAudioUnit::GetState() const {
+  return state_;
+}
+
+bool VoiceProcessingAudioUnit::Initialize(Float64 sample_rate) {
+  RTC_DCHECK_GE(state_, kUninitialized);
+  RTCLog(@"Initializing audio unit with sample rate: %f", sample_rate);
+
+  OSStatus result = noErr;
+  AudioStreamBasicDescription format = GetFormat(sample_rate);
+  UInt32 size = sizeof(format);
+#if !defined(NDEBUG)
+  LogStreamDescription(format);
+#endif
+
+  // Set the format on the output scope of the input element/bus.
+  result =
+      AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
+                           kAudioUnitScope_Output, kInputBus, &format, size);
+  if (result != noErr) {
+    RTCLogError(@"Failed to set format on output scope of input bus. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Set the format on the input scope of the output element/bus.
+  result =
+      AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
+                           kAudioUnitScope_Input, kOutputBus, &format, size);
+  if (result != noErr) {
+    RTCLogError(@"Failed to set format on input scope of output bus. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Initialize the Voice Processing I/O unit instance.
+  // Calls to AudioUnitInitialize() can fail if called back-to-back on
+  // different ADM instances. The error message in this case is -66635 which is
+  // undocumented. Tests have shown that calling AudioUnitInitialize a second
+  // time, after a short sleep, avoids this issue.
+  // See webrtc:5166 for details.
+  int failed_initialize_attempts = 0;
+  result = AudioUnitInitialize(vpio_unit_);
+  while (result != noErr) {
+    RTCLogError(@"Failed to initialize the Voice Processing I/O unit. "
+                 "Error=%ld.",
+                (long)result);
+    ++failed_initialize_attempts;
+    if (failed_initialize_attempts == kMaxNumberOfAudioUnitInitializeAttempts) {
+      // Max number of initialization attempts exceeded, hence abort.
+      RTCLogError(@"Too many initialization attempts.");
+      return false;
+    }
+    RTCLog(@"Pause 100ms and try audio unit initialization again...");
+    [NSThread sleepForTimeInterval:0.1f];
+    result = AudioUnitInitialize(vpio_unit_);
+  }
+  // The loop above only exits with result == noErr.
+  RTCLog(@"Voice Processing I/O unit is now initialized.");
+
+  // AGC should be enabled by default for Voice Processing I/O units but it is
+  // checked below and enabled explicitly if needed. This scheme is used
+  // to be absolutely sure that the AGC is enabled since we have seen cases
+  // where only zeros are recorded and a disabled AGC could be one of the
+  // reasons why it happens.
+  int agc_was_enabled_by_default = 0;
+  UInt32 agc_is_enabled = 0;
+  result = GetAGCState(vpio_unit_, &agc_is_enabled);
+  if (result != noErr) {
+    RTCLogError(@"Failed to get AGC state (1st attempt). "
+                 "Error=%ld.",
+                (long)result);
+    // Example of error code: kAudioUnitErr_NoConnection (-10876).
+    // All error codes related to audio units are negative and are therefore
+    // converted into a positive value to match the UMA APIs.
+    RTC_HISTOGRAM_COUNTS_SPARSE_100000(
+        "WebRTC.Audio.GetAGCStateErrorCode1", (-1) * result);
+  } else if (agc_is_enabled) {
+    // Remember that the AGC was enabled by default. Will be used in UMA.
+    agc_was_enabled_by_default = 1;
+  } else {
+    // AGC was initially disabled => try to enable it explicitly.
+    UInt32 enable_agc = 1;
+    result =
+        AudioUnitSetProperty(vpio_unit_,
+                             kAUVoiceIOProperty_VoiceProcessingEnableAGC,
+                             kAudioUnitScope_Global, kInputBus, &enable_agc,
+                             sizeof(enable_agc));
+    if (result != noErr) {
+      RTCLogError(@"Failed to enable the built-in AGC. "
+                   "Error=%ld.",
+                  (long)result);
+      RTC_HISTOGRAM_COUNTS_SPARSE_100000(
+          "WebRTC.Audio.SetAGCStateErrorCode", (-1) * result);
+    }
+    result = GetAGCState(vpio_unit_, &agc_is_enabled);
+    if (result != noErr) {
+      RTCLogError(@"Failed to get AGC state (2nd attempt). "
+                   "Error=%ld.",
+                  (long)result);
+      RTC_HISTOGRAM_COUNTS_SPARSE_100000(
+          "WebRTC.Audio.GetAGCStateErrorCode2", (-1) * result);
+    }
+  }
+
+  // Track whether the built-in AGC was enabled by default (as it should be).
+  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.BuiltInAGCWasEnabledByDefault",
+                        agc_was_enabled_by_default);
+  RTCLog(@"WebRTC.Audio.BuiltInAGCWasEnabledByDefault: %d",
+         agc_was_enabled_by_default);
+  // As a final step, add a UMA histogram for tracking the AGC state.
+  // At this stage, the AGC should be enabled, and if it is not, more work is
+  // needed to find out the root cause.
+  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.BuiltInAGCIsEnabled", agc_is_enabled);
+  RTCLog(@"WebRTC.Audio.BuiltInAGCIsEnabled: %u",
+         static_cast<unsigned int>(agc_is_enabled));
+
+  state_ = kInitialized;
+  return true;
+}
+
+bool VoiceProcessingAudioUnit::Start() {
+  RTC_DCHECK_GE(state_, kUninitialized);
+  RTCLog(@"Starting audio unit.");
+
+  OSStatus result = AudioOutputUnitStart(vpio_unit_);
+  if (result != noErr) {
+    RTCLogError(@"Failed to start audio unit. Error=%ld", (long)result);
+    return false;
+  } else {
+    RTCLog(@"Started audio unit");
+  }
+  state_ = kStarted;
+  return true;
+}
+
+bool VoiceProcessingAudioUnit::Stop() {
+  RTC_DCHECK_GE(state_, kUninitialized);
+  RTCLog(@"Stopping audio unit.");
+
+  OSStatus result = AudioOutputUnitStop(vpio_unit_);
+  if (result != noErr) {
+    RTCLogError(@"Failed to stop audio unit. Error=%ld", (long)result);
+    return false;
+  } else {
+    RTCLog(@"Stopped audio unit");
+  }
+
+  state_ = kInitialized;
+  return true;
+}
+
+bool VoiceProcessingAudioUnit::Uninitialize() {
+  RTC_DCHECK_GE(state_, kUninitialized);
+  RTCLog(@"Unintializing audio unit.");
+
+  OSStatus result = AudioUnitUninitialize(vpio_unit_);
+  if (result != noErr) {
+    RTCLogError(@"Failed to uninitialize audio unit. Error=%ld", (long)result);
+    return false;
+  } else {
+    RTCLog(@"Uninitialized audio unit.");
+  }
+
+  state_ = kUninitialized;
+  return true;
+}
+
+OSStatus VoiceProcessingAudioUnit::Render(AudioUnitRenderActionFlags* flags,
+                                          const AudioTimeStamp* time_stamp,
+                                          UInt32 output_bus_number,
+                                          UInt32 num_frames,
+                                          AudioBufferList* io_data) {
+  RTC_DCHECK(vpio_unit_) << "Init() not called.";
+
+  OSStatus result = AudioUnitRender(vpio_unit_, flags, time_stamp,
+                                    output_bus_number, num_frames, io_data);
+  if (result != noErr) {
+    RTCLogError(@"Failed to render audio unit. Error=%ld", (long)result);
+  }
+  return result;
+}
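+
+// Example (hypothetical observer code, for illustration only): an
+// implementation of OnDeliverRecordedData() would typically prepare an
+// AudioBufferList backed by its own storage and pull the recorded samples
+// through Render(), e.g.:
+//
+//   AudioBufferList buffer_list;
+//   buffer_list.mNumberBuffers = 1;
+//   buffer_list.mBuffers[0].mNumberChannels = 1;
+//   buffer_list.mBuffers[0].mDataByteSize =
+//       num_frames * VoiceProcessingAudioUnit::kBytesPerSample;
+//   buffer_list.mBuffers[0].mData = recording_buffer;  // hypothetical storage
+//   audio_unit->Render(flags, time_stamp, bus_number, num_frames,
+//                      &buffer_list);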
+
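+// C-style render callbacks registered in Init(). They recover the
+// VoiceProcessingAudioUnit instance from |in_ref_con| and forward to the
+// observer through the Notify* methods below.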
+OSStatus VoiceProcessingAudioUnit::OnGetPlayoutData(
+    void* in_ref_con,
+    AudioUnitRenderActionFlags* flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 num_frames,
+    AudioBufferList* io_data) {
+  VoiceProcessingAudioUnit* audio_unit =
+      static_cast<VoiceProcessingAudioUnit*>(in_ref_con);
+  return audio_unit->NotifyGetPlayoutData(flags, time_stamp, bus_number,
+                                          num_frames, io_data);
+}
+
+OSStatus VoiceProcessingAudioUnit::OnDeliverRecordedData(
+    void* in_ref_con,
+    AudioUnitRenderActionFlags* flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 num_frames,
+    AudioBufferList* io_data) {
+  VoiceProcessingAudioUnit* audio_unit =
+      static_cast<VoiceProcessingAudioUnit*>(in_ref_con);
+  return audio_unit->NotifyDeliverRecordedData(flags, time_stamp, bus_number,
+                                               num_frames, io_data);
+}
+
+OSStatus VoiceProcessingAudioUnit::NotifyGetPlayoutData(
+    AudioUnitRenderActionFlags* flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 num_frames,
+    AudioBufferList* io_data) {
+  return observer_->OnGetPlayoutData(flags, time_stamp, bus_number, num_frames,
+                                     io_data);
+}
+
+OSStatus VoiceProcessingAudioUnit::NotifyDeliverRecordedData(
+    AudioUnitRenderActionFlags* flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 num_frames,
+    AudioBufferList* io_data) {
+  return observer_->OnDeliverRecordedData(flags, time_stamp, bus_number,
+                                          num_frames, io_data);
+}
+
+AudioStreamBasicDescription VoiceProcessingAudioUnit::GetFormat(
+    Float64 sample_rate) const {
+  // Set the application formats for input and output:
+  // - use same format in both directions
+  // - avoid resampling in the I/O unit by using the hardware sample rate
+  // - linear PCM => noncompressed audio data format with one frame per packet
+  // - no need to specify interleaving since only mono is supported
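+  // Example (illustrative): with kBytesPerSample = 2, any hardware rate
+  // yields 16-bit, packed, signed-integer mono PCM with
+  // mBytesPerPacket = mBytesPerFrame = 2 and mFramesPerPacket = 1.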
+  AudioStreamBasicDescription format;
+  RTC_DCHECK_EQ(1, kRTCAudioSessionPreferredNumberOfChannels);
+  format.mSampleRate = sample_rate;
+  format.mFormatID = kAudioFormatLinearPCM;
+  format.mFormatFlags =
+      kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
+  format.mBytesPerPacket = kBytesPerSample;
+  format.mFramesPerPacket = 1;  // uncompressed.
+  format.mBytesPerFrame = kBytesPerSample;
+  format.mChannelsPerFrame = kRTCAudioSessionPreferredNumberOfChannels;
+  format.mBitsPerChannel = 8 * kBytesPerSample;
+  return format;
+}
+
+void VoiceProcessingAudioUnit::DisposeAudioUnit() {
+  if (vpio_unit_) {
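+    // Wind the unit down in reverse order of bring-up: stop a started unit
+    // and uninitialize an initialized one before disposing of the component
+    // instance.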
+    switch (state_) {
+      case kStarted:
+        Stop();
+        RTC_FALLTHROUGH();
+      case kInitialized:
+        Uninitialize();
+        break;
+      case kUninitialized:
+        RTC_FALLTHROUGH();
+      case kInitRequired:
+        break;
+    }
+
+    RTCLog(@"Disposing audio unit.");
+    OSStatus result = AudioComponentInstanceDispose(vpio_unit_);
+    if (result != noErr) {
+      RTCLogError(@"AudioComponentInstanceDispose failed. Error=%ld.",
+                  (long)result);
+    }
+    vpio_unit_ = nullptr;
+  }
+}
+
+}  // namespace webrtc