ObjC ADM: record/play implementation via RTCAudioDevice [3/3]

# Overview
This CL chain exposes new API from ObjC WebRTC SDK to inject custom
means to play and record audio. The goal of CLs is achieved by having
additional implementation of `webrtc::AudioDeviceModule`
called `ObjCAudioDeviceModule`. The feature
of `ObjCAudioDeviceModule` is that it does not directly use any
of OS-provided audio APIs like AudioUnit, AVAudioEngine, AudioQueue,
AVCaptureSession etc. Instead it delegates communication with specific
system audio API to user-injectable audio device instance which
implements `RTCAudioDevice` protocol.
`RTCAudioDevice` is a new API added to the ObjC WebRTC SDK in this CL chain.

# AudioDeviceBuffer
`ObjCAudioDeviceModule` does conform to heavy `AudioDeviceModule`
interface providing stubs for unrelated methods. It also implements
common low-level management of audio device buffer, which glues audio
PCM flow to/from WebRTC.
`ObjCAudioDeviceModule` owns a single `webrtc::AudioDeviceBuffer` which,
with the help of two `FineAudioBuffer`s (one for recording and one for
playout), exchanges audio PCMs with the user-provided `RTCAudioDevice`
instance.
`webrtc::AudioDeviceBuffer` is configured to work with specific audio:
it has to know sample rate and channels count of audio being played and
recorded. These formats could be different between playout and
recording. `ObjCAudioDeviceModule` stores current audio parameters
applied to `webrtc::AudioDeviceBuffer` as fields of
type `webrtc::AudioParameters`. `RTCAudioDevice` has its own variable
audio parameters like sample rate, channels count and IO buffer
duration. The audio parameters of `RTCAudioDevice` must be kept in sync
with audio parameters applied to `webrtc::AudioDeviceBuffer`, otherwise
audio playout and recording will be corrupted: audio is sent only
partially over the wire and/or audio is played with artifacts.
`ObjCAudioDeviceModule` reads current `RTCAudioDevice` audio parameters
when playout or recording is initialized. Whenever `RTCAudioDevice`
audio parameters are changed, there must be a notification to
`ObjCAudioDeviceModule` to allow it to reconfigure
its `webrtc::AudioDeviceBuffer`. The notification is performed
via `RTCAudioDeviceDelegate` object, which is provided
by `ObjCAudioDeviceModule` during initialization of `RTCAudioDevice`.

# Threading
`ObjCAudioDeviceModule` sticks to the same thread between initialization
and termination. The only exceptions are the two IO functions invoked by
SDK user code, presumably from a real-time audio IO thread.
Implementation of `RTCAudioDevice` may rely on the fact that all the
methods of `RTCAudioDevice` are called on the same thread between
initialization and termination. `ObjCAudioDeviceModule` also expects
that the implementation of `RTCAudioDevice` will invoke the methods
related to notification of audio parameter changes and audio
interruptions on the `ObjCAudioDeviceModule` thread. To facilitate this
requirement `RTCAudioDeviceDelegate` provides two functions to execute
sync and async block on `ObjCAudioDeviceModule` thread.
Async block could be useful when handling audio session notifications to
dispatch whole block re-configuring audio objects used
by `RTCAudioDevice` implementation.
Sync block could be used to make sure changes to audio parameters
of the ADB owned by `ObjCAudioDeviceModule` are notified before
interrupted playout/recording is restarted.

Bug: webrtc:14193
Change-Id: I5587ec6bbee3cf02bad70dd59b822feb0ada7f86
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/269006
Reviewed-by: Henrik Andreasson <henrika@google.com>
Commit-Queue: Yury Yarashevich <yura.yaroshevich@gmail.com>
Reviewed-by: Peter Hanspers <peterhanspers@webrtc.org>
Reviewed-by: Henrik Andreassson <henrika@webrtc.org>
Reviewed-by: Tomas Gunnarsson <tommi@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#37928}
diff --git a/sdk/BUILD.gn b/sdk/BUILD.gn
index 03e5013..a30c6bc 100644
--- a/sdk/BUILD.gn
+++ b/sdk/BUILD.gn
@@ -406,22 +406,37 @@
 
     rtc_library("audio_device_objc") {
       visibility = [ "*" ]
-
+      allow_poison = [ "default_task_queue" ]
       sources = [
         "objc/native/src/objc_audio_device.h",
         "objc/native/src/objc_audio_device.mm",
+        "objc/native/src/objc_audio_device_delegate.h",
+        "objc/native/src/objc_audio_device_delegate.mm",
       ]
 
       deps = [
         ":audio_device_api_objc",
+        "../api:array_view",
+        "../api:make_ref_counted",
+        "../api:refcountedbase",
+        "../api:scoped_refptr",
+        "../api:sequence_checker",
+        "../api/task_queue",
+        "../api/task_queue:default_task_queue_factory",
         "../modules/audio_device:audio_device_api",
+        "../modules/audio_device:audio_device_buffer",
+        "../rtc_base:buffer",
+        "../rtc_base:checks",
         "../rtc_base:logging",
+        "../rtc_base:safe_minmax",
+        "../rtc_base:threading",
+        "../rtc_base:timeutils",
       ]
     }
 
     rtc_library("objc_audio_device_module") {
       visibility = [ "*" ]
-
+      allow_poison = [ "default_task_queue" ]
       sources = [
         "objc/native/api/objc_audio_device_module.h",
         "objc/native/api/objc_audio_device_module.mm",
diff --git a/sdk/objc/native/src/objc_audio_device.h b/sdk/objc/native/src/objc_audio_device.h
index 7700855..fcfe7a6 100644
--- a/sdk/objc/native/src/objc_audio_device.h
+++ b/sdk/objc/native/src/objc_audio_device.h
@@ -11,17 +11,25 @@
 #ifndef SDK_OBJC_NATIVE_SRC_OBJC_AUDIO_DEVICE_H_
 #define SDK_OBJC_NATIVE_SRC_OBJC_AUDIO_DEVICE_H_
 
+#include <memory>
+
 #import "components/audio/RTCAudioDevice.h"
+
+#include "modules/audio_device/audio_device_buffer.h"
 #include "modules/audio_device/include/audio_device.h"
+#include "rtc_base/thread.h"
+
+@class ObjCAudioDeviceDelegate;
 
 namespace webrtc {
 
+class FineAudioBuffer;
+
 namespace objc_adm {
 
 class ObjCAudioDeviceModule : public AudioDeviceModule {
  public:
-  explicit ObjCAudioDeviceModule(
-      id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device);
+  explicit ObjCAudioDeviceModule(id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device);
   ~ObjCAudioDeviceModule() override;
 
   // Retrieve the currently utilized audio layer
@@ -126,8 +134,140 @@
   int GetRecordAudioParameters(AudioParameters* params) const override;
 #endif  // WEBRTC_IOS
 
+ public:
+  OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+                                 const AudioTimeStamp* time_stamp,
+                                 NSInteger bus_number,
+                                 UInt32 num_frames,
+                                 const AudioBufferList* io_data,
+                                 void* render_context,
+                                 RTC_OBJC_TYPE(RTCAudioDeviceRenderRecordedDataBlock) render_block);
+
+  OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
+                            const AudioTimeStamp* time_stamp,
+                            NSInteger bus_number,
+                            UInt32 num_frames,
+                            AudioBufferList* io_data);
+
+  // Notifies `ObjCAudioDeviceModule` that at least one of the audio input
+  // parameters or audio input latency of `RTCAudioDevice` has changed. It is necessary to
+  // update `record_parameters_` with current audio parameter of `RTCAudioDevice`
+  // via `UpdateAudioParameters` and if parameters have actually changed then
+  // ADB parameters are updated with `UpdateInputAudioDeviceBuffer`. Audio input latency
+  // stored in `cached_recording_delay_ms_` is also updated with current latency
+  // of `RTCAudioDevice`.
+  void HandleAudioInputParametersChange();
+
+  // Same as `HandleAudioInputParametersChange` but should be called when audio output
+  // parameters of `RTCAudioDevice` have changed.
+  void HandleAudioOutputParametersChange();
+
+  // Notifies `ObjCAudioDeviceModule` that audio input was interrupted due to
+  // any reason, so `ObjCAudioDeviceModule` can prepare for a restart of audio IO.
+  void HandleAudioInputInterrupted();
+
+  // Same as `HandleAudioInputInterrupted` but should be called when audio output
+  // is interrupted.
+  void HandleAudioOutputInterrupted();
+
+ private:
+  // Update our audio parameters if they are different from current device audio parameters
+  // Returns true when our parameters are updated, false otherwise.
+  // `ObjCAudioDeviceModule` has audio device buffer (ADB) which has audio parameters
+  // of playout & recording. The ADB is configured to work with specific sample rate & channel
+  // count. `ObjCAudioDeviceModule` stores audio parameters which were used to configure ADB in the
+  // fields `playout_parameters_` and `recording_parameters_`.
+  // `RTCAudioDevice` protocol has its own audio parameters exposed as individual properties.
+  // `RTCAudioDevice` audio parameters might change when playout/recording is already in progress,
+  // for example, when device is switched. `RTCAudioDevice` audio parameters must be kept in sync
+  // with ADB audio parameters. This method is invoked when `RTCAudioDevice` reports that its audio
+  // parameters (`device_params`) have changed and it detects if there is any difference with our
+  // current audio parameters (`params`). Our parameters are updated in case of actual change and
+  // method returns true. In case of actual change there is follow-up call to either
+  // `UpdateOutputAudioDeviceBuffer` or `UpdateInputAudioDeviceBuffer` to apply updated
+  // `playout_parameters_` or `recording_parameters_` to ADB.
+
+  bool UpdateAudioParameters(AudioParameters& params, const AudioParameters& device_params);
+
+  // Update our cached audio latency with device latency. Device latency is reported by
+  // `RTCAudioDevice` object. Whenever latency is changed, `RTCAudioDevice` is obliged to notify ADM
+  // about the change via `HandleAudioInputParametersChange` or `HandleAudioOutputParametersChange`.
+  // Current device IO latency is cached in the atomic field and used from audio IO thread
+  // to be reported to audio device buffer. It is highly recommended by Apple not to call any
+  // ObjC methods from audio IO thread, that is why implementation relies on caching latency
+  // into a field and being notified when latency is changed, which is the case when device
+  // is switched.
+  void UpdateAudioDelay(std::atomic<int>& delay_ms, const NSTimeInterval device_latency);
+
+  // Uses current `playout_parameters_` to inform the audio device buffer (ADB)
+  // about our internal audio parameters.
+  void UpdateOutputAudioDeviceBuffer();
+
+  // Uses current `record_parameters_` to inform the audio device buffer (ADB)
+  // about our internal audio parameters.
+  void UpdateInputAudioDeviceBuffer();
+
  private:
   id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device_;
+
+  const std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+
+  // AudioDeviceBuffer is a buffer to consume audio recorded by `RTCAudioDevice`
+  // and provide audio to be played via `RTCAudioDevice`.
+  // Audio PCMs could have different sample rate and channels count, but expected
+  // to be in 16-bit integer interleaved linear PCM format.
+  // The current parameters the ADB is configured to work with are stored in the fields
+  // `playout_parameters_` for playout and `record_parameters_` for recording.
+  // These parameters and ADB must be kept in sync with `RTCAudioDevice` audio parameters.
+  std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;
+
+  // Set to true when recording is active and false otherwise.
+  std::atomic<bool> recording_ = false;
+
+  // Set to true when playout is active and false otherwise.
+  std::atomic<bool> playing_ = false;
+
+  // Stores cached value of `RTCAudioDevice outputLatency` to be used from
+  // audio IO thread. Latency is updated on audio output parameters change.
+  std::atomic<int> cached_playout_delay_ms_ = 0;
+
+  // Same as `cached_playout_delay_ms_` but for audio input
+  std::atomic<int> cached_recording_delay_ms_ = 0;
+
+  // Thread on which the audio device module was initialized.
+  rtc::Thread* thread_;
+
+  // Ensures that methods are called from the same thread as this object is
+  // initialized on.
+  SequenceChecker thread_checker_;
+
+  // I/O audio thread checker.
+  SequenceChecker io_playout_thread_checker_;
+  SequenceChecker io_record_thread_checker_;
+
+  bool is_initialized_ RTC_GUARDED_BY(thread_checker_) = false;
+  bool is_playout_initialized_ RTC_GUARDED_BY(thread_checker_) = false;
+  bool is_recording_initialized_ RTC_GUARDED_BY(thread_checker_) = false;
+
+  // Contains audio parameters (sample rate, #channels, buffer size etc.) for
+  // the playout and recording sides.
+  AudioParameters playout_parameters_;
+  AudioParameters record_parameters_;
+
+  // `FineAudioBuffer` takes an `AudioDeviceBuffer` which delivers audio data
+  // in chunks of 10ms. `RTCAudioDevice` might deliver recorded data in
+  // chunks which are not 10ms long. `FineAudioBuffer` implements adaptation
+  // from undetermined chunk size to 10ms chunks.
+  std::unique_ptr<FineAudioBuffer> record_fine_audio_buffer_;
+
+  // Same as `record_fine_audio_buffer_` but for audio output.
+  std::unique_ptr<FineAudioBuffer> playout_fine_audio_buffer_;
+
+  // Temporary storage for recorded data.
+  rtc::BufferT<int16_t> record_audio_buffer_;
+
+  // Delegate object provided to RTCAudioDevice during initialization
+  ObjCAudioDeviceDelegate* audio_device_delegate_;
 };
 
 }  // namespace objc_adm
diff --git a/sdk/objc/native/src/objc_audio_device.mm b/sdk/objc/native/src/objc_audio_device.mm
index 1f8a5f8..273cd75 100644
--- a/sdk/objc/native/src/objc_audio_device.mm
+++ b/sdk/objc/native/src/objc_audio_device.mm
@@ -9,16 +9,46 @@
  */
 
 #include "objc_audio_device.h"
+#include "objc_audio_device_delegate.h"
 
+#import "components/audio/RTCAudioDevice.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+
+#include "api/task_queue/default_task_queue_factory.h"
 #include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/time_utils.h"
+
+namespace {
+
+webrtc::AudioParameters RecordParameters(id<RTCAudioDevice> audio_device) {
+  const double sample_rate = static_cast<int>([audio_device inputSampleRate]);
+  const size_t channels = static_cast<size_t>([audio_device inputNumberOfChannels]);
+  const size_t frames_per_buffer =
+      static_cast<size_t>(sample_rate * [audio_device inputIOBufferDuration] + .5);
+  return webrtc::AudioParameters(sample_rate, channels, frames_per_buffer);
+}
+
+webrtc::AudioParameters PlayoutParameters(id<RTCAudioDevice> audio_device) {
+  const double sample_rate = static_cast<int>([audio_device outputSampleRate]);
+  const size_t channels = static_cast<size_t>([audio_device outputNumberOfChannels]);
+  const size_t frames_per_buffer =
+      static_cast<size_t>(sample_rate * [audio_device outputIOBufferDuration] + .5);
+  return webrtc::AudioParameters(sample_rate, channels, frames_per_buffer);
+}
+
+}  // namespace
 
 namespace webrtc {
 namespace objc_adm {
 
 ObjCAudioDeviceModule::ObjCAudioDeviceModule(id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device)
-    : audio_device_(audio_device) {
+    : audio_device_(audio_device), task_queue_factory_(CreateDefaultTaskQueueFactory()) {
   RTC_DLOG_F(LS_VERBOSE) << "";
   RTC_DCHECK(audio_device_);
+  thread_checker_.Detach();
+  io_playout_thread_checker_.Detach();
+  io_record_thread_checker_.Detach();
 }
 
 ObjCAudioDeviceModule::~ObjCAudioDeviceModule() {
@@ -26,105 +56,475 @@
 }
 
 int32_t ObjCAudioDeviceModule::RegisterAudioCallback(AudioTransport* audioCallback) {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return -1;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK(audio_device_buffer_);
+  return audio_device_buffer_->RegisterAudioCallback(audioCallback);
 }
 
 int32_t ObjCAudioDeviceModule::Init() {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return -1;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  if (Initialized()) {
+    RTC_LOG_F(LS_INFO) << "Already initialized";
+    return 0;
+  }
+  io_playout_thread_checker_.Detach();
+  io_record_thread_checker_.Detach();
+
+  thread_ = rtc::Thread::Current();
+  audio_device_buffer_.reset(new webrtc::AudioDeviceBuffer(task_queue_factory_.get()));
+
+  if (![audio_device_ isInitialized]) {
+    if (audio_device_delegate_ == nil) {
+      audio_device_delegate_ =
+          [[ObjCAudioDeviceDelegate alloc] initWithAudioDeviceModule:rtc::scoped_refptr(this)
+                                                   audioDeviceThread:thread_];
+    }
+
+    if (![audio_device_ initializeWithDelegate:audio_device_delegate_]) {
+      RTC_LOG_F(LS_WARNING) << "Failed to initialize audio device";
+      [audio_device_delegate_ resetAudioDeviceModule];
+      audio_device_delegate_ = nil;
+      return -1;
+    }
+  }
+
+  playout_parameters_.reset([audio_device_delegate_ preferredOutputSampleRate], 1);
+  UpdateOutputAudioDeviceBuffer();
+
+  record_parameters_.reset([audio_device_delegate_ preferredInputSampleRate], 1);
+  UpdateInputAudioDeviceBuffer();
+
+  is_initialized_ = true;
+
+  RTC_LOG_F(LS_INFO) << "Did initialize";
+  return 0;
 }
 
 int32_t ObjCAudioDeviceModule::Terminate() {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return -1;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  if (!Initialized()) {
+    RTC_LOG_F(LS_INFO) << "Not initialized";
+    return 0;
+  }
+
+  if ([audio_device_ isInitialized]) {
+    if (![audio_device_ terminate]) {
+      RTC_LOG_F(LS_ERROR) << "Failed to terminate audio device";
+      return -1;
+    }
+  }
+
+  if (audio_device_delegate_ != nil) {
+    [audio_device_delegate_ resetAudioDeviceModule];
+    audio_device_delegate_ = nil;
+  }
+
+  is_initialized_ = false;
+  is_playout_initialized_ = false;
+  is_recording_initialized_ = false;
+  thread_ = nullptr;
+
+  RTC_LOG_F(LS_INFO) << "Did terminate";
+  return 0;
 }
 
 bool ObjCAudioDeviceModule::Initialized() const {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return false;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return is_initialized_ && [audio_device_ isInitialized];
 }
 
 int32_t ObjCAudioDeviceModule::PlayoutIsAvailable(bool* available) {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return -1;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  *available = Initialized();
+  return 0;
 }
 
 bool ObjCAudioDeviceModule::PlayoutIsInitialized() const {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return false;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return Initialized() && is_playout_initialized_ && [audio_device_ isPlayoutInitialized];
 }
 
 int32_t ObjCAudioDeviceModule::InitPlayout() {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return -1;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!Initialized()) {
+    return -1;
+  }
+  if (PlayoutIsInitialized()) {
+    return 0;
+  }
+  RTC_DCHECK(!playing_.load());
+
+  if (![audio_device_ isPlayoutInitialized]) {
+    if (![audio_device_ initializePlayout]) {
+      RTC_LOG_F(LS_ERROR) << "Failed to initialize audio device playout";
+      return -1;
+    }
+  }
+
+  if (UpdateAudioParameters(playout_parameters_, PlayoutParameters(audio_device_))) {
+    UpdateOutputAudioDeviceBuffer();
+  }
+
+  is_playout_initialized_ = true;
+  RTC_LOG_F(LS_INFO) << "Did initialize playout";
+  return 0;
 }
 
 bool ObjCAudioDeviceModule::Playing() const {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return false;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return playing_.load() && [audio_device_ isPlaying];
 }
 
 int32_t ObjCAudioDeviceModule::StartPlayout() {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return -1;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!PlayoutIsInitialized()) {
+    return -1;
+  }
+  if (Playing()) {
+    return 0;
+  }
+
+  audio_device_buffer_->StartPlayout();
+  if (playout_fine_audio_buffer_) {
+    playout_fine_audio_buffer_->ResetPlayout();
+  }
+  if (![audio_device_ startPlayout]) {
+    RTC_LOG_F(LS_ERROR) << "Failed to start audio device playout";
+    return -1;
+  }
+  playing_.store(true, std::memory_order_release);
+  RTC_LOG_F(LS_INFO) << "Did start playout";
+  return 0;
 }
 
 int32_t ObjCAudioDeviceModule::StopPlayout() {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return -1;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  if (![audio_device_ stopPlayout]) {
+    RTC_LOG_F(LS_WARNING) << "Failed to stop playout";
+    return -1;
+  }
+
+  audio_device_buffer_->StopPlayout();
+  playing_.store(false, std::memory_order_release);
+  RTC_LOG_F(LS_INFO) << "Did stop playout";
+  return 0;
 }
 
 int32_t ObjCAudioDeviceModule::PlayoutDelay(uint16_t* delayMS) const {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return -1;
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  *delayMS = static_cast<uint16_t>(rtc::SafeClamp<int>(
+      cached_playout_delay_ms_.load(), 0, std::numeric_limits<uint16_t>::max()));
+  return 0;
 }
 
 int32_t ObjCAudioDeviceModule::RecordingIsAvailable(bool* available) {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return -1;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  *available = Initialized();
+  return 0;
 }
 
 bool ObjCAudioDeviceModule::RecordingIsInitialized() const {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return false;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return Initialized() && is_recording_initialized_ && [audio_device_ isRecordingInitialized];
 }
 
 int32_t ObjCAudioDeviceModule::InitRecording() {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return -1;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!Initialized()) {
+    return -1;
+  }
+  if (RecordingIsInitialized()) {
+    return 0;
+  }
+  RTC_DCHECK(!recording_.load());
+
+  if (![audio_device_ isRecordingInitialized]) {
+    if (![audio_device_ initializeRecording]) {
+      RTC_LOG_F(LS_ERROR) << "Failed to initialize audio device recording";
+      return -1;
+    }
+  }
+
+  if (UpdateAudioParameters(record_parameters_, RecordParameters(audio_device_))) {
+    UpdateInputAudioDeviceBuffer();
+  }
+
+  is_recording_initialized_ = true;
+  RTC_LOG_F(LS_INFO) << "Did initialize recording";
+  return 0;
 }
 
 bool ObjCAudioDeviceModule::Recording() const {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return false;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return recording_.load() && [audio_device_ isRecording];
 }
 
 int32_t ObjCAudioDeviceModule::StartRecording() {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return -1;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!RecordingIsInitialized()) {
+    return -1;
+  }
+  if (Recording()) {
+    return 0;
+  }
+
+  audio_device_buffer_->StartRecording();
+  if (record_fine_audio_buffer_) {
+    record_fine_audio_buffer_->ResetRecord();
+  }
+
+  if (![audio_device_ startRecording]) {
+    RTC_LOG_F(LS_ERROR) << "Failed to start audio device recording";
+    return -1;
+  }
+  recording_.store(true, std::memory_order_release);
+  RTC_LOG_F(LS_INFO) << "Did start recording";
+  return 0;
 }
 
 int32_t ObjCAudioDeviceModule::StopRecording() {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return -1;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  if (![audio_device_ stopRecording]) {
+    RTC_LOG_F(LS_WARNING) << "Failed to stop recording";
+    return -1;
+  }
+  audio_device_buffer_->StopRecording();
+  recording_.store(false, std::memory_order_release);
+  RTC_LOG_F(LS_INFO) << "Did stop recording";
+  return 0;
 }
 
 #if defined(WEBRTC_IOS)
 
 int ObjCAudioDeviceModule::GetPlayoutAudioParameters(AudioParameters* params) const {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return -1;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK(playout_parameters_.is_valid());
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  *params = playout_parameters_;
+  return 0;
 }
 
 int ObjCAudioDeviceModule::GetRecordAudioParameters(AudioParameters* params) const {
-  RTC_DLOG_F(LS_VERBOSE) << "Not yet implemented";
-  return -1;
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK(record_parameters_.is_valid());
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  *params = record_parameters_;
+  return 0;
 }
 
 #endif  // WEBRTC_IOS
 
-#pragma mark - Not implemented/Not relevant
+void ObjCAudioDeviceModule::UpdateOutputAudioDeviceBuffer() {
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
+
+  RTC_DCHECK_GT(playout_parameters_.sample_rate(), 0);
+  RTC_DCHECK(playout_parameters_.channels() == 1 || playout_parameters_.channels() == 2);
+
+  audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
+  audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
+  playout_fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_.get()));
+}
+
+void ObjCAudioDeviceModule::UpdateInputAudioDeviceBuffer() {
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
+
+  RTC_DCHECK_GT(record_parameters_.sample_rate(), 0);
+  RTC_DCHECK(record_parameters_.channels() == 1 || record_parameters_.channels() == 2);
+
+  audio_device_buffer_->SetRecordingSampleRate(record_parameters_.sample_rate());
+  audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
+  record_fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_.get()));
+}
+
+void ObjCAudioDeviceModule::UpdateAudioDelay(std::atomic<int>& delay_ms,
+                                             const NSTimeInterval device_latency) {
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  int latency_ms = static_cast<int>(rtc::kNumMillisecsPerSec * device_latency);
+  if (latency_ms <= 0) {
+    return;
+  }
+  const int old_latency_ms = delay_ms.exchange(latency_ms);
+  if (old_latency_ms != latency_ms) {
+    RTC_LOG_F(LS_INFO) << "Did change audio IO latency from: " << old_latency_ms
+                       << " ms to: " << latency_ms << " ms";
+  }
+}
+
+bool ObjCAudioDeviceModule::UpdateAudioParameters(AudioParameters& params,
+                                                  const AudioParameters& device_params) {
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!device_params.is_complete()) {
+    RTC_LOG_F(LS_INFO) << "Device params are incomplete: " << device_params.ToString();
+    return false;
+  }
+  if (params.channels() == device_params.channels() &&
+      params.frames_per_buffer() == device_params.frames_per_buffer() &&
+      params.sample_rate() == device_params.sample_rate()) {
+    RTC_LOG_F(LS_INFO) << "Device params: " << device_params.ToString()
+                       << " are not different from: " << params.ToString();
+    return false;
+  }
+
+  RTC_LOG_F(LS_INFO) << "Audio params will be changed from: " << params.ToString()
+                     << " to: " << device_params.ToString();
+  params.reset(
+      device_params.sample_rate(), device_params.channels(), device_params.frames_per_buffer());
+  return true;
+}
+
+OSStatus ObjCAudioDeviceModule::OnDeliverRecordedData(
+    AudioUnitRenderActionFlags* flags,
+    const AudioTimeStamp* time_stamp,
+    NSInteger bus_number,
+    UInt32 num_frames,
+    const AudioBufferList* io_data,
+    void* render_context,
+    RTC_OBJC_TYPE(RTCAudioDeviceRenderRecordedDataBlock) render_block) {
+  RTC_DCHECK_RUN_ON(&io_record_thread_checker_);
+  OSStatus result = noErr;
+  // Simply return if recording is not enabled.
+  if (!recording_.load()) return result;
+
+  if (io_data != nullptr) {
+    // AudioBuffer already fullfilled with audio data
+    RTC_DCHECK_EQ(1, io_data->mNumberBuffers);
+    const AudioBuffer* audio_buffer = &io_data->mBuffers[0];
+    RTC_DCHECK(audio_buffer->mNumberChannels == 1 || audio_buffer->mNumberChannels == 2);
+
+    record_fine_audio_buffer_->DeliverRecordedData(
+        rtc::ArrayView<const int16_t>(static_cast<int16_t*>(audio_buffer->mData), num_frames),
+        cached_recording_delay_ms_.load());
+    return noErr;
+  }
+  RTC_DCHECK(render_block != nullptr) << "Either io_data or render_block must be provided";
+
+  // Set the size of our own audio buffer and clear it first to avoid copying
+  // in combination with potential reallocations.
+  // On real iOS devices, the size will only be set once (at first callback).
+  const int channels_count = record_parameters_.channels();
+  record_audio_buffer_.Clear();
+  record_audio_buffer_.SetSize(num_frames * channels_count);
+
+  // Allocate AudioBuffers to be used as storage for the received audio.
+  // The AudioBufferList structure works as a placeholder for the
+  // AudioBuffer structure, which holds a pointer to the actual data buffer
+  // in `record_audio_buffer_`. Recorded audio will be rendered into this memory
+  // at each input callback when calling `render_block`.
+  AudioBufferList audio_buffer_list;
+  audio_buffer_list.mNumberBuffers = 1;
+  AudioBuffer* audio_buffer = &audio_buffer_list.mBuffers[0];
+  audio_buffer->mNumberChannels = channels_count;
+  audio_buffer->mDataByteSize =
+      record_audio_buffer_.size() * sizeof(decltype(record_audio_buffer_)::value_type);
+  audio_buffer->mData = reinterpret_cast<int8_t*>(record_audio_buffer_.data());
+
+  // Obtain the recorded audio samples by initiating a rendering cycle into own buffer.
+  result =
+      render_block(flags, time_stamp, bus_number, num_frames, &audio_buffer_list, render_context);
+  if (result != noErr) {
+    RTC_LOG_F(LS_ERROR) << "Failed to render audio: " << result;
+    return result;
+  }
+
+  // Get a pointer to the recorded audio and send it to the WebRTC ADB.
+  // Use the FineAudioBuffer instance to convert between native buffer size
+  // and the 10ms buffer size used by WebRTC.
+  record_fine_audio_buffer_->DeliverRecordedData(record_audio_buffer_,
+                                                 cached_recording_delay_ms_.load());
+  return noErr;
+}
+
+OSStatus ObjCAudioDeviceModule::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
+                                                 const AudioTimeStamp* time_stamp,
+                                                 NSInteger bus_number,
+                                                 UInt32 num_frames,
+                                                 AudioBufferList* io_data) {
+  RTC_DCHECK_RUN_ON(&io_playout_thread_checker_);
+  // Verify 16-bit, noninterleaved mono or stereo PCM signal format.
+  RTC_DCHECK_EQ(1, io_data->mNumberBuffers);
+  AudioBuffer* audio_buffer = &io_data->mBuffers[0];
+  RTC_DCHECK(audio_buffer->mNumberChannels == 1 || audio_buffer->mNumberChannels == 2);
+  RTC_DCHECK_EQ(audio_buffer->mDataByteSize,
+                sizeof(int16_t) * num_frames * audio_buffer->mNumberChannels);
+
+  // Produce silence and give player a hint about it if playout is not
+  // activated.
+  if (!playing_.load()) {
+    *flags |= kAudioUnitRenderAction_OutputIsSilence;
+    memset(static_cast<int8_t*>(audio_buffer->mData), 0, audio_buffer->mDataByteSize);
+    return noErr;
+  }
+
+  // Read decoded 16-bit PCM samples from WebRTC into the
+  // `io_data` destination buffer.
+  playout_fine_audio_buffer_->GetPlayoutData(
+      rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_buffer->mData),
+                              num_frames * audio_buffer->mNumberChannels),
+      cached_playout_delay_ms_.load());
+
+  return noErr;
+}
+
+void ObjCAudioDeviceModule::HandleAudioInputInterrupted() {
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  io_record_thread_checker_.Detach();
+}
+
+void ObjCAudioDeviceModule::HandleAudioOutputInterrupted() {
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  io_playout_thread_checker_.Detach();
+}
+
+void ObjCAudioDeviceModule::HandleAudioInputParametersChange() {
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  if (UpdateAudioParameters(record_parameters_, RecordParameters(audio_device_))) {
+    UpdateInputAudioDeviceBuffer();
+  }
+
+  UpdateAudioDelay(cached_recording_delay_ms_, [audio_device_ inputLatency]);
+}
+
+void ObjCAudioDeviceModule::HandleAudioOutputParametersChange() {
+  RTC_DLOG_F(LS_VERBOSE) << "";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  if (UpdateAudioParameters(playout_parameters_, PlayoutParameters(audio_device_))) {
+    UpdateOutputAudioDeviceBuffer();
+  }
+
+  UpdateAudioDelay(cached_playout_delay_ms_, [audio_device_ outputLatency]);
+}
+
+#pragma mark - Not implemented/Not relevant methods from AudioDeviceModule
 
 int32_t ObjCAudioDeviceModule::ActiveAudioLayer(AudioLayer* audioLayer) const {
   return -1;
@@ -308,4 +708,4 @@
 
 }  // namespace objc_adm
 
-}  // namespace webrtc
\ No newline at end of file
+}  // namespace webrtc
diff --git a/sdk/objc/native/src/objc_audio_device_delegate.h b/sdk/objc/native/src/objc_audio_device_delegate.h
new file mode 100644
index 0000000..3af079d
--- /dev/null
+++ b/sdk/objc/native/src/objc_audio_device_delegate.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_NATIVE_SRC_OBJC_AUDIO_DEVICE_DELEGATE_H_
+#define SDK_OBJC_NATIVE_SRC_OBJC_AUDIO_DEVICE_DELEGATE_H_
+
+#include "api/scoped_refptr.h"
+#include "rtc_base/thread.h"
+
+#import "components/audio/RTCAudioDevice.h"
+
+namespace webrtc {
+namespace objc_adm {
+class ObjCAudioDeviceModule;
+}  // namespace objc_adm
+}  // namespace webrtc
+
+// Bridges callbacks from a user-provided `RTCAudioDevice` back into the
+// native `ObjCAudioDeviceModule`. Instances are created by the ADM and handed
+// to the audio device; they forward render/record I/O and parameter-change
+// notifications while holding a reference that can be severed on teardown.
+@interface ObjCAudioDeviceDelegate : NSObject <RTC_OBJC_TYPE (RTCAudioDeviceDelegate)>
+
+// Designated initializer. `audioDeviceModule` is the ADM to forward into;
+// `thread` is the ADM's worker thread on which notifications must arrive.
+// Must be called on `thread`.
+- (instancetype)initWithAudioDeviceModule:
+                    (rtc::scoped_refptr<webrtc::objc_adm::ObjCAudioDeviceModule>)audioDeviceModule
+                        audioDeviceThread:(rtc::Thread*)thread;
+
+// Drops the reference to the ADM so subsequent callbacks become no-ops.
+// Must be called on the audio device thread passed at init.
+- (void)resetAudioDeviceModule;
+
+@end
+
+#endif  // SDK_OBJC_NATIVE_SRC_OBJC_AUDIO_DEVICE_DELEGATE_H_
diff --git a/sdk/objc/native/src/objc_audio_device_delegate.mm b/sdk/objc/native/src/objc_audio_device_delegate.mm
new file mode 100644
index 0000000..b7bb4e4
--- /dev/null
+++ b/sdk/objc/native/src/objc_audio_device_delegate.mm
@@ -0,0 +1,194 @@
+/*
+ *  Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <AudioUnit/AudioUnit.h>
+#import <Foundation/Foundation.h>
+
+#import "objc_audio_device.h"
+#import "objc_audio_device_delegate.h"
+
+#include "api/make_ref_counted.h"
+#include "api/ref_counted_base.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/thread.h"
+
+namespace {
+
+// Preferred hardware sample rates requested from the audio device. 48 kHz
+// matches the native rate of most iOS/macOS hardware and avoids resampling.
+constexpr double kPreferredInputSampleRate = 48000.0;
+constexpr double kPreferredOutputSampleRate = 48000.0;
+
+// WebRTC processes audio in chunks of 10ms. Preferring 20ms audio chunks
+// is a compromise between performance and power consumption.
+// NOTE(review): "Peferred" below is a misspelling of "Preferred"; renaming
+// would touch the uses in -initWithAudioDeviceModule: — worth a follow-up.
+constexpr NSTimeInterval kPeferredInputIOBufferDuration = 0.02;
+constexpr NSTimeInterval kPeferredOutputIOBufferDuration = 0.02;
+
+// Ref-counted holder for the ADM pointer and its worker thread. The render
+// and record blocks capture scoped_refptr copies of this object, so the
+// holder (and through it the ADM) stays alive for as long as any real-time
+// callback may still fire, independent of the ObjC delegate's lifetime.
+class AudioDeviceDelegateImpl final : public rtc::RefCountedNonVirtual<AudioDeviceDelegateImpl> {
+ public:
+  AudioDeviceDelegateImpl(
+      rtc::scoped_refptr<webrtc::objc_adm::ObjCAudioDeviceModule> audio_device_module,
+      rtc::Thread* thread)
+      : audio_device_module_(audio_device_module), thread_(thread) {
+    RTC_DCHECK(audio_device_module_);
+    RTC_DCHECK(thread_);
+  }
+
+  // May return nullptr after reset_audio_device_module() has been called.
+  webrtc::objc_adm::ObjCAudioDeviceModule* audio_device_module() const {
+    return audio_device_module_.get();
+  }
+
+  rtc::Thread* thread() const { return thread_; }
+
+  // Severs the link to the ADM; subsequent callbacks see a null module.
+  void reset_audio_device_module() { audio_device_module_ = nullptr; }
+
+ private:
+  rtc::scoped_refptr<webrtc::objc_adm::ObjCAudioDeviceModule> audio_device_module_;
+  rtc::Thread* thread_;
+};
+
+}  // namespace
+
+@implementation ObjCAudioDeviceDelegate {
+  rtc::scoped_refptr<AudioDeviceDelegateImpl> impl_;
+}
+
+@synthesize getPlayoutData = getPlayoutData_;
+
+@synthesize deliverRecordedData = deliverRecordedData_;
+
+@synthesize preferredInputSampleRate = preferredInputSampleRate_;
+
+@synthesize preferredInputIOBufferDuration = preferredInputIOBufferDuration_;
+
+@synthesize preferredOutputSampleRate = preferredOutputSampleRate_;
+
+@synthesize preferredOutputIOBufferDuration = preferredOutputIOBufferDuration_;
+
+// Designated initializer. Wraps the ADM and its worker thread in a
+// ref-counted holder and builds the two real-time I/O blocks. The blocks
+// capture scoped_refptr copies of the holder (not `self`), so audio I/O
+// callbacks never touch ObjC delegate state and remain safe even if the
+// delegate is deallocated while the audio device still invokes them.
+- (instancetype)initWithAudioDeviceModule:
+                    (rtc::scoped_refptr<webrtc::objc_adm::ObjCAudioDeviceModule>)audioDeviceModule
+                        audioDeviceThread:(rtc::Thread*)thread {
+  RTC_DCHECK_RUN_ON(thread);
+  if (self = [super init]) {
+    impl_ = rtc::make_ref_counted<AudioDeviceDelegateImpl>(audioDeviceModule, thread);
+    preferredInputSampleRate_ = kPreferredInputSampleRate;
+    preferredInputIOBufferDuration_ = kPeferredInputIOBufferDuration;
+    preferredOutputSampleRate_ = kPreferredOutputSampleRate;
+    preferredOutputIOBufferDuration_ = kPeferredOutputIOBufferDuration;
+
+    // Playout path: asks the ADM for decoded PCM, or renders silence (with
+    // the corresponding render-action hint) once the ADM has been reset.
+    rtc::scoped_refptr<AudioDeviceDelegateImpl> playout_delegate = impl_;
+    getPlayoutData_ = ^OSStatus(AudioUnitRenderActionFlags* _Nonnull actionFlags,
+                                const AudioTimeStamp* _Nonnull timestamp,
+                                NSInteger inputBusNumber,
+                                UInt32 frameCount,
+                                AudioBufferList* _Nonnull outputData) {
+      webrtc::objc_adm::ObjCAudioDeviceModule* audio_device =
+          playout_delegate->audio_device_module();
+      if (audio_device) {
+        return audio_device->OnGetPlayoutData(
+            actionFlags, timestamp, inputBusNumber, frameCount, outputData);
+      } else {
+        *actionFlags |= kAudioUnitRenderAction_OutputIsSilence;
+        RTC_LOG(LS_VERBOSE) << "No alive audio device";
+        return noErr;
+      }
+    };
+
+    // Record path: forwards captured PCM into the ADM; silently drops frames
+    // once the ADM has been reset.
+    rtc::scoped_refptr<AudioDeviceDelegateImpl> record_delegate = impl_;
+    deliverRecordedData_ =
+        ^OSStatus(AudioUnitRenderActionFlags* _Nonnull actionFlags,
+                  const AudioTimeStamp* _Nonnull timestamp,
+                  NSInteger inputBusNumber,
+                  UInt32 frameCount,
+                  const AudioBufferList* _Nullable inputData,
+                  void* renderContext,
+                  RTC_OBJC_TYPE(RTCAudioDeviceRenderRecordedDataBlock) _Nullable renderBlock) {
+          webrtc::objc_adm::ObjCAudioDeviceModule* audio_device =
+              record_delegate->audio_device_module();
+          if (audio_device) {
+            return audio_device->OnDeliverRecordedData(actionFlags,
+                                                       timestamp,
+                                                       inputBusNumber,
+                                                       frameCount,
+                                                       inputData,
+                                                       renderContext,
+                                                       renderBlock);
+          } else {
+            RTC_LOG(LS_VERBOSE) << "No alive audio device";
+            return noErr;
+          }
+        };
+  }
+  return self;
+}
+
+// Forwards a recording-format change to the ADM, if it is still alive.
+// Must be invoked on the audio device thread.
+- (void)notifyAudioInputParametersChange {
+  RTC_DCHECK_RUN_ON(impl_->thread());
+  auto* adm = impl_->audio_device_module();
+  if (adm != nullptr) {
+    adm->HandleAudioInputParametersChange();
+  }
+}
+
+// Forwards a playout-format change to the ADM, if it is still alive.
+// Must be invoked on the audio device thread.
+- (void)notifyAudioOutputParametersChange {
+  RTC_DCHECK_RUN_ON(impl_->thread());
+  auto* adm = impl_->audio_device_module();
+  if (adm != nullptr) {
+    adm->HandleAudioOutputParametersChange();
+  }
+}
+
+// Forwards a recording interruption to the ADM, if it is still alive.
+// Must be invoked on the audio device thread.
+- (void)notifyAudioInputInterrupted {
+  RTC_DCHECK_RUN_ON(impl_->thread());
+  auto* adm = impl_->audio_device_module();
+  if (adm != nullptr) {
+    adm->HandleAudioInputInterrupted();
+  }
+}
+
+// Forwards a playout interruption to the ADM, if it is still alive.
+// Must be invoked on the audio device thread.
+- (void)notifyAudioOutputInterrupted {
+  RTC_DCHECK_RUN_ON(impl_->thread());
+  auto* adm = impl_->audio_device_module();
+  if (adm != nullptr) {
+    adm->HandleAudioOutputInterrupted();
+  }
+}
+
+// Schedules `block` asynchronously on the audio device thread, wrapping its
+// execution in an autorelease pool so ObjC temporaries created by the block
+// are drained promptly on that thread.
+- (void)dispatchAsync:(dispatch_block_t)block {
+  rtc::Thread* device_thread = impl_->thread();
+  RTC_DCHECK(device_thread);
+  device_thread->PostTask([block] {
+    @autoreleasepool {
+      block();
+    }
+  });
+}
+
+// Runs `block` synchronously on the audio device thread. When already on
+// that thread the block executes inline, avoiding a blocking self-invoke
+// (which would deadlock). An autorelease pool drains any ObjC temporaries.
+- (void)dispatchSync:(dispatch_block_t)block {
+  rtc::Thread* device_thread = impl_->thread();
+  RTC_DCHECK(device_thread);
+  if (!device_thread->IsCurrent()) {
+    device_thread->Invoke<void>(RTC_FROM_HERE, [block] {
+      @autoreleasepool {
+        block();
+      }
+    });
+  } else {
+    @autoreleasepool {
+      block();
+    }
+  }
+}
+
+// Releases the holder's reference to the ADM. The I/O blocks captured by the
+// audio device keep the holder alive but will observe a null module and turn
+// into no-ops (silence for playout, dropped frames for record). Must be
+// invoked on the audio device thread.
+- (void)resetAudioDeviceModule {
+  RTC_DCHECK_RUN_ON(impl_->thread());
+  impl_->reset_audio_device_module();
+}
+
+@end