Propagate field trials into AudioDeviceIOS

AudioDeviceIOS now reads the "WebRTC-Audio-iOS-Holding" field trial from
an injected Environment instead of the global webrtc::field_trial API.
The Environment is plumbed through AudioDeviceModuleImpl,
AudioDeviceModuleIOS and ObjCAudioDeviceModule, which also reuse its
TaskQueueFactory instead of creating a default one. The
CreateAudioDeviceModule() and CreateMutedDetectAudioDeviceModule() entry
points gain overloads that take an Environment; the Environment-less
signatures are kept for existing callers but marked deprecated.
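
Callers are expected to construct the module roughly as sketched below
(a minimal, illustrative sketch: the FieldTrials wrapper and the trial
string shown are assumptions for the example, not part of this change):

  #include <memory>

  #include "api/environment/environment_factory.h"
  #include "api/field_trials.h"
  #include "sdk/objc/native/api/audio_device_module.h"

  // Attach the desired field trials to an Environment and hand it to
  // the iOS ADM factory; AudioDeviceIOS queries them through
  // env.field_trials() rather than the global field trial string.
  webrtc::Environment env = webrtc::CreateEnvironment(
      std::make_unique<webrtc::FieldTrials>(
          "WebRTC-Audio-iOS-Holding/Enabled/"));
  webrtc::scoped_refptr<webrtc::AudioDeviceModule> adm =
      webrtc::CreateAudioDeviceModule(env);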

Bug: webrtc:413413572
Change-Id: I96fb90f4bf0304045fc3141c96955fd3f2df043a
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/389220
Commit-Queue: Danil Chapovalov <danilchap@webrtc.org>
Reviewed-by: Henrik Andersson <henrika@webrtc.org>
Reviewed-by: Kári Helgason <kthelgason@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#44619}
diff --git a/modules/audio_device/audio_device_impl.cc b/modules/audio_device/audio_device_impl.cc
index da98848..585d637 100644
--- a/modules/audio_device/audio_device_impl.cc
+++ b/modules/audio_device/audio_device_impl.cc
@@ -102,7 +102,7 @@
   }
 
   // Create the platform-dependent implementation.
-  if (audio_device->CreatePlatformSpecificObjects() == -1) {
+  if (audio_device->CreatePlatformSpecificObjects(env) == -1) {
     return nullptr;
   }
 
@@ -166,7 +166,8 @@
   return 0;
 }
 
-int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() {
+int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects(
+    [[maybe_unused]] const Environment& env) {
   RTC_LOG(LS_INFO) << __FUNCTION__;
   if (audio_device_ != nullptr) {
     RTC_LOG(LS_INFO) << "Reusing provided audio device";
@@ -241,10 +242,11 @@
 // iOS ADM implementation.
 #if defined(WEBRTC_IOS)
   if (audio_layer == kPlatformDefaultAudio) {
-    audio_device_.reset(new ios_adm::AudioDeviceIOS(
+    audio_device_ = std::make_unique<ios_adm::AudioDeviceIOS>(
+        env,
         /*bypass_voice_processing=*/false,
         /*muted_speech_event_handler=*/nullptr,
-        /*render_error_handler=*/nullptr));
+        /*render_error_handler=*/nullptr);
     RTC_LOG(LS_INFO) << "iPhone Audio APIs will be utilized.";
   }
 // END #if defined(WEBRTC_IOS)
diff --git a/modules/audio_device/audio_device_impl.h b/modules/audio_device/audio_device_impl.h
index 609daf5..c7a229d 100644
--- a/modules/audio_device/audio_device_impl.h
+++ b/modules/audio_device/audio_device_impl.h
@@ -61,7 +61,7 @@
   ~AudioDeviceModuleImpl() override;
 
   int32_t CheckPlatform();
-  int32_t CreatePlatformSpecificObjects();
+  int32_t CreatePlatformSpecificObjects(const Environment& env);
   int32_t AttachAudioBuffer();
 
   // Retrieve the currently utilized audio layer
diff --git a/modules/audio_device/include/test_audio_device.cc b/modules/audio_device/include/test_audio_device.cc
index f6d5386..3b58ff2 100644
--- a/modules/audio_device/include/test_audio_device.cc
+++ b/modules/audio_device/include/test_audio_device.cc
@@ -453,7 +453,7 @@
   }
 
   // Create the platform-dependent implementation.
-  if (audio_device->CreatePlatformSpecificObjects() == -1) {
+  if (audio_device->CreatePlatformSpecificObjects(env) == -1) {
     return nullptr;
   }
 
diff --git a/sdk/BUILD.gn b/sdk/BUILD.gn
index c0c8eb4..c443527 100644
--- a/sdk/BUILD.gn
+++ b/sdk/BUILD.gn
@@ -231,6 +231,10 @@
       rtc_library("native_api_audio_device_module") {
         visibility = [ "*" ]
 
+        # TODO: bugs.webrtc.org/413413572 - Remove the poison when users of this
+        # api provide Environment explicitly.
+        allow_poison = [ "environment_construction" ]
+
         sources = [
           "objc/native/api/audio_device_module.h",
           "objc/native/api/audio_device_module.mm",
@@ -240,7 +244,10 @@
           ":audio_device",
           ":audio_device_module_error_handler",
           "../api:make_ref_counted",
+          "../api:scoped_refptr",
           "../api/audio:audio_device",
+          "../api/environment",
+          "../api/environment:environment_factory",
           "../modules/audio_device:audio_device_generic",
           "../rtc_base:checks",
           "../rtc_base:logging",
@@ -306,8 +313,8 @@
           "../api:scoped_refptr",
           "../api:sequence_checker",
           "../api/audio:audio_device",
+          "../api/environment",
           "../api/task_queue",
-          "../api/task_queue:default_task_queue_factory",
           "../api/task_queue:pending_task_safety_flag",
           "../modules/audio_device:audio_device_buffer",
           "../modules/audio_device:audio_device_config",
@@ -485,7 +492,6 @@
 
     rtc_library("audio_device_objc") {
       visibility = [ "*" ]
-      allow_poison = [ "environment_construction" ]
       sources = [
         "objc/native/src/objc_audio_device.h",
         "objc/native/src/objc_audio_device.mm",
@@ -501,8 +507,8 @@
         "../api:scoped_refptr",
         "../api:sequence_checker",
         "../api/audio:audio_device",
+        "../api/environment",
         "../api/task_queue",
-        "../api/task_queue:default_task_queue_factory",
         "../modules/audio_device:audio_device_buffer",
         "../rtc_base:buffer",
         "../rtc_base:checks",
@@ -528,7 +534,6 @@
 
     rtc_library("objc_audio_device_module") {
       visibility = [ "*" ]
-      allow_poison = [ "environment_construction" ]
       sources = [
         "objc/native/api/objc_audio_device_module.h",
         "objc/native/api/objc_audio_device_module.mm",
@@ -538,7 +543,9 @@
         ":audio_device_api_objc",
         ":audio_device_objc",
         "../api:make_ref_counted",
+        "../api:scoped_refptr",
         "../api/audio:audio_device",
+        "../api/environment",
         "../rtc_base:logging",
       ]
       if (is_mac) {
@@ -1209,6 +1216,7 @@
             "../api/audio:audio_processing",
             "../api/audio_codecs:builtin_audio_decoder_factory",
             "../api/audio_codecs:builtin_audio_encoder_factory",
+            "../api/environment",
             "../api/environment:environment_factory",
             "../api/task_queue:default_task_queue_factory",
             "../api/video:video_frame",
diff --git a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm
index 250ba40..5df145e 100644
--- a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm
+++ b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm
@@ -62,14 +62,6 @@
 
 @synthesize nativeFactory = _nativeFactory;
 
-- (webrtc::scoped_refptr<webrtc::AudioDeviceModule>)audioDeviceModule {
-#if defined(WEBRTC_IOS)
-  return webrtc::CreateAudioDeviceModule();
-#else
-  return nullptr;
-#endif
-}
-
 - (instancetype)init {
   webrtc::PeerConnectionFactoryDependencies dependencies;
   dependencies.audio_encoder_factory =
@@ -80,7 +72,10 @@
       [[RTC_OBJC_TYPE(RTCVideoEncoderFactoryH264) alloc] init]);
   dependencies.video_decoder_factory = webrtc::ObjCToNativeVideoDecoderFactory(
       [[RTC_OBJC_TYPE(RTCVideoDecoderFactoryH264) alloc] init]);
-  dependencies.adm = [self audioDeviceModule];
+  dependencies.env = webrtc::CreateEnvironment();
+#ifdef WEBRTC_IOS
+  dependencies.adm = webrtc::CreateAudioDeviceModule(*dependencies.env);
+#endif
   return [self initWithMediaAndDependencies:dependencies];
 }
 
@@ -105,6 +100,7 @@
   return [self initWithNoMedia];
 #else
   webrtc::PeerConnectionFactoryDependencies dependencies;
+  dependencies.env = webrtc::CreateEnvironment();
   dependencies.audio_encoder_factory =
       webrtc::CreateBuiltinAudioEncoderFactory();
   dependencies.audio_decoder_factory =
@@ -118,9 +114,12 @@
         webrtc::ObjCToNativeVideoDecoderFactory(decoderFactory);
   }
   if (audioDevice) {
-    dependencies.adm = webrtc::CreateAudioDeviceModule(audioDevice);
+    dependencies.adm =
+        webrtc::CreateAudioDeviceModule(*dependencies.env, audioDevice);
+#ifdef WEBRTC_IOS
   } else {
-    dependencies.adm = [self audioDeviceModule];
+    dependencies.adm = webrtc::CreateAudioDeviceModule(*dependencies.env);
+#endif
   }
   return [self initWithMediaAndDependencies:dependencies];
 #endif
diff --git a/sdk/objc/api/peerconnection/RTCPeerConnectionFactoryBuilder+DefaultComponents.mm b/sdk/objc/api/peerconnection/RTCPeerConnectionFactoryBuilder+DefaultComponents.mm
index e5eeba5..3e18fd1 100644
--- a/sdk/objc/api/peerconnection/RTCPeerConnectionFactoryBuilder+DefaultComponents.mm
+++ b/sdk/objc/api/peerconnection/RTCPeerConnectionFactoryBuilder+DefaultComponents.mm
@@ -19,6 +19,7 @@
 #include "sdk/objc/native/api/video_encoder_factory.h"
 
 #if defined(WEBRTC_IOS)
+#include "api/environment/environment_factory.h"
 #import "sdk/objc/native/api/audio_device_module.h"
 #endif
 
@@ -42,7 +43,8 @@
   [builder setVideoDecoderFactory:std::move(videoDecoderFactory)];
 
 #if defined(WEBRTC_IOS)
-  [builder setAudioDeviceModule:webrtc::CreateAudioDeviceModule()];
+  [builder setAudioDeviceModule:webrtc::CreateAudioDeviceModule(
+                                    webrtc::CreateEnvironment())];
 #endif
   return builder;
 }
diff --git a/sdk/objc/native/api/audio_device_module.h b/sdk/objc/native/api/audio_device_module.h
index 34f99e8..5a8944c 100644
--- a/sdk/objc/native/api/audio_device_module.h
+++ b/sdk/objc/native/api/audio_device_module.h
@@ -14,6 +14,8 @@
 #include <memory>
 
 #include "api/audio/audio_device.h"
+#include "api/environment/environment.h"
+#include "api/scoped_refptr.h"
 #include "sdk/objc/native/api/audio_device_module_error_handler.h"
 
 namespace webrtc {
@@ -23,20 +25,26 @@
 // Warning: Setting `bypass_voice_processing` will have unpredictable
 // consequences for the audio path in the device. It is not advisable to use in
 // most scenarios.
-webrtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
+scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
+    const Environment& env,
     bool bypass_voice_processing = false);
 
-// If `muted_speech_event_handler` is exist, audio unit will catch speech
-// activity while muted.
-webrtc::scoped_refptr<AudioDeviceModule> CreateMutedDetectAudioDeviceModule(
-    AudioDeviceModule::MutedSpeechEventHandler muted_speech_event_handler,
+[[deprecated("Pass `env` explicitly instead of relying on the default")]]
+scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
     bool bypass_voice_processing = false);
 
 // If `muted_speech_event_handler` is exist, audio unit will catch speech
 // activity while muted.
 // Provide `error_handler` to receive callbacks on errors such as microphone
 // init failed or playout start failied.
-webrtc::scoped_refptr<AudioDeviceModule> CreateMutedDetectAudioDeviceModule(
+scoped_refptr<AudioDeviceModule> CreateMutedDetectAudioDeviceModule(
+    const Environment& env,
+    AudioDeviceModule::MutedSpeechEventHandler muted_speech_event_handler,
+    ADMErrorHandler error_handler,
+    bool bypass_voice_processing = false);
+
+[[deprecated("Pass `env` explicitly instead of relying on the default")]]
+scoped_refptr<AudioDeviceModule> CreateMutedDetectAudioDeviceModule(
     AudioDeviceModule::MutedSpeechEventHandler muted_speech_event_handler,
     ADMErrorHandler error_handler,
     bool bypass_voice_processing = false);
diff --git a/sdk/objc/native/api/audio_device_module.mm b/sdk/objc/native/api/audio_device_module.mm
index 898886b..8325179 100644
--- a/sdk/objc/native/api/audio_device_module.mm
+++ b/sdk/objc/native/api/audio_device_module.mm
@@ -10,49 +10,57 @@
 
 #include "audio_device_module.h"
 
+#include "api/environment/environment.h"
+#include "api/environment/environment_factory.h"
 #include "api/make_ref_counted.h"
 #include "rtc_base/logging.h"
-
 #include "sdk/objc/native/src/audio/audio_device_module_ios.h"
 
+#ifndef WEBRTC_IOS
+#error This file shouldn't be compiled on platforms other than iOS.
+#endif
+
 namespace webrtc {
 
-webrtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
-    bool bypass_voice_processing) {
+scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
+    const Environment& env, bool bypass_voice_processing) {
   RTC_DLOG(LS_INFO) << __FUNCTION__;
-#if defined(WEBRTC_IOS)
-  return webrtc::make_ref_counted<ios_adm::AudioDeviceModuleIOS>(
+  return make_ref_counted<ios_adm::AudioDeviceModuleIOS>(
+      env,
       bypass_voice_processing,
       /*muted_speech_event_handler=*/nullptr,
       /*error_handler=*/nullptr);
-#else
-  RTC_LOG(LS_ERROR)
-      << "current platform is not supported => this module will self destruct!";
-  return nullptr;
-#endif
 }
 
-webrtc::scoped_refptr<AudioDeviceModule> CreateMutedDetectAudioDeviceModule(
-    AudioDeviceModule::MutedSpeechEventHandler muted_speech_event_handler,
+scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
     bool bypass_voice_processing) {
   RTC_DLOG(LS_INFO) << __FUNCTION__;
-  return CreateMutedDetectAudioDeviceModule(muted_speech_event_handler,
-                                            /*error_handler=*/nullptr,
-                                            bypass_voice_processing);
+  return make_ref_counted<ios_adm::AudioDeviceModuleIOS>(
+      CreateEnvironment(),
+      bypass_voice_processing,
+      /*muted_speech_event_handler=*/nullptr,
+      /*error_handler=*/nullptr);
 }
 
-webrtc::scoped_refptr<AudioDeviceModule> CreateMutedDetectAudioDeviceModule(
+scoped_refptr<AudioDeviceModule> CreateMutedDetectAudioDeviceModule(
+    const Environment& env,
     AudioDeviceModule::MutedSpeechEventHandler muted_speech_event_handler,
     ADMErrorHandler error_handler,
     bool bypass_voice_processing) {
   RTC_DLOG(LS_INFO) << __FUNCTION__;
-#if defined(WEBRTC_IOS)
-  return webrtc::make_ref_counted<ios_adm::AudioDeviceModuleIOS>(
-      bypass_voice_processing, muted_speech_event_handler, error_handler);
-#else
-  RTC_LOG(LS_ERROR)
-      << "current platform is not supported => this module will self destruct!";
-  return nullptr;
-#endif
+  return make_ref_counted<ios_adm::AudioDeviceModuleIOS>(
+      env, bypass_voice_processing, muted_speech_event_handler, error_handler);
+}
+
+scoped_refptr<AudioDeviceModule> CreateMutedDetectAudioDeviceModule(
+    AudioDeviceModule::MutedSpeechEventHandler muted_speech_event_handler,
+    ADMErrorHandler error_handler,
+    bool bypass_voice_processing) {
+  RTC_DLOG(LS_INFO) << __FUNCTION__;
+  return make_ref_counted<ios_adm::AudioDeviceModuleIOS>(
+      CreateEnvironment(),
+      bypass_voice_processing,
+      muted_speech_event_handler,
+      error_handler);
 }
 }  // namespace webrtc
diff --git a/sdk/objc/native/api/objc_audio_device_module.h b/sdk/objc/native/api/objc_audio_device_module.h
index a4387df..be1b344 100644
--- a/sdk/objc/native/api/objc_audio_device_module.h
+++ b/sdk/objc/native/api/objc_audio_device_module.h
@@ -12,11 +12,14 @@
 #define SDK_OBJC_NATIVE_API_OBJC_AUDIO_DEVICE_MODULE_H_
 
 #include "api/audio/audio_device.h"
+#include "api/environment/environment.h"
+#include "api/scoped_refptr.h"
 #import "components/audio/RTCAudioDevice.h"
 
 namespace webrtc {
 
-webrtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
+scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
+    const Environment& env,
     id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device);
 
 }  // namespace webrtc
diff --git a/sdk/objc/native/api/objc_audio_device_module.mm b/sdk/objc/native/api/objc_audio_device_module.mm
index cedb38f..3eb78ec 100644
--- a/sdk/objc/native/api/objc_audio_device_module.mm
+++ b/sdk/objc/native/api/objc_audio_device_module.mm
@@ -10,6 +10,7 @@
 
 #include "objc_audio_device_module.h"
 
+#include "api/environment/environment.h"
 #include "api/make_ref_counted.h"
 #include "rtc_base/logging.h"
 
@@ -17,11 +18,10 @@
 
 namespace webrtc {
 
-webrtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
-    id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device) {
+scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(
+    const Environment& env, id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device) {
   RTC_DLOG(LS_INFO) << __FUNCTION__;
-  return webrtc::make_ref_counted<objc_adm::ObjCAudioDeviceModule>(
-      audio_device);
+  return make_ref_counted<objc_adm::ObjCAudioDeviceModule>(env, audio_device);
 }
 
 }  // namespace webrtc
diff --git a/sdk/objc/native/src/audio/audio_device_ios.h b/sdk/objc/native/src/audio/audio_device_ios.h
index cc70ee7..7dcffb0 100644
--- a/sdk/objc/native/src/audio/audio_device_ios.h
+++ b/sdk/objc/native/src/audio/audio_device_ios.h
@@ -14,6 +14,7 @@
 #include <atomic>
 #include <memory>
 
+#include "api/environment/environment.h"
 #include "api/scoped_refptr.h"
 #include "api/sequence_checker.h"
 #include "api/task_queue/pending_task_safety_flag.h"
@@ -56,6 +57,7 @@
                        public VoiceProcessingAudioUnitObserver {
  public:
   explicit AudioDeviceIOS(
+      const Environment& env,
       bool bypass_voice_processing,
       AudioDeviceModule::MutedSpeechEventHandler muted_speech_event_handler,
       AudioDeviceIOSRenderErrorHandler render_error_handler);
@@ -220,6 +222,8 @@
   // Resets thread-checkers before a call is restarted.
   void PrepareForNewStart();
 
+  const Environment env_;
+
   // Determines whether voice processing should be enabled or disabled.
   const bool bypass_voice_processing_;
 
diff --git a/sdk/objc/native/src/audio/audio_device_ios.mm b/sdk/objc/native/src/audio/audio_device_ios.mm
index c7e8b62..53dec77 100644
--- a/sdk/objc/native/src/audio/audio_device_ios.mm
+++ b/sdk/objc/native/src/audio/audio_device_ios.mm
@@ -17,6 +17,7 @@
 #include <cmath>
 
 #include "api/array_view.h"
+#include "api/environment/environment.h"
 #include "api/task_queue/pending_task_safety_flag.h"
 #include "helpers.h"
 #include "modules/audio_device/fine_audio_buffer.h"
@@ -25,7 +26,6 @@
 #include "rtc_base/thread.h"
 #include "rtc_base/thread_annotations.h"
 #include "rtc_base/time_utils.h"
-#include "system_wrappers/include/field_trial.h"
 #include "system_wrappers/include/metrics.h"
 
 #import "base/RTCLogging.h"
@@ -96,10 +96,12 @@
 #endif  // !defined(NDEBUG)
 
 AudioDeviceIOS::AudioDeviceIOS(
+    const Environment& env,
     bool bypass_voice_processing,
     AudioDeviceModule::MutedSpeechEventHandler muted_speech_event_handler,
     AudioDeviceIOSRenderErrorHandler render_error_handler)
-    : bypass_voice_processing_(bypass_voice_processing),
+    : env_(env),
+      bypass_voice_processing_(bypass_voice_processing),
       muted_speech_event_handler_(muted_speech_event_handler),
       render_error_handler_(render_error_handler),
       disregard_next_render_error_(false),
@@ -579,7 +581,7 @@
          is_interrupted_);
   is_interrupted_ = false;
   if (!audio_unit_) return;
-  if (webrtc::field_trial::IsEnabled("WebRTC-Audio-iOS-Holding")) {
+  if (env_.field_trials().IsEnabled("WebRTC-Audio-iOS-Holding")) {
     // Work around an issue where audio does not restart properly after an
     // interruption by restarting the audio unit when the interruption ends.
     if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
diff --git a/sdk/objc/native/src/audio/audio_device_module_ios.h b/sdk/objc/native/src/audio/audio_device_module_ios.h
index 394e1ff..5ff5062 100644
--- a/sdk/objc/native/src/audio/audio_device_module_ios.h
+++ b/sdk/objc/native/src/audio/audio_device_module_ios.h
@@ -14,7 +14,6 @@
 #include <memory>
 
 #include "api/audio/audio_device.h"
-#include "api/task_queue/task_queue_factory.h"
 #include "audio_device_ios.h"
 #include "modules/audio_device/audio_device_buffer.h"
 #include "rtc_base/checks.h"
@@ -31,6 +30,7 @@
   int32_t AttachAudioBuffer();
 
   explicit AudioDeviceModuleIOS(
+      const Environment& env,
       bool bypass_voice_processing,
       MutedSpeechEventHandler muted_speech_event_handler,
       ADMErrorHandler error_handler);
@@ -137,11 +137,12 @@
 #endif  // WEBRTC_IOS
  private:
   void ReportError(ADMError error) const;
+
+  const Environment env_;
   const bool bypass_voice_processing_;
   MutedSpeechEventHandler muted_speech_event_handler_;
   ADMErrorHandler error_handler_;
   bool initialized_ = false;
-  const std::unique_ptr<TaskQueueFactory> task_queue_factory_;
   std::unique_ptr<AudioDeviceIOS> audio_device_;
   std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;
 };
diff --git a/sdk/objc/native/src/audio/audio_device_module_ios.mm b/sdk/objc/native/src/audio/audio_device_module_ios.mm
index 3b338f2..7420d05 100644
--- a/sdk/objc/native/src/audio/audio_device_module_ios.mm
+++ b/sdk/objc/native/src/audio/audio_device_module_ios.mm
@@ -10,7 +10,9 @@
 
 #include "audio_device_module_ios.h"
 
-#include "api/task_queue/default_task_queue_factory.h"
+#include <memory>
+
+#include "api/environment/environment.h"
 #include "modules/audio_device/audio_device_config.h"
 #include "modules/audio_device/audio_device_generic.h"
 #include "rtc_base/checks.h"
@@ -42,13 +44,14 @@
 namespace ios_adm {
 
 AudioDeviceModuleIOS::AudioDeviceModuleIOS(
+    const Environment& env,
     bool bypass_voice_processing,
     MutedSpeechEventHandler muted_speech_event_handler,
     ADMErrorHandler error_handler)
-    : bypass_voice_processing_(bypass_voice_processing),
+    : env_(env),
+      bypass_voice_processing_(bypass_voice_processing),
       muted_speech_event_handler_(muted_speech_event_handler),
-      error_handler_(error_handler),
-      task_queue_factory_(CreateDefaultTaskQueueFactory()) {
+      error_handler_(error_handler) {
   RTC_LOG(LS_INFO) << "current platform is IOS";
   RTC_LOG(LS_INFO) << "iPhone Audio APIs will be utilized.";
 }
@@ -87,10 +90,13 @@
   AudioDeviceIOSRenderErrorHandler error_handler = ^(OSStatus error) {
     ReportError(kRecordingDeviceFailed);
   };
-  audio_device_buffer_.reset(
-      new webrtc::AudioDeviceBuffer(task_queue_factory_.get()));
-  audio_device_.reset(new ios_adm::AudioDeviceIOS(
-      bypass_voice_processing_, muted_speech_event_handler_, error_handler));
+  audio_device_buffer_ =
+      std::make_unique<AudioDeviceBuffer>(&env_.task_queue_factory());
+  audio_device_ =
+      std::make_unique<ios_adm::AudioDeviceIOS>(env_,
+                                                bypass_voice_processing_,
+                                                muted_speech_event_handler_,
+                                                error_handler);
   RTC_CHECK(audio_device_);
 
   this->AttachAudioBuffer();
diff --git a/sdk/objc/native/src/objc_audio_device.h b/sdk/objc/native/src/objc_audio_device.h
index 27eb0af..d8edde6 100644
--- a/sdk/objc/native/src/objc_audio_device.h
+++ b/sdk/objc/native/src/objc_audio_device.h
@@ -16,6 +16,7 @@
 #import "components/audio/RTCAudioDevice.h"
 
 #include "api/audio/audio_device.h"
+#include "api/environment/environment.h"
 #include "modules/audio_device/audio_device_buffer.h"
 #include "rtc_base/thread.h"
 
@@ -30,7 +31,7 @@
 class ObjCAudioDeviceModule : public AudioDeviceModule {
  public:
   explicit ObjCAudioDeviceModule(
-      id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device);
+      const Environment& env, id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device);
   ~ObjCAudioDeviceModule() override;
 
   // Retrieve the currently utilized audio layer
@@ -219,7 +220,7 @@
  private:
   id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device_;
 
-  const std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+  const Environment env_;
 
   // AudioDeviceBuffer is a buffer to consume audio recorded by `RTCAudioDevice`
   // and provide audio to be played via `RTCAudioDevice`.
diff --git a/sdk/objc/native/src/objc_audio_device.mm b/sdk/objc/native/src/objc_audio_device.mm
index da595ba..d17dd63 100644
--- a/sdk/objc/native/src/objc_audio_device.mm
+++ b/sdk/objc/native/src/objc_audio_device.mm
@@ -9,12 +9,12 @@
  */
 
 #include "objc_audio_device.h"
-#include "objc_audio_device_delegate.h"
+
+#include <memory>
 
 #import "components/audio/RTCAudioDevice.h"
 #include "modules/audio_device/fine_audio_buffer.h"
-
-#include "api/task_queue/default_task_queue_factory.h"
+#include "objc_audio_device_delegate.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/numerics/safe_minmax.h"
 #include "rtc_base/time_utils.h"
@@ -49,9 +49,8 @@
 namespace objc_adm {
 
 ObjCAudioDeviceModule::ObjCAudioDeviceModule(
-    id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device)
-    : audio_device_(audio_device),
-      task_queue_factory_(CreateDefaultTaskQueueFactory()) {
+    const Environment& env, id<RTC_OBJC_TYPE(RTCAudioDevice)> audio_device)
+    : audio_device_(audio_device), env_(env) {
   RTC_DLOG_F(LS_VERBOSE) << "";
   RTC_DCHECK(audio_device_);
   thread_checker_.Detach();
@@ -82,8 +81,8 @@
   io_record_thread_checker_.Detach();
 
   thread_ = Thread::Current();
-  audio_device_buffer_.reset(
-      new webrtc::AudioDeviceBuffer(task_queue_factory_.get()));
+  audio_device_buffer_ =
+      std::make_unique<webrtc::AudioDeviceBuffer>(&env_.task_queue_factory());
 
   if (![audio_device_ isInitialized]) {
     if (audio_device_delegate_ == nil) {
diff --git a/sdk/objc/unittests/RTCAudioDeviceModule_xctest.mm b/sdk/objc/unittests/RTCAudioDeviceModule_xctest.mm
index e0ca410..b87460c 100644
--- a/sdk/objc/unittests/RTCAudioDeviceModule_xctest.mm
+++ b/sdk/objc/unittests/RTCAudioDeviceModule_xctest.mm
@@ -16,6 +16,7 @@
 #import "sdk/objc/native/api/audio_device_module.h"
 #endif
 
+#include "api/environment/environment_factory.h"
 #include "api/scoped_refptr.h"
 
 typedef int32_t (^NeedMorePlayDataBlock)(const size_t nSamples,
@@ -156,7 +157,8 @@
   _testEnabled = true;
 #endif
 
-  audioDeviceModule = webrtc::CreateAudioDeviceModule();
+  audioDeviceModule =
+      webrtc::CreateAudioDeviceModule(webrtc::CreateEnvironment());
   XCTAssertEqual(0, audioDeviceModule->Init());
   XCTAssertEqual(
       0, audioDeviceModule->GetPlayoutAudioParameters(&playoutParameters));
@@ -264,7 +266,7 @@
   // Create and initialize a second/extra ADM instance. The default ADM is
   // created by the test harness.
   webrtc::scoped_refptr<webrtc::AudioDeviceModule> secondAudioDeviceModule =
-      webrtc::CreateAudioDeviceModule();
+      webrtc::CreateAudioDeviceModule(webrtc::CreateEnvironment());
   XCTAssertNotEqual(secondAudioDeviceModule.get(), nullptr);
   XCTAssertEqual(0, secondAudioDeviceModule->Init());
 
diff --git a/sdk/objc/unittests/RTCAudioDevice_xctest.mm b/sdk/objc/unittests/RTCAudioDevice_xctest.mm
index 5bdb90c..f117832 100644
--- a/sdk/objc/unittests/RTCAudioDevice_xctest.mm
+++ b/sdk/objc/unittests/RTCAudioDevice_xctest.mm
@@ -12,6 +12,8 @@
 
 #include <stdlib.h>
 
+#include "api/environment/environment.h"
+#include "api/environment/environment_factory.h"
 #include "api/task_queue/default_task_queue_factory.h"
 
 #import "sdk/objc/components/audio/RTCAudioSession+Private.h"
@@ -45,8 +47,10 @@
   _testEnabled = true;
 #endif
 
-  _audioDeviceModule = webrtc::CreateAudioDeviceModule();
+  webrtc::Environment env = webrtc::CreateEnvironment();
+  _audioDeviceModule = webrtc::CreateAudioDeviceModule(env);
   _audio_device.reset(new webrtc::ios_adm::AudioDeviceIOS(
+      env,
       /*bypass_voice_processing=*/false,
       /*muted_speech_event_handler=*/nullptr,
       /*render_error_handler=*/nullptr));
@@ -149,6 +153,7 @@
       };
 
   _audio_device.reset(new webrtc::ios_adm::AudioDeviceIOS(
+      webrtc::CreateEnvironment(),
       /*bypass_voice_processing=*/false,
       /*muted_speech_event_handler=*/muted_speech_event_handler,
       /*render_error_handler=*/nullptr));
@@ -169,6 +174,7 @@
           };
 
   _audio_device.reset(new webrtc::ios_adm::AudioDeviceIOS(
+      webrtc::CreateEnvironment(),
       /*bypass_voice_processing=*/false,
       /*muted_speech_event_handler=*/muted_speech_event_handler,
       /*render_error_handler=*/nullptr));