Move webrtc::AudioDeviceModule include to api/ folder
Bug: webrtc:15874
Change-Id: I5bdb19d5e710838b41e6ca283d406c9f1f21286b
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/348060
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Reviewed-by: Henrik Andreassson <henrika@webrtc.org>
Commit-Queue: Florent Castelli <orphis@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#42137}
diff --git a/api/BUILD.gn b/api/BUILD.gn
index 2b484f8..152f2cb 100644
--- a/api/BUILD.gn
+++ b/api/BUILD.gn
@@ -80,12 +80,12 @@
":libjingle_peerconnection_api",
":scoped_refptr",
"../api/rtc_event_log:rtc_event_log_factory",
- "../modules/audio_device:audio_device_api",
"../pc:peer_connection_factory",
"../pc:webrtc_sdp",
"../rtc_base:threading",
"../rtc_base/system:rtc_export",
"../stats:rtc_stats",
+ "audio:audio_device",
"audio:audio_mixer_api",
"audio:audio_processing",
"audio_codecs:audio_codecs_api",
diff --git a/api/DEPS b/api/DEPS
index 0536d70..b34925e 100644
--- a/api/DEPS
+++ b/api/DEPS
@@ -75,6 +75,10 @@
"+rtc_base/socket_address.h",
],
+ "audio_device_defines\.h": [
+ "+rtc_base/strings/string_builder.h",
+ ],
+
"candidate\.h": [
"+rtc_base/network_constants.h",
"+rtc_base/socket_address.h",
diff --git a/api/audio/BUILD.gn b/api/audio/BUILD.gn
index 2fc4b19..de654c7 100644
--- a/api/audio/BUILD.gn
+++ b/api/audio/BUILD.gn
@@ -8,6 +8,22 @@
import("../../webrtc.gni")
+rtc_source_set("audio_device") {
+ visibility = [ "*" ]
+ sources = [
+ "audio_device.h",
+ "audio_device_defines.h",
+ ]
+ deps = [
+ "..:ref_count",
+ "..:scoped_refptr",
+ "../../rtc_base:checks",
+ "../../rtc_base:stringutils",
+ "../task_queue",
+ ]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
+}
+
rtc_library("audio_frame_api") {
visibility = [ "*" ]
sources = [
diff --git a/api/audio/audio_device.h b/api/audio/audio_device.h
new file mode 100644
index 0000000..65e5c5f
--- /dev/null
+++ b/api/audio/audio_device.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_AUDIO_AUDIO_DEVICE_H_
+#define API_AUDIO_AUDIO_DEVICE_H_
+
+#include "absl/types/optional.h"
+#include "api/audio/audio_device_defines.h"
+#include "api/ref_count.h"
+#include "api/scoped_refptr.h"
+#include "api/task_queue/task_queue_factory.h"
+
+namespace webrtc {
+
+class AudioDeviceModuleForTest;
+
+class AudioDeviceModule : public webrtc::RefCountInterface {
+ public:
+ enum AudioLayer {
+ kPlatformDefaultAudio = 0,
+ kWindowsCoreAudio,
+ kWindowsCoreAudio2,
+ kLinuxAlsaAudio,
+ kLinuxPulseAudio,
+ kAndroidJavaAudio,
+ kAndroidOpenSLESAudio,
+ kAndroidJavaInputAndOpenSLESOutputAudio,
+ kAndroidAAudioAudio,
+ kAndroidJavaInputAndAAudioOutputAudio,
+ kDummyAudio,
+ };
+
+ enum WindowsDeviceType {
+ kDefaultCommunicationDevice = -1,
+ kDefaultDevice = -2
+ };
+
+ struct Stats {
+ // The fields below correspond to similarly-named fields in the WebRTC stats
+ // spec. https://w3c.github.io/webrtc-stats/#playoutstats-dict*
+ double synthesized_samples_duration_s = 0;
+ uint64_t synthesized_samples_events = 0;
+ double total_samples_duration_s = 0;
+ double total_playout_delay_s = 0;
+ uint64_t total_samples_count = 0;
+ };
+
+ public:
+ // Creates a default ADM for usage in production code.
+ static rtc::scoped_refptr<AudioDeviceModule> Create(
+ AudioLayer audio_layer,
+ TaskQueueFactory* task_queue_factory);
+ // Creates an ADM with support for extra test methods. Don't use this factory
+ // in production code.
+ static rtc::scoped_refptr<AudioDeviceModuleForTest> CreateForTest(
+ AudioLayer audio_layer,
+ TaskQueueFactory* task_queue_factory);
+
+ // Retrieve the currently utilized audio layer
+ virtual int32_t ActiveAudioLayer(AudioLayer* audioLayer) const = 0;
+
+ // Full-duplex transportation of PCM audio
+ virtual int32_t RegisterAudioCallback(AudioTransport* audioCallback) = 0;
+
+ // Main initialization and termination
+ virtual int32_t Init() = 0;
+ virtual int32_t Terminate() = 0;
+ virtual bool Initialized() const = 0;
+
+ // Device enumeration
+ virtual int16_t PlayoutDevices() = 0;
+ virtual int16_t RecordingDevices() = 0;
+ virtual int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) = 0;
+ virtual int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) = 0;
+
+ // Device selection
+ virtual int32_t SetPlayoutDevice(uint16_t index) = 0;
+ virtual int32_t SetPlayoutDevice(WindowsDeviceType device) = 0;
+ virtual int32_t SetRecordingDevice(uint16_t index) = 0;
+ virtual int32_t SetRecordingDevice(WindowsDeviceType device) = 0;
+
+ // Audio transport initialization
+ virtual int32_t PlayoutIsAvailable(bool* available) = 0;
+ virtual int32_t InitPlayout() = 0;
+ virtual bool PlayoutIsInitialized() const = 0;
+ virtual int32_t RecordingIsAvailable(bool* available) = 0;
+ virtual int32_t InitRecording() = 0;
+ virtual bool RecordingIsInitialized() const = 0;
+
+ // Audio transport control
+ virtual int32_t StartPlayout() = 0;
+ virtual int32_t StopPlayout() = 0;
+ virtual bool Playing() const = 0;
+ virtual int32_t StartRecording() = 0;
+ virtual int32_t StopRecording() = 0;
+ virtual bool Recording() const = 0;
+
+ // Audio mixer initialization
+ virtual int32_t InitSpeaker() = 0;
+ virtual bool SpeakerIsInitialized() const = 0;
+ virtual int32_t InitMicrophone() = 0;
+ virtual bool MicrophoneIsInitialized() const = 0;
+
+ // Speaker volume controls
+ virtual int32_t SpeakerVolumeIsAvailable(bool* available) = 0;
+ virtual int32_t SetSpeakerVolume(uint32_t volume) = 0;
+ virtual int32_t SpeakerVolume(uint32_t* volume) const = 0;
+ virtual int32_t MaxSpeakerVolume(uint32_t* maxVolume) const = 0;
+ virtual int32_t MinSpeakerVolume(uint32_t* minVolume) const = 0;
+
+ // Microphone volume controls
+ virtual int32_t MicrophoneVolumeIsAvailable(bool* available) = 0;
+ virtual int32_t SetMicrophoneVolume(uint32_t volume) = 0;
+ virtual int32_t MicrophoneVolume(uint32_t* volume) const = 0;
+ virtual int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const = 0;
+ virtual int32_t MinMicrophoneVolume(uint32_t* minVolume) const = 0;
+
+ // Speaker mute control
+ virtual int32_t SpeakerMuteIsAvailable(bool* available) = 0;
+ virtual int32_t SetSpeakerMute(bool enable) = 0;
+ virtual int32_t SpeakerMute(bool* enabled) const = 0;
+
+ // Microphone mute control
+ virtual int32_t MicrophoneMuteIsAvailable(bool* available) = 0;
+ virtual int32_t SetMicrophoneMute(bool enable) = 0;
+ virtual int32_t MicrophoneMute(bool* enabled) const = 0;
+
+ // Stereo support
+ virtual int32_t StereoPlayoutIsAvailable(bool* available) const = 0;
+ virtual int32_t SetStereoPlayout(bool enable) = 0;
+ virtual int32_t StereoPlayout(bool* enabled) const = 0;
+ virtual int32_t StereoRecordingIsAvailable(bool* available) const = 0;
+ virtual int32_t SetStereoRecording(bool enable) = 0;
+ virtual int32_t StereoRecording(bool* enabled) const = 0;
+
+ // Playout delay
+ virtual int32_t PlayoutDelay(uint16_t* delayMS) const = 0;
+
+ // Only supported on Android.
+ virtual bool BuiltInAECIsAvailable() const = 0;
+ virtual bool BuiltInAGCIsAvailable() const = 0;
+ virtual bool BuiltInNSIsAvailable() const = 0;
+
+ // Enables the built-in audio effects. Only supported on Android.
+ virtual int32_t EnableBuiltInAEC(bool enable) = 0;
+ virtual int32_t EnableBuiltInAGC(bool enable) = 0;
+ virtual int32_t EnableBuiltInNS(bool enable) = 0;
+
+ // Play underrun count. Only supported on Android.
+ // TODO(alexnarest): Make it abstract after upstream projects support it.
+ virtual int32_t GetPlayoutUnderrunCount() const { return -1; }
+
+ // Used to generate RTC stats. If not implemented, RTCAudioPlayoutStats will
+ // not be present in the stats.
+ virtual absl::optional<Stats> GetStats() const { return absl::nullopt; }
+
+// Only supported on iOS.
+#if defined(WEBRTC_IOS)
+ virtual int GetPlayoutAudioParameters(AudioParameters* params) const = 0;
+ virtual int GetRecordAudioParameters(AudioParameters* params) const = 0;
+#endif // WEBRTC_IOS
+
+ protected:
+ ~AudioDeviceModule() override {}
+};
+
+// Extends the default ADM interface with some extra test methods.
+// Intended for usage in tests only and requires a unique factory method.
+class AudioDeviceModuleForTest : public AudioDeviceModule {
+ public:
+ // Triggers internal restart sequences of audio streaming. Can be used by
+ // tests to emulate events corresponding to e.g. removal of an active audio
+  // device or other actions which cause the stream to be disconnected.
+ virtual int RestartPlayoutInternally() = 0;
+ virtual int RestartRecordingInternally() = 0;
+
+ virtual int SetPlayoutSampleRate(uint32_t sample_rate) = 0;
+ virtual int SetRecordingSampleRate(uint32_t sample_rate) = 0;
+};
+
+} // namespace webrtc
+
+#endif // API_AUDIO_AUDIO_DEVICE_H_
diff --git a/api/audio/audio_device_defines.h b/api/audio/audio_device_defines.h
new file mode 100644
index 0000000..304b876
--- /dev/null
+++ b/api/audio/audio_device_defines.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_AUDIO_AUDIO_DEVICE_DEFINES_H_
+#define API_AUDIO_AUDIO_DEVICE_DEFINES_H_
+
+#include <stddef.h>
+
+#include <cstdint>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace webrtc {
+
+static const int kAdmMaxDeviceNameSize = 128;
+static const int kAdmMaxFileNameSize = 512;
+static const int kAdmMaxGuidSize = 128;
+
+static const int kAdmMinPlayoutBufferSizeMs = 10;
+static const int kAdmMaxPlayoutBufferSizeMs = 250;
+
+// ----------------------------------------------------------------------------
+// AudioTransport
+// ----------------------------------------------------------------------------
+
+class AudioTransport {
+ public:
+ // TODO(bugs.webrtc.org/13620) Deprecate this function
+ virtual int32_t RecordedDataIsAvailable(const void* audioSamples,
+ size_t nSamples,
+ size_t nBytesPerSample,
+ size_t nChannels,
+ uint32_t samplesPerSec,
+ uint32_t totalDelayMS,
+ int32_t clockDrift,
+ uint32_t currentMicLevel,
+ bool keyPressed,
+ uint32_t& newMicLevel) = 0; // NOLINT
+
+ virtual int32_t RecordedDataIsAvailable(
+ const void* audioSamples,
+ size_t nSamples,
+ size_t nBytesPerSample,
+ size_t nChannels,
+ uint32_t samplesPerSec,
+ uint32_t totalDelayMS,
+ int32_t clockDrift,
+ uint32_t currentMicLevel,
+ bool keyPressed,
+ uint32_t& newMicLevel,
+ absl::optional<int64_t> estimatedCaptureTimeNS) { // NOLINT
+    // TODO(webrtc:13620) Make the default behavior of the new API behave as
+    // the old API does. This can be made pure virtual once all uses of the
+    // old API are removed.
+ return RecordedDataIsAvailable(
+ audioSamples, nSamples, nBytesPerSample, nChannels, samplesPerSec,
+ totalDelayMS, clockDrift, currentMicLevel, keyPressed, newMicLevel);
+ }
+
+  // The implementation has to set up safe values for all specified out parameters.
+ virtual int32_t NeedMorePlayData(size_t nSamples,
+ size_t nBytesPerSample,
+ size_t nChannels,
+ uint32_t samplesPerSec,
+ void* audioSamples,
+ size_t& nSamplesOut, // NOLINT
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) = 0; // NOLINT
+
+ // Method to pull mixed render audio data from all active VoE channels.
+ // The data will not be passed as reference for audio processing internally.
+ virtual void PullRenderData(int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames,
+ void* audio_data,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) = 0;
+
+ protected:
+ virtual ~AudioTransport() {}
+};
+
+// Helper class for storage of fundamental audio parameters such as sample rate,
+// number of channels, native buffer size etc.
+// Note that one audio frame can contain more than one channel sample and each
+// sample is assumed to be a 16-bit PCM sample. Hence, one audio frame in
+// stereo contains 2 * (16/8) = 4 bytes of data.
+class AudioParameters {
+ public:
+ // This implementation does only support 16-bit PCM samples.
+ static const size_t kBitsPerSample = 16;
+ AudioParameters()
+ : sample_rate_(0),
+ channels_(0),
+ frames_per_buffer_(0),
+ frames_per_10ms_buffer_(0) {}
+ AudioParameters(int sample_rate, size_t channels, size_t frames_per_buffer)
+ : sample_rate_(sample_rate),
+ channels_(channels),
+ frames_per_buffer_(frames_per_buffer),
+ frames_per_10ms_buffer_(static_cast<size_t>(sample_rate / 100)) {}
+ void reset(int sample_rate, size_t channels, size_t frames_per_buffer) {
+ sample_rate_ = sample_rate;
+ channels_ = channels;
+ frames_per_buffer_ = frames_per_buffer;
+ frames_per_10ms_buffer_ = static_cast<size_t>(sample_rate / 100);
+ }
+ size_t bits_per_sample() const { return kBitsPerSample; }
+ void reset(int sample_rate, size_t channels, double buffer_duration) {
+ reset(sample_rate, channels,
+ static_cast<size_t>(sample_rate * buffer_duration + 0.5));
+ }
+ void reset(int sample_rate, size_t channels) {
+ reset(sample_rate, channels, static_cast<size_t>(0));
+ }
+ int sample_rate() const { return sample_rate_; }
+ size_t channels() const { return channels_; }
+ size_t frames_per_buffer() const { return frames_per_buffer_; }
+ size_t frames_per_10ms_buffer() const { return frames_per_10ms_buffer_; }
+ size_t GetBytesPerFrame() const { return channels_ * kBitsPerSample / 8; }
+ size_t GetBytesPerBuffer() const {
+ return frames_per_buffer_ * GetBytesPerFrame();
+ }
+ // The WebRTC audio device buffer (ADB) only requires that the sample rate
+ // and number of channels are configured. Hence, to be "valid", only these
+ // two attributes must be set.
+ bool is_valid() const { return ((sample_rate_ > 0) && (channels_ > 0)); }
+ // Most platforms also require that a native buffer size is defined.
+ // An audio parameter instance is considered to be "complete" if it is both
+ // "valid" (can be used by the ADB) and also has a native frame size.
+ bool is_complete() const { return (is_valid() && (frames_per_buffer_ > 0)); }
+ size_t GetBytesPer10msBuffer() const {
+ return frames_per_10ms_buffer_ * GetBytesPerFrame();
+ }
+ double GetBufferSizeInMilliseconds() const {
+ if (sample_rate_ == 0)
+ return 0.0;
+ return frames_per_buffer_ / (sample_rate_ / 1000.0);
+ }
+ double GetBufferSizeInSeconds() const {
+ if (sample_rate_ == 0)
+ return 0.0;
+ return static_cast<double>(frames_per_buffer_) / (sample_rate_);
+ }
+ std::string ToString() const {
+ char ss_buf[1024];
+ rtc::SimpleStringBuilder ss(ss_buf);
+ ss << "AudioParameters: ";
+ ss << "sample_rate=" << sample_rate() << ", channels=" << channels();
+ ss << ", frames_per_buffer=" << frames_per_buffer();
+ ss << ", frames_per_10ms_buffer=" << frames_per_10ms_buffer();
+ ss << ", bytes_per_frame=" << GetBytesPerFrame();
+ ss << ", bytes_per_buffer=" << GetBytesPerBuffer();
+ ss << ", bytes_per_10ms_buffer=" << GetBytesPer10msBuffer();
+ ss << ", size_in_ms=" << GetBufferSizeInMilliseconds();
+ return ss.str();
+ }
+
+ private:
+ int sample_rate_;
+ size_t channels_;
+ size_t frames_per_buffer_;
+ size_t frames_per_10ms_buffer_;
+};
+
+} // namespace webrtc
+
+#endif // API_AUDIO_AUDIO_DEVICE_DEFINES_H_
diff --git a/api/create_peerconnection_factory.cc b/api/create_peerconnection_factory.cc
index 42bfa60..46105a2 100644
--- a/api/create_peerconnection_factory.cc
+++ b/api/create_peerconnection_factory.cc
@@ -13,6 +13,7 @@
#include <memory>
#include <utility>
+#include "api/audio/audio_device.h"
#include "api/audio/audio_processing.h"
#include "api/enable_media.h"
#include "api/peer_connection_interface.h"
@@ -20,7 +21,6 @@
#include "api/scoped_refptr.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "api/transport/field_trial_based_config.h"
-#include "modules/audio_device/include/audio_device.h"
#include "rtc_base/thread.h"
namespace webrtc {
diff --git a/api/create_peerconnection_factory.h b/api/create_peerconnection_factory.h
index adb4de9..18febb6 100644
--- a/api/create_peerconnection_factory.h
+++ b/api/create_peerconnection_factory.h
@@ -13,6 +13,7 @@
#include <memory>
+#include "api/audio/audio_device.h"
#include "api/audio/audio_mixer.h"
#include "api/audio/audio_processing.h"
#include "api/audio_codecs/audio_decoder_factory.h"
@@ -33,7 +34,6 @@
namespace webrtc {
-class AudioDeviceModule;
class AudioFrameProcessor;
// Create a new instance of PeerConnectionFactoryInterface with optional video
diff --git a/api/voip/BUILD.gn b/api/voip/BUILD.gn
index f175c30..7cf9a7a 100644
--- a/api/voip/BUILD.gn
+++ b/api/voip/BUILD.gn
@@ -40,8 +40,8 @@
":voip_api",
"..:scoped_refptr",
"../../audio/voip:voip_core",
- "../../modules/audio_device:audio_device_api",
"../../rtc_base:logging",
+ "../audio:audio_device",
"../audio:audio_processing",
"../audio_codecs:audio_codecs_api",
"../task_queue",
diff --git a/api/voip/voip_engine_factory.h b/api/voip/voip_engine_factory.h
index 3abced7..1972fcd 100644
--- a/api/voip/voip_engine_factory.h
+++ b/api/voip/voip_engine_factory.h
@@ -13,13 +13,13 @@
#include <memory>
+#include "api/audio/audio_device.h"
#include "api/audio/audio_processing.h"
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/audio_codecs/audio_encoder_factory.h"
#include "api/scoped_refptr.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/voip/voip_engine.h"
-#include "modules/audio_device/include/audio_device.h"
namespace webrtc {