Move functionality out from AudioFrame and into AudioFrameOperations.
This CL is in preparation to move the AudioFrame into webrtc/api. The
AudioFrame is a POD type used for representing 10ms of audio. It
appears as a parameter and return value of interfaces being migrated
to webrtc/api, in particular AudioMixer.
Here, methods operator+=, operator>>=, Mute are
moved into a new target webrtc/audio/utility/audio_frame_operations,
and dependencies are changed to use
the new versions. The old AudioFrame methods are marked deprecated.
The audio frame utilities in webrtc/modules/utility:audio_frame_operations
are also moved to the new location.
TBR=kjellander@webrtc.org
BUG=webrtc:6548
NOPRESUBMIT=True
Review-Url: https://codereview.webrtc.org/2424173003
Cr-Commit-Position: refs/heads/master@{#15413}
diff --git a/webrtc/audio/BUILD.gn b/webrtc/audio/BUILD.gn
index 5995f59..bde074d 100644
--- a/webrtc/audio/BUILD.gn
+++ b/webrtc/audio/BUILD.gn
@@ -37,11 +37,6 @@
"../modules/audio_processing",
"../system_wrappers",
"../voice_engine",
- "utility", # Bogus dep, needed for landing
- # codereview.webrtc.org/2424173003 without breaking
- # internal projects. See
- # bugs.webrtc.org/6548. TODO(aleloi): remove dependency
- # when codereview.webrtc.org/2424173003 has landed.
]
}
if (rtc_include_tests) {
@@ -51,6 +46,7 @@
"audio_receive_stream_unittest.cc",
"audio_send_stream_unittest.cc",
"audio_state_unittest.cc",
+ "utility/audio_frame_operations_unittest.cc",
]
deps = [
":audio",
@@ -59,6 +55,8 @@
"../modules/audio_device:mock_audio_device",
"../modules/audio_mixer:audio_mixer_impl",
"../test:test_common",
+ "../test:test_support",
+ "utility:audio_frame_operations",
"//testing/gmock",
"//testing/gtest",
]
diff --git a/webrtc/audio/utility/BUILD.gn b/webrtc/audio/utility/BUILD.gn
index f6c62d5..d85b30d 100644
--- a/webrtc/audio/utility/BUILD.gn
+++ b/webrtc/audio/utility/BUILD.gn
@@ -8,4 +8,20 @@
import("../../build/webrtc.gni")
group("utility") {
+ public_deps = [
+ ":audio_frame_operations",
+ ]
+}
+
+rtc_static_library("audio_frame_operations") {
+ sources = [
+ "audio_frame_operations.cc",
+ "audio_frame_operations.h",
+ ]
+
+ deps = [
+ "../..:webrtc_common",
+ "../../base:rtc_base_approved",
+ "../../modules/audio_coding:audio_format_conversion",
+ ]
}
diff --git a/webrtc/modules/utility/source/audio_frame_operations.cc b/webrtc/audio/utility/audio_frame_operations.cc
similarity index 64%
rename from webrtc/modules/utility/source/audio_frame_operations.cc
rename to webrtc/audio/utility/audio_frame_operations.cc
index 102407d..6fcb84e 100644
--- a/webrtc/modules/utility/source/audio_frame_operations.cc
+++ b/webrtc/audio/utility/audio_frame_operations.cc
@@ -8,18 +8,65 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/include/module_common_types.h"
-#include "webrtc/modules/utility/include/audio_frame_operations.h"
+#include "webrtc/audio/utility/audio_frame_operations.h"
+
+#include <algorithm>
+
#include "webrtc/base/checks.h"
+#include "webrtc/base/safe_conversions.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
-namespace {
+namespace {
// 2.7ms @ 48kHz, 4ms @ 32kHz, 8ms @ 16kHz.
const size_t kMuteFadeFrames = 128;
const float kMuteFadeInc = 1.0f / kMuteFadeFrames;
-} // namespace {
+} // namespace
+
+void AudioFrameOperations::Add(const AudioFrame& frame_to_add,
+ AudioFrame* result_frame) {
+ // Sanity check.
+ RTC_DCHECK(result_frame);
+ RTC_DCHECK_GT(result_frame->num_channels_, 0);
+ RTC_DCHECK_EQ(result_frame->num_channels_, frame_to_add.num_channels_);
+
+ bool no_previous_data = false;
+ if (result_frame->samples_per_channel_ != frame_to_add.samples_per_channel_) {
+ // Special case we have no data to start with.
+ RTC_DCHECK_EQ(result_frame->samples_per_channel_, 0);
+ result_frame->samples_per_channel_ = frame_to_add.samples_per_channel_;
+ no_previous_data = true;
+ }
+
+ if (result_frame->vad_activity_ == AudioFrame::kVadActive ||
+ frame_to_add.vad_activity_ == AudioFrame::kVadActive) {
+ result_frame->vad_activity_ = AudioFrame::kVadActive;
+ } else if (result_frame->vad_activity_ == AudioFrame::kVadUnknown ||
+ frame_to_add.vad_activity_ == AudioFrame::kVadUnknown) {
+ result_frame->vad_activity_ = AudioFrame::kVadUnknown;
+ }
+
+ if (result_frame->speech_type_ != frame_to_add.speech_type_)
+ result_frame->speech_type_ = AudioFrame::kUndefined;
+
+ if (no_previous_data) {
+ std::copy(frame_to_add.data_, frame_to_add.data_ +
+ frame_to_add.samples_per_channel_ *
+ result_frame->num_channels_,
+ result_frame->data_);
+ } else {
+ for (size_t i = 0;
+ i < result_frame->samples_per_channel_ * result_frame->num_channels_;
+ i++) {
+ const int32_t wrap_guard = static_cast<int32_t>(result_frame->data_[i]) +
+ static_cast<int32_t>(frame_to_add.data_[i]);
+ result_frame->data_[i] = rtc::saturated_cast<int16_t>(wrap_guard);
+ }
+ }
+ return;
+}
void AudioFrameOperations::MonoToStereo(const int16_t* src_audio,
size_t samples_per_channel,
@@ -68,7 +115,10 @@
}
void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
- if (frame->num_channels_ != 2) return;
+ RTC_DCHECK(frame);
+ if (frame->num_channels_ != 2) {
+ return;
+ }
for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
int16_t temp_data = frame->data_[i];
@@ -77,7 +127,8 @@
}
}
-void AudioFrameOperations::Mute(AudioFrame* frame, bool previous_frame_muted,
+void AudioFrameOperations::Mute(AudioFrame* frame,
+ bool previous_frame_muted,
bool current_frame_muted) {
RTC_DCHECK(frame);
if (!previous_frame_muted && !current_frame_muted) {
@@ -125,14 +176,30 @@
}
}
+void AudioFrameOperations::Mute(AudioFrame* frame) {
+ Mute(frame, true, true);
+}
+
+void AudioFrameOperations::ApplyHalfGain(AudioFrame* frame) {
+ RTC_DCHECK(frame);
+ RTC_DCHECK_GT(frame->num_channels_, 0);
+ if (frame->num_channels_ < 1) {
+ return;
+ }
+
+ for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
+ i++) {
+ frame->data_[i] = frame->data_[i] >> 1;
+ }
+}
+
int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) {
if (frame.num_channels_ != 2) {
return -1;
}
for (size_t i = 0; i < frame.samples_per_channel_; i++) {
- frame.data_[2 * i] =
- static_cast<int16_t>(left * frame.data_[2 * i]);
+ frame.data_[2 * i] = static_cast<int16_t>(left * frame.data_[2 * i]);
frame.data_[2 * i + 1] =
static_cast<int16_t>(right * frame.data_[2 * i + 1]);
}
@@ -156,5 +223,4 @@
}
return 0;
}
-
} // namespace webrtc
diff --git a/webrtc/audio/utility/audio_frame_operations.h b/webrtc/audio/utility/audio_frame_operations.h
new file mode 100644
index 0000000..d16b163
--- /dev/null
+++ b/webrtc/audio/utility/audio_frame_operations.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_AUDIO_UTILITY_AUDIO_FRAME_OPERATIONS_H_
+#define WEBRTC_AUDIO_UTILITY_AUDIO_FRAME_OPERATIONS_H_
+
+#include <stddef.h>
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class AudioFrame;
+
+// TODO(andrew): consolidate this with utility.h and audio_frame_manipulator.h.
+// Change reference parameters to pointers. Consider using a namespace rather
+// than a class.
+class AudioFrameOperations {
+ public:
+ // Adds the samples in |frame_to_add| to the samples in
+ // |result_frame|, storing the sum in |result_frame|. The fields
+ // |vad_activity_| and |speech_type_| of the result frame are
+ // updated. If |result_frame| is empty (|samples_per_channel_| == 0),
+ // the samples in |frame_to_add| are copied into it. The number of
+ // channels and number of samples per channel must match, except when
+ // |result_frame| is empty.
+ static void Add(const AudioFrame& frame_to_add, AudioFrame* result_frame);
+
+ // Upmixes mono |src_audio| to stereo |dst_audio|. This is an out-of-place
+ // operation, meaning src_audio and dst_audio must point to different
+ // buffers. It is the caller's responsibility to ensure that |dst_audio| is
+ // sufficiently large.
+ static void MonoToStereo(const int16_t* src_audio,
+ size_t samples_per_channel,
+ int16_t* dst_audio);
+ // |frame.num_channels_| will be updated. This version checks for sufficient
+ // buffer size and that |num_channels_| is mono.
+ static int MonoToStereo(AudioFrame* frame);
+
+ // Downmixes stereo |src_audio| to mono |dst_audio|. This is an in-place
+ // operation, meaning |src_audio| and |dst_audio| may point to the same
+ // buffer.
+ static void StereoToMono(const int16_t* src_audio,
+ size_t samples_per_channel,
+ int16_t* dst_audio);
+ // |frame.num_channels_| will be updated. This version checks that
+ // |num_channels_| is stereo.
+ static int StereoToMono(AudioFrame* frame);
+
+ // Swap the left and right channels of |frame|. Fails silently if |frame| is
+ // not stereo.
+ static void SwapStereoChannels(AudioFrame* frame);
+
+ // Conditionally zero out contents of |frame| for implementing audio mute:
+ // |previous_frame_muted| && |current_frame_muted| - Zero out whole frame.
+ // |previous_frame_muted| && !|current_frame_muted| - Fade-in at frame start.
+ // !|previous_frame_muted| && |current_frame_muted| - Fade-out at frame end.
+ // !|previous_frame_muted| && !|current_frame_muted| - Leave frame untouched.
+ static void Mute(AudioFrame* frame,
+ bool previous_frame_muted,
+ bool current_frame_muted);
+
+ // Zero out contents of frame.
+ static void Mute(AudioFrame* frame);
+
+ // Halve samples in |frame|.
+ static void ApplyHalfGain(AudioFrame* frame);
+
+ static int Scale(float left, float right, AudioFrame& frame);
+
+ static int ScaleWithSat(float scale, AudioFrame& frame);
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_AUDIO_UTILITY_AUDIO_FRAME_OPERATIONS_H_
diff --git a/webrtc/modules/utility/source/audio_frame_operations_unittest.cc b/webrtc/audio/utility/audio_frame_operations_unittest.cc
similarity index 93%
rename from webrtc/modules/utility/source/audio_frame_operations_unittest.cc
rename to webrtc/audio/utility/audio_frame_operations_unittest.cc
index 8f83e05..36377bd 100644
--- a/webrtc/modules/utility/source/audio_frame_operations_unittest.cc
+++ b/webrtc/audio/utility/audio_frame_operations_unittest.cc
@@ -8,9 +8,9 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/checks.h"
#include "webrtc/modules/include/module_common_types.h"
-#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
@@ -365,5 +365,25 @@
VerifyFramesAreEqual(scaled_frame, frame_);
}
+TEST_F(AudioFrameOperationsTest, AddingXToEmptyGivesX) {
+  // A frame with samples_per_channel_ == 0 counts as empty (all-zero).
+ AudioFrame frame_to_add_to;
+ frame_to_add_to.samples_per_channel_ = 0;
+ frame_to_add_to.num_channels_ = frame_.num_channels_;
+
+ AudioFrameOperations::Add(frame_, &frame_to_add_to);
+ VerifyFramesAreEqual(frame_, frame_to_add_to);
+}
+
+TEST_F(AudioFrameOperationsTest, AddingTwoFramesProducesTheirSum) {
+ AudioFrame frame_to_add_to;
+ frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
+ frame_to_add_to.num_channels_ = frame_.num_channels_;
+ SetFrameData(&frame_to_add_to, 1000);
+
+ AudioFrameOperations::Add(frame_, &frame_to_add_to);
+ SetFrameData(&frame_, frame_.data_[0] + 1000);
+ VerifyFramesAreEqual(frame_, frame_to_add_to);
+}
} // namespace
} // namespace webrtc
diff --git a/webrtc/modules/BUILD.gn b/webrtc/modules/BUILD.gn
index dcfd5fa..e853ea4 100644
--- a/webrtc/modules/BUILD.gn
+++ b/webrtc/modules/BUILD.gn
@@ -483,7 +483,6 @@
"rtp_rtcp/test/testAPI/test_api_audio.cc",
"rtp_rtcp/test/testAPI/test_api_rtcp.cc",
"rtp_rtcp/test/testAPI/test_api_video.cc",
- "utility/source/audio_frame_operations_unittest.cc",
"utility/source/file_player_unittests.cc",
"utility/source/process_thread_impl_unittest.cc",
"video_coding/codecs/test/packet_manipulator_unittest.cc",
diff --git a/webrtc/modules/audio_conference_mixer/BUILD.gn b/webrtc/modules/audio_conference_mixer/BUILD.gn
index 58228b5..61faf45 100644
--- a/webrtc/modules/audio_conference_mixer/BUILD.gn
+++ b/webrtc/modules/audio_conference_mixer/BUILD.gn
@@ -39,8 +39,8 @@
}
deps = [
+ "../../audio/utility:audio_frame_operations",
"../../system_wrappers",
"../audio_processing",
- "../utility",
]
}
diff --git a/webrtc/modules/audio_conference_mixer/DEPS b/webrtc/modules/audio_conference_mixer/DEPS
index 2805958..0b95ab7 100644
--- a/webrtc/modules/audio_conference_mixer/DEPS
+++ b/webrtc/modules/audio_conference_mixer/DEPS
@@ -1,4 +1,5 @@
include_rules = [
+ "+webrtc/audio/utility/audio_frame_operations.h",
"+webrtc/base",
"+webrtc/system_wrappers",
]
diff --git a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
index dce3d0b..af91c69 100644
--- a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
+++ b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/modules/audio_conference_mixer/include/audio_conference_mixer_defines.h"
#include "webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h"
#include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
-#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
@@ -38,9 +38,9 @@
void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
assert(mixed_frame->num_channels_ >= frame->num_channels_);
if (use_limiter) {
- // Divide by two to avoid saturation in the mixing.
- // This is only meaningful if the limiter will be used.
- *frame >>= 1;
+ // This is to avoid saturation in the mixing. It is only
+ // meaningful if the limiter will be used.
+ AudioFrameOperations::ApplyHalfGain(frame);
}
if (mixed_frame->num_channels_ > frame->num_channels_) {
// We only support mono-to-stereo.
@@ -49,7 +49,7 @@
AudioFrameOperations::MonoToStereo(frame);
}
- *mixed_frame += *frame;
+ AudioFrameOperations::Add(*frame, mixed_frame);
}
// Return the max number of channels from a |list| composed of AudioFrames.
@@ -303,7 +303,7 @@
if(mixedAudio->samples_per_channel_ == 0) {
// Nothing was mixed, set the audio samples to silence.
mixedAudio->samples_per_channel_ = _sampleSize;
- mixedAudio->Mute();
+ AudioFrameOperations::Mute(mixedAudio);
} else {
// Only call the limiter if we have something to mix.
LimitMixedAudio(mixedAudio);
@@ -922,7 +922,7 @@
//
// Instead we double the frame (with addition since left-shifting a
// negative value is undefined).
- *mixedAudio += *mixedAudio;
+ AudioFrameOperations::Add(*mixedAudio, mixedAudio);
if(error != _limiter->kNoError) {
WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
diff --git a/webrtc/modules/audio_mixer/BUILD.gn b/webrtc/modules/audio_mixer/BUILD.gn
index addffdd..b30c69b 100644
--- a/webrtc/modules/audio_mixer/BUILD.gn
+++ b/webrtc/modules/audio_mixer/BUILD.gn
@@ -32,10 +32,10 @@
deps = [
":audio_frame_manipulator",
"../..:webrtc_common",
+ "../../audio/utility:audio_frame_operations",
"../../base:rtc_base_approved",
- "../../modules/audio_processing",
- "../../modules/utility",
"../../system_wrappers",
+ "../audio_processing",
]
}
@@ -51,7 +51,7 @@
]
deps = [
+ "../../audio/utility",
"../../base:rtc_base_approved",
- "../../modules/utility",
]
}
diff --git a/webrtc/modules/audio_mixer/DEPS b/webrtc/modules/audio_mixer/DEPS
index 2290dc6..647ba2a 100644
--- a/webrtc/modules/audio_mixer/DEPS
+++ b/webrtc/modules/audio_mixer/DEPS
@@ -1,4 +1,5 @@
include_rules = [
+ "+webrtc/audio/utility/audio_frame_operations.h",
"+webrtc/base",
"+webrtc/call",
"+webrtc/common_audio",
diff --git a/webrtc/modules/audio_mixer/audio_frame_manipulator.cc b/webrtc/modules/audio_mixer/audio_frame_manipulator.cc
index 2550274..ff4c31e 100644
--- a/webrtc/modules/audio_mixer/audio_frame_manipulator.cc
+++ b/webrtc/modules/audio_mixer/audio_frame_manipulator.cc
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_mixer/audio_frame_manipulator.h"
#include "webrtc/modules/include/module_common_types.h"
-#include "webrtc/modules/utility/include/audio_frame_operations.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_mixer/audio_mixer_impl.cc b/webrtc/modules/audio_mixer/audio_mixer_impl.cc
index 4bd96cd..70363c0 100644
--- a/webrtc/modules/audio_mixer/audio_mixer_impl.cc
+++ b/webrtc/modules/audio_mixer/audio_mixer_impl.cc
@@ -14,9 +14,9 @@
#include <functional>
#include <utility>
+#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/audio_mixer/audio_frame_manipulator.h"
-#include "webrtc/modules/utility/include/audio_frame_operations.h"
namespace webrtc {
namespace {
@@ -106,12 +106,12 @@
// Mix |f.frame| into |mixed_audio|, with saturation protection.
// These effect is applied to |f.frame| itself prior to mixing.
if (use_limiter) {
- // Divide by two to avoid saturation in the mixing.
- // This is only meaningful if the limiter will be used.
- *frame >>= 1;
+ // This is to avoid saturation in the mixing. It is only
+ // meaningful if the limiter will be used.
+ AudioFrameOperations::ApplyHalfGain(frame);
}
RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_);
- *mixed_audio += *frame;
+ AudioFrameOperations::Add(*frame, mixed_audio);
}
return 0;
}
@@ -250,7 +250,7 @@
if (audio_frame_for_mixing->samples_per_channel_ == 0) {
// Nothing was mixed, set the audio samples to silence.
audio_frame_for_mixing->samples_per_channel_ = sample_size_;
- audio_frame_for_mixing->Mute();
+ AudioFrameOperations::Mute(audio_frame_for_mixing);
} else {
// Only call the limiter if we have something to mix.
LimitMixedAudio(audio_frame_for_mixing);
@@ -357,7 +357,7 @@
//
// Instead we double the frame (with addition since left-shifting a
// negative value is undefined).
- *mixed_audio += *mixed_audio;
+ AudioFrameOperations::Add(*mixed_audio, mixed_audio);
if (error != limiter_->kNoError) {
LOG_F(LS_ERROR) << "Error from AudioProcessing: " << error;
diff --git a/webrtc/modules/audio_processing/BUILD.gn b/webrtc/modules/audio_processing/BUILD.gn
index 9a5daac..36c7667 100644
--- a/webrtc/modules/audio_processing/BUILD.gn
+++ b/webrtc/modules/audio_processing/BUILD.gn
@@ -165,6 +165,7 @@
defines = []
deps = [
"../..:webrtc_common",
+ "../../audio/utility:audio_frame_operations",
"../audio_coding:isac",
]
diff --git a/webrtc/modules/audio_processing/DEPS b/webrtc/modules/audio_processing/DEPS
index e9ac967..bfaaaf5 100644
--- a/webrtc/modules/audio_processing/DEPS
+++ b/webrtc/modules/audio_processing/DEPS
@@ -1,4 +1,5 @@
include_rules = [
+ "+webrtc/audio/utility/audio_frame_operations.h",
"+webrtc/base",
"+webrtc/common_audio",
"+webrtc/system_wrappers",
diff --git a/webrtc/modules/audio_processing/vad/standalone_vad.cc b/webrtc/modules/audio_processing/vad/standalone_vad.cc
index 8636eb4..95d4982 100644
--- a/webrtc/modules/audio_processing/vad/standalone_vad.cc
+++ b/webrtc/modules/audio_processing/vad/standalone_vad.cc
@@ -10,9 +10,9 @@
#include "webrtc/modules/audio_processing/vad/standalone_vad.h"
+#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/checks.h"
#include "webrtc/modules/include/module_common_types.h"
-#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/webrtc/modules/include/module_common_types.h b/webrtc/modules/include/module_common_types.h
index a5ea5c8..5de5eb7 100644
--- a/webrtc/modules/include/module_common_types.h
+++ b/webrtc/modules/include/module_common_types.h
@@ -18,6 +18,8 @@
#include <limits>
#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/deprecation.h"
+#include "webrtc/base/safe_conversions.h"
#include "webrtc/common_types.h"
#include "webrtc/common_video/rotation.h"
#include "webrtc/typedefs.h"
@@ -520,8 +522,6 @@
*
* - Stereo data is interleaved starting with the left channel.
*
- * - The +operator assume that you would never add exactly opposite frames when
- * deciding the resulting state. To do this use the -operator.
*/
class AudioFrame {
public:
@@ -556,26 +556,29 @@
void CopyFrom(const AudioFrame& src);
- void Mute();
-
- AudioFrame& operator>>=(const int rhs);
- AudioFrame& operator+=(const AudioFrame& rhs);
+ // These methods are deprecated. Use the functions in
+ // webrtc/audio/utility instead. They will exist only for a
+ // short period of time, until WebRTC clients have updated. See
+ // webrtc:6548 for details.
+ RTC_DEPRECATED void Mute();
+ RTC_DEPRECATED AudioFrame& operator>>=(const int rhs);
+ RTC_DEPRECATED AudioFrame& operator+=(const AudioFrame& rhs);
int id_;
// RTP timestamp of the first sample in the AudioFrame.
- uint32_t timestamp_;
+ uint32_t timestamp_ = 0;
// Time since the first frame in milliseconds.
// -1 represents an uninitialized value.
- int64_t elapsed_time_ms_;
+ int64_t elapsed_time_ms_ = -1;
// NTP time of the estimated capture time in local timebase in milliseconds.
// -1 represents an uninitialized value.
- int64_t ntp_time_ms_;
+ int64_t ntp_time_ms_ = -1;
int16_t data_[kMaxDataSizeSamples];
- size_t samples_per_channel_;
- int sample_rate_hz_;
- size_t num_channels_;
- SpeechType speech_type_;
- VADActivity vad_activity_;
+ size_t samples_per_channel_ = 0;
+ int sample_rate_hz_ = 0;
+ size_t num_channels_ = 0;
+ SpeechType speech_type_ = kUndefined;
+ VADActivity vad_activity_ = kVadUnknown;
private:
RTC_DISALLOW_COPY_AND_ASSIGN(AudioFrame);
@@ -585,7 +588,6 @@
// See https://bugs.chromium.org/p/webrtc/issues/detail?id=5647.
inline AudioFrame::AudioFrame()
: data_() {
- Reset();
}
inline void AudioFrame::Reset() {
@@ -659,18 +661,6 @@
return *this;
}
-namespace {
-inline int16_t ClampToInt16(int32_t input) {
- if (input < -0x00008000) {
- return -0x8000;
- } else if (input > 0x00007FFF) {
- return 0x7FFF;
- } else {
- return static_cast<int16_t>(input);
- }
-}
-}
-
inline AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) {
// Sanity check
assert((num_channels_ > 0) && (num_channels_ < 3));
@@ -704,7 +694,7 @@
for (size_t i = 0; i < samples_per_channel_ * num_channels_; i++) {
int32_t wrap_guard =
static_cast<int32_t>(data_[i]) + static_cast<int32_t>(rhs.data_[i]);
- data_[i] = ClampToInt16(wrap_guard);
+ data_[i] = rtc::saturated_cast<int16_t>(wrap_guard);
}
}
return *this;
diff --git a/webrtc/modules/module_common_types_unittest.cc b/webrtc/modules/module_common_types_unittest.cc
index 148263c..159e26b 100644
--- a/webrtc/modules/module_common_types_unittest.cc
+++ b/webrtc/modules/module_common_types_unittest.cc
@@ -112,15 +112,6 @@
EXPECT_EQ(0x0000FFFFu, LatestTimestamp(0xFFFF0000, 0x0000FFFF));
}
-TEST(ClampToInt16, TestCases) {
- EXPECT_EQ(0x0000, ClampToInt16(0x00000000));
- EXPECT_EQ(0x0001, ClampToInt16(0x00000001));
- EXPECT_EQ(0x7FFF, ClampToInt16(0x00007FFF));
- EXPECT_EQ(0x7FFF, ClampToInt16(0x7FFFFFFF));
- EXPECT_EQ(-0x0001, ClampToInt16(-0x00000001));
- EXPECT_EQ(-0x8000, ClampToInt16(-0x8000));
- EXPECT_EQ(-0x8000, ClampToInt16(-0x7FFFFFFF));
-}
TEST(SequenceNumberUnwrapper, Limits) {
SequenceNumberUnwrapper unwrapper;
diff --git a/webrtc/modules/utility/BUILD.gn b/webrtc/modules/utility/BUILD.gn
index b8f5f2e..5888304 100644
--- a/webrtc/modules/utility/BUILD.gn
+++ b/webrtc/modules/utility/BUILD.gn
@@ -16,7 +16,6 @@
"include/helpers_android.h",
"include/jvm_android.h",
"include/process_thread.h",
- "source/audio_frame_operations.cc",
"source/coder.cc",
"source/coder.h",
"source/file_player.cc",
@@ -38,10 +37,13 @@
deps = [
"../..:webrtc_common",
+ "../../audio/utility:audio_frame_operations",
"../../base:rtc_task_queue",
"../../common_audio",
"../../system_wrappers",
"../audio_coding",
+ "../audio_coding:builtin_audio_decoder_factory",
+ "../audio_coding:rent_a_codec",
"../media_file",
]
}
diff --git a/webrtc/modules/utility/DEPS b/webrtc/modules/utility/DEPS
index 1a2885b..d07d352 100644
--- a/webrtc/modules/utility/DEPS
+++ b/webrtc/modules/utility/DEPS
@@ -1,4 +1,7 @@
include_rules = [
+ # TODO(aleloi): remove when clients update. See
+ # bugs.webrtc.org/6548.
+ "+webrtc/audio/utility/audio_frame_operations.h",
"+webrtc/base",
"+webrtc/common_audio",
"+webrtc/common_video",
diff --git a/webrtc/modules/utility/include/audio_frame_operations.h b/webrtc/modules/utility/include/audio_frame_operations.h
index e12e3e5..4bf73df6 100644
--- a/webrtc/modules/utility/include/audio_frame_operations.h
+++ b/webrtc/modules/utility/include/audio_frame_operations.h
@@ -10,54 +10,11 @@
#ifndef WEBRTC_MODULES_UTILITY_INCLUDE_AUDIO_FRAME_OPERATIONS_H_
#define WEBRTC_MODULES_UTILITY_INCLUDE_AUDIO_FRAME_OPERATIONS_H_
+// The contents of this file have moved to
+// //webrtc/audio/utility. This file is deprecated.
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-
-class AudioFrame;
-
-// TODO(andrew): consolidate this with utility.h and audio_frame_manipulator.h.
-// Change reference parameters to pointers. Consider using a namespace rather
-// than a class.
-class AudioFrameOperations {
- public:
- // Upmixes mono |src_audio| to stereo |dst_audio|. This is an out-of-place
- // operation, meaning src_audio and dst_audio must point to different
- // buffers. It is the caller's responsibility to ensure that |dst_audio| is
- // sufficiently large.
- static void MonoToStereo(const int16_t* src_audio, size_t samples_per_channel,
- int16_t* dst_audio);
- // |frame.num_channels_| will be updated. This version checks for sufficient
- // buffer size and that |num_channels_| is mono.
- static int MonoToStereo(AudioFrame* frame);
-
- // Downmixes stereo |src_audio| to mono |dst_audio|. This is an in-place
- // operation, meaning |src_audio| and |dst_audio| may point to the same
- // buffer.
- static void StereoToMono(const int16_t* src_audio, size_t samples_per_channel,
- int16_t* dst_audio);
- // |frame.num_channels_| will be updated. This version checks that
- // |num_channels_| is stereo.
- static int StereoToMono(AudioFrame* frame);
-
- // Swap the left and right channels of |frame|. Fails silently if |frame| is
- // not stereo.
- static void SwapStereoChannels(AudioFrame* frame);
-
- // Conditionally zero out contents of |frame| for implementing audio mute:
- // |previous_frame_muted| && |current_frame_muted| - Zero out whole frame.
- // |previous_frame_muted| && !|current_frame_muted| - Fade-in at frame start.
- // !|previous_frame_muted| && |current_frame_muted| - Fade-out at frame end.
- // !|previous_frame_muted| && !|current_frame_muted| - Leave frame untouched.
- static void Mute(AudioFrame* frame, bool previous_frame_muted,
- bool current_frame_muted);
-
- static int Scale(float left, float right, AudioFrame& frame);
-
- static int ScaleWithSat(float scale, AudioFrame& frame);
-};
-
-} // namespace webrtc
+// TODO(aleloi): Remove this file when clients have updated their
+// includes. See bugs.webrtc.org/6548.
+#include "webrtc/audio/utility/audio_frame_operations.h"
#endif // #ifndef WEBRTC_MODULES_UTILITY_INCLUDE_AUDIO_FRAME_OPERATIONS_H_
diff --git a/webrtc/voice_engine/BUILD.gn b/webrtc/voice_engine/BUILD.gn
index f68b642..6413c18 100644
--- a/webrtc/voice_engine/BUILD.gn
+++ b/webrtc/voice_engine/BUILD.gn
@@ -90,6 +90,7 @@
"../api:audio_mixer_api",
"../api:call_api",
"../api:transport_api",
+ "../audio/utility:audio_frame_operations",
"../base:rtc_base_approved",
"../common_audio",
"../logging:rtc_event_log_api",
diff --git a/webrtc/voice_engine/DEPS b/webrtc/voice_engine/DEPS
index 18efd81..9f6d0d5 100644
--- a/webrtc/voice_engine/DEPS
+++ b/webrtc/voice_engine/DEPS
@@ -1,4 +1,5 @@
include_rules = [
+ "+webrtc/audio/utility/audio_frame_operations.h",
"+webrtc/base",
"+webrtc/call",
"+webrtc/common_audio",
diff --git a/webrtc/voice_engine/channel.cc b/webrtc/voice_engine/channel.cc
index 8d95c7b..dc93dc7 100644
--- a/webrtc/voice_engine/channel.cc
+++ b/webrtc/voice_engine/channel.cc
@@ -13,6 +13,7 @@
#include <algorithm>
#include <utility>
+#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/array_view.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/criticalsection.h"
@@ -32,7 +33,6 @@
#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_receiver.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h"
-#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/modules/utility/include/process_thread.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/include/voe_external_media.h"
@@ -613,7 +613,7 @@
// TODO(henrik.lundin): We should be able to do better than this. But we
// will have to go through all the cases below where the audio samples may
// be used, and handle the muted case in some way.
- audioFrame->Mute();
+ AudioFrameOperations::Mute(audioFrame);
}
// Convert module ID to internal VoE channel ID
diff --git a/webrtc/voice_engine/output_mixer.cc b/webrtc/voice_engine/output_mixer.cc
index 28dd34e..32a7c4e 100644
--- a/webrtc/voice_engine/output_mixer.cc
+++ b/webrtc/voice_engine/output_mixer.cc
@@ -10,9 +10,9 @@
#include "webrtc/voice_engine/output_mixer.h"
+#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/format_macros.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
-#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/include/voe_external_media.h"
diff --git a/webrtc/voice_engine/transmit_mixer.cc b/webrtc/voice_engine/transmit_mixer.cc
index 834510d..8876e33 100644
--- a/webrtc/voice_engine/transmit_mixer.cc
+++ b/webrtc/voice_engine/transmit_mixer.cc
@@ -12,9 +12,9 @@
#include <memory>
+#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/format_macros.h"
#include "webrtc/base/logging.h"
-#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"
diff --git a/webrtc/voice_engine/utility.cc b/webrtc/voice_engine/utility.cc
index 88c60fd..595c711 100644
--- a/webrtc/voice_engine/utility.cc
+++ b/webrtc/voice_engine/utility.cc
@@ -10,13 +10,13 @@
#include "webrtc/voice_engine/utility.h"
+#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/common_audio/resampler/include/push_resampler.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/include/module_common_types.h"
-#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/voice_engine/voice_engine_defines.h"
namespace webrtc {
diff --git a/webrtc/voice_engine/voe_external_media_impl.cc b/webrtc/voice_engine/voe_external_media_impl.cc
index 6f02495..85305c8 100644
--- a/webrtc/voice_engine/voe_external_media_impl.cc
+++ b/webrtc/voice_engine/voe_external_media_impl.cc
@@ -10,6 +10,7 @@
#include "webrtc/voice_engine/voe_external_media_impl.h"
+#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/include/voe_errors.h"
@@ -149,7 +150,7 @@
desired_sample_rate_hz == 0 ? -1 : desired_sample_rate_hz;
auto ret = channelPtr->GetAudioFrameWithMuted(channel, frame);
if (ret == MixerParticipant::AudioFrameInfo::kMuted) {
- frame->Mute();
+ AudioFrameOperations::Mute(frame);
}
return ret == MixerParticipant::AudioFrameInfo::kError ? -1 : 0;
}