| /* |
| * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
| * |
| * Use of this source code is governed by a BSD-style license |
| * that can be found in the LICENSE file in the root of the source |
| * tree. An additional intellectual property rights grant can be found |
| * in the file PATENTS. All contributing project authors may |
| * be found in the AUTHORS file in the root of the source tree. |
| */ |
| |
| #include <string.h> |
| |
#include <algorithm>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
| |
| #include "webrtc/api/audio/audio_mixer.h" |
| #include "webrtc/base/bind.h" |
| #include "webrtc/base/thread.h" |
| #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" |
| #include "webrtc/test/gmock.h" |
| |
| using testing::_; |
| using testing::Exactly; |
| using testing::Invoke; |
| using testing::Return; |
| |
| namespace webrtc { |
| |
| namespace { |
| |
| constexpr int kDefaultSampleRateHz = 48000; |
| constexpr int kId = 1; |
| |
// Utility function that resets the frame member variables to
// sensible defaults.
| void ResetFrame(AudioFrame* frame) { |
| frame->id_ = kId; |
| frame->sample_rate_hz_ = kDefaultSampleRateHz; |
| frame->num_channels_ = 1; |
| |
| // Frame duration 10ms. |
| frame->samples_per_channel_ = kDefaultSampleRateHz / 100; |
| frame->vad_activity_ = AudioFrame::kVadActive; |
| frame->speech_type_ = AudioFrame::kNormalSpeech; |
| } |
| |
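// Output frame reused by the tests below; Mix() writes its result here.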
| AudioFrame frame_for_mixing; |
| |
| } // namespace |
| |
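// Mock audio source. By default, GetAudioFrameWithInfo() copies |fake_frame_|
// into the provided output frame and reports |fake_audio_frame_info_|.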
| class MockMixerAudioSource : public AudioMixer::Source { |
| public: |
| MockMixerAudioSource() |
| : fake_audio_frame_info_(AudioMixer::Source::AudioFrameInfo::kNormal) { |
| ON_CALL(*this, GetAudioFrameWithInfo(_, _)) |
| .WillByDefault( |
| Invoke(this, &MockMixerAudioSource::FakeAudioFrameWithInfo)); |
| } |
| |
| MOCK_METHOD2(GetAudioFrameWithInfo, |
| AudioFrameInfo(int sample_rate_hz, AudioFrame* audio_frame)); |
| |
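  // Ssrc() is part of the AudioMixer::Source interface; the tests below do
  // not set expectations on it.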
| MOCK_METHOD0(Ssrc, int()); |
| |
| AudioFrame* fake_frame() { return &fake_frame_; } |
| AudioFrameInfo fake_info() { return fake_audio_frame_info_; } |
| void set_fake_info(const AudioFrameInfo audio_frame_info) { |
| fake_audio_frame_info_ = audio_frame_info; |
| } |
| |
| private: |
| AudioFrame fake_frame_; |
| AudioFrameInfo fake_audio_frame_info_; |
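  // Default action for GetAudioFrameWithInfo(): copies |fake_frame_| into the
  // output frame (the requested sample rate is ignored) and returns the
  // configured frame info.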
| AudioFrameInfo FakeAudioFrameWithInfo(int sample_rate_hz, |
| AudioFrame* audio_frame) { |
| audio_frame->CopyFrom(fake_frame_); |
| return fake_info(); |
| } |
| }; |
| |
// Creates participants from |frames| and |frame_info| and adds them
// to the mixer. Compares the mixed status of each source with
// |expected_status|.
| void MixAndCompare( |
| const std::vector<AudioFrame>& frames, |
| const std::vector<AudioMixer::Source::AudioFrameInfo>& frame_info, |
| const std::vector<bool>& expected_status) { |
  int num_audio_sources = static_cast<int>(frames.size());
| RTC_DCHECK(frames.size() == frame_info.size()); |
| RTC_DCHECK(frame_info.size() == expected_status.size()); |
| |
| const auto mixer = AudioMixerImpl::Create(); |
| std::vector<MockMixerAudioSource> participants(num_audio_sources); |
| |
| for (int i = 0; i < num_audio_sources; i++) { |
| participants[i].fake_frame()->CopyFrom(frames[i]); |
| participants[i].set_fake_info(frame_info[i]); |
| } |
| |
| for (int i = 0; i < num_audio_sources; i++) { |
| EXPECT_TRUE(mixer->AddSource(&participants[i])); |
| EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz, _)) |
| .Times(Exactly(1)); |
| } |
| |
| mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
| |
| for (int i = 0; i < num_audio_sources; i++) { |
| EXPECT_EQ(expected_status[i], |
| mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) |
| << "Mixed status of AudioSource #" << i << " wrong."; |
| } |
| } |
| |
| TEST(AudioMixer, LargestEnergyVadActiveMixed) { |
| constexpr int kAudioSources = |
| AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 3; |
| |
| const auto mixer = AudioMixerImpl::Create(); |
| |
| MockMixerAudioSource participants[kAudioSources]; |
| |
| for (int i = 0; i < kAudioSources; ++i) { |
| ResetFrame(participants[i].fake_frame()); |
| |
    // Set the 80th sample value, since the first 80 samples may be
    // modified by a ramp-in window.
| participants[i].fake_frame()->data_[80] = i; |
| |
| EXPECT_TRUE(mixer->AddSource(&participants[i])); |
| EXPECT_CALL(participants[i], GetAudioFrameWithInfo(_, _)).Times(Exactly(1)); |
| } |
| |
  // The last participant reports a frame with passive VAD, even though it has
  // the largest energy.
| participants[kAudioSources - 1].fake_frame()->vad_activity_ = |
| AudioFrame::kVadPassive; |
| |
| AudioFrame audio_frame; |
| mixer->Mix(kDefaultSampleRateHz, |
| 1, // number of channels |
| &audio_frame); |
| |
| for (int i = 0; i < kAudioSources; ++i) { |
| bool is_mixed = |
| mixer->GetAudioSourceMixabilityStatusForTest(&participants[i]); |
| if (i == kAudioSources - 1 || |
| i < kAudioSources - 1 - |
| AudioMixerImpl::kMaximumAmountOfMixedAudioSources) { |
| EXPECT_FALSE(is_mixed) << "Mixing status of AudioSource #" << i |
| << " wrong."; |
| } else { |
| EXPECT_TRUE(is_mixed) << "Mixing status of AudioSource #" << i |
| << " wrong."; |
| } |
| } |
| } |
| |
| TEST(AudioMixer, FrameNotModifiedForSingleParticipant) { |
| const auto mixer = AudioMixerImpl::Create(); |
| |
| MockMixerAudioSource participant; |
| |
| ResetFrame(participant.fake_frame()); |
| const int n_samples = participant.fake_frame()->samples_per_channel_; |
| |
| // Modify the frame so that it's not zero. |
| for (int j = 0; j < n_samples; j++) { |
| participant.fake_frame()->data_[j] = j; |
| } |
| |
| EXPECT_TRUE(mixer->AddSource(&participant)); |
| EXPECT_CALL(participant, GetAudioFrameWithInfo(_, _)).Times(Exactly(2)); |
| |
| AudioFrame audio_frame; |
  // Two mix iterations, so the comparison happens after the ramp-up step.
| for (int i = 0; i < 2; i++) { |
| mixer->Mix(kDefaultSampleRateHz, |
| 1, // number of channels |
| &audio_frame); |
| } |
| |
  // memcmp compares bytes; the frame samples are 16 bits wide.
  EXPECT_EQ(0, memcmp(participant.fake_frame()->data_, audio_frame.data_,
                      n_samples * sizeof(int16_t)));
| } |
| |
| TEST(AudioMixer, ParticipantSampleRate) { |
| const auto mixer = AudioMixerImpl::Create(); |
| |
| MockMixerAudioSource participant; |
| ResetFrame(participant.fake_frame()); |
| |
| EXPECT_TRUE(mixer->AddSource(&participant)); |
| for (auto frequency : {8000, 16000, 32000, 48000}) { |
| EXPECT_CALL(participant, GetAudioFrameWithInfo(frequency, _)) |
| .Times(Exactly(1)); |
| participant.fake_frame()->sample_rate_hz_ = frequency; |
| participant.fake_frame()->samples_per_channel_ = frequency / 100; |
| mixer->Mix(frequency, 1, &frame_for_mixing); |
| EXPECT_EQ(frequency, frame_for_mixing.sample_rate_hz_); |
| } |
| } |
| |
| TEST(AudioMixer, ParticipantNumberOfChannels) { |
| const auto mixer = AudioMixerImpl::Create(); |
| |
| MockMixerAudioSource participant; |
| ResetFrame(participant.fake_frame()); |
| |
| EXPECT_TRUE(mixer->AddSource(&participant)); |
| for (size_t number_of_channels : {1, 2}) { |
| EXPECT_CALL(participant, GetAudioFrameWithInfo(kDefaultSampleRateHz, _)) |
| .Times(Exactly(1)); |
| mixer->Mix(kDefaultSampleRateHz, number_of_channels, &frame_for_mixing); |
| EXPECT_EQ(number_of_channels, frame_for_mixing.num_channels_); |
| } |
| } |
| |
// The maximum number of participants is mixed in one iteration; then
// another participant with higher energy is added.
| TEST(AudioMixer, RampedOutSourcesShouldNotBeMarkedMixed) { |
| constexpr int kAudioSources = |
| AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 1; |
| |
| const auto mixer = AudioMixerImpl::Create(); |
| MockMixerAudioSource participants[kAudioSources]; |
| |
| for (int i = 0; i < kAudioSources; i++) { |
| ResetFrame(participants[i].fake_frame()); |
| // Set the participant audio energy to increase with the index |
| // |i|. |
| participants[i].fake_frame()->data_[0] = 100 * i; |
| } |
| |
| // Add all participants but the loudest for mixing. |
| for (int i = 0; i < kAudioSources - 1; i++) { |
| EXPECT_TRUE(mixer->AddSource(&participants[i])); |
| EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz, _)) |
| .Times(Exactly(1)); |
| } |
| |
| // First mixer iteration |
| mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
| |
| // All participants but the loudest should have been mixed. |
| for (int i = 0; i < kAudioSources - 1; i++) { |
| EXPECT_TRUE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[i])) |
| << "Mixed status of AudioSource #" << i << " wrong."; |
| } |
| |
| // Add new participant with higher energy. |
| EXPECT_TRUE(mixer->AddSource(&participants[kAudioSources - 1])); |
| for (int i = 0; i < kAudioSources; i++) { |
| EXPECT_CALL(participants[i], GetAudioFrameWithInfo(kDefaultSampleRateHz, _)) |
| .Times(Exactly(1)); |
| } |
| |
| mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
| |
  // The quietest participant should not have been mixed.
| EXPECT_FALSE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[0])) |
| << "Mixed status of AudioSource #0 wrong."; |
| |
| // The loudest participants should have been mixed. |
| for (int i = 1; i < kAudioSources; i++) { |
    EXPECT_TRUE(mixer->GetAudioSourceMixabilityStatusForTest(&participants[i]))
        << "Mixed status of AudioSource #" << i << " wrong.";
| } |
| } |
| |
// This test checks that mixer construction and participant addition
// can be done on separate threads.
| TEST(AudioMixer, ConstructFromOtherThread) { |
| std::unique_ptr<rtc::Thread> init_thread = rtc::Thread::Create(); |
| std::unique_ptr<rtc::Thread> participant_thread = rtc::Thread::Create(); |
| init_thread->Start(); |
| const auto mixer = init_thread->Invoke<rtc::scoped_refptr<AudioMixer>>( |
| RTC_FROM_HERE, &AudioMixerImpl::Create); |
| MockMixerAudioSource participant; |
| |
| ResetFrame(participant.fake_frame()); |
| |
| participant_thread->Start(); |
| EXPECT_TRUE(participant_thread->Invoke<int>( |
| RTC_FROM_HERE, |
| rtc::Bind(&AudioMixer::AddSource, mixer.get(), &participant))); |
| |
| EXPECT_CALL(participant, GetAudioFrameWithInfo(kDefaultSampleRateHz, _)) |
| .Times(Exactly(1)); |
| |
| // Do one mixer iteration |
| mixer->Mix(kDefaultSampleRateHz, 1, &frame_for_mixing); |
| } |
| |
| TEST(AudioMixer, MutedShouldMixAfterUnmuted) { |
| constexpr int kAudioSources = |
| AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 1; |
| |
| std::vector<AudioFrame> frames(kAudioSources); |
| for (auto& frame : frames) { |
| ResetFrame(&frame); |
| } |
| |
| std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( |
| kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); |
| frame_info[0] = AudioMixer::Source::AudioFrameInfo::kMuted; |
| std::vector<bool> expected_status(kAudioSources, true); |
| expected_status[0] = false; |
| |
| MixAndCompare(frames, frame_info, expected_status); |
| } |
| |
| TEST(AudioMixer, PassiveShouldMixAfterNormal) { |
| constexpr int kAudioSources = |
| AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 1; |
| |
| std::vector<AudioFrame> frames(kAudioSources); |
| for (auto& frame : frames) { |
| ResetFrame(&frame); |
| } |
| |
| std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( |
| kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); |
| frames[0].vad_activity_ = AudioFrame::kVadPassive; |
| std::vector<bool> expected_status(kAudioSources, true); |
| expected_status[0] = false; |
| |
| MixAndCompare(frames, frame_info, expected_status); |
| } |
| |
| TEST(AudioMixer, ActiveShouldMixBeforeLoud) { |
| constexpr int kAudioSources = |
| AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 1; |
| |
| std::vector<AudioFrame> frames(kAudioSources); |
| for (auto& frame : frames) { |
| ResetFrame(&frame); |
| } |
| |
| std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( |
| kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); |
| frames[0].vad_activity_ = AudioFrame::kVadPassive; |
| std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, |
| std::numeric_limits<int16_t>::max()); |
| std::vector<bool> expected_status(kAudioSources, true); |
| expected_status[0] = false; |
| |
| MixAndCompare(frames, frame_info, expected_status); |
| } |
| |
| TEST(AudioMixer, UnmutedShouldMixBeforeLoud) { |
| constexpr int kAudioSources = |
| AudioMixerImpl::kMaximumAmountOfMixedAudioSources + 1; |
| |
| std::vector<AudioFrame> frames(kAudioSources); |
| for (auto& frame : frames) { |
| ResetFrame(&frame); |
| } |
| |
| std::vector<AudioMixer::Source::AudioFrameInfo> frame_info( |
| kAudioSources, AudioMixer::Source::AudioFrameInfo::kNormal); |
| frame_info[0] = AudioMixer::Source::AudioFrameInfo::kMuted; |
| std::fill(frames[0].data_, frames[0].data_ + kDefaultSampleRateHz / 100, |
| std::numeric_limits<int16_t>::max()); |
| std::vector<bool> expected_status(kAudioSources, true); |
| expected_status[0] = false; |
| |
| MixAndCompare(frames, frame_info, expected_status); |
| } |
| } // namespace webrtc |