Prevent a voice channel from sending data before a source is set.

At the top level, setting a track on an RtpSender is equivalent to
setting a source (previously called a renderer) on a voice send
stream. An RtpSender without a track is not supposed to send data
(not even muted data), so a send stream without a source shouldn't
send data either.

This CL also replaces SendFlags with a boolean and implements the
"Start" and "Stop" methods on AudioSendStream, which was planned
anyway and simplifies the change.
R=pthatcher@webrtc.org, solenberg@webrtc.org
Review URL: https://codereview.webrtc.org/1741933002 .
Cr-Commit-Position: refs/heads/master@{#11918}
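
In effect, each send stream now tracks two pieces of state (whether the
channel has been told to send, and whether an audio source is attached)
and only sends when both hold. A condensed sketch of the gating logic
added to WebRtcAudioSendStream in webrtcvoiceengine.cc is shown below;
it is a summary of the diff, not the complete implementation:

  void SetSend(bool send) {
    send_ = send;
    UpdateSendState();
  }

  void SetSource(cricket::AudioSource* source) {
    source->SetSink(this);  // Start receiving OnData() callbacks.
    source_ = source;
    UpdateSendState();
  }

  void ClearSource() {
    if (source_) {
      source_->SetSink(nullptr);
      source_ = nullptr;
    }
    UpdateSendState();
  }

  void UpdateSendState() {
    // Send if and only if sending is enabled and a source is attached.
    if (send_ && source_ != nullptr) {
      stream_->Start();  // AudioSendStream::Start -> VoEBase::StartSend.
    } else {
      stream_->Stop();   // AudioSendStream::Stop -> VoEBase::StopSend.
    }
  }
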
diff --git a/webrtc/api/mediastreamprovider.h b/webrtc/api/mediastreamprovider.h
index ad7870c..0db7a5c 100644
--- a/webrtc/api/mediastreamprovider.h
+++ b/webrtc/api/mediastreamprovider.h
@@ -17,7 +17,7 @@
namespace cricket {
-class AudioRenderer;
+class AudioSource;
class VideoCapturer;
class VideoFrame;
class VideoRenderer;
@@ -49,7 +49,7 @@
virtual void SetAudioSend(uint32_t ssrc,
bool enable,
const cricket::AudioOptions& options,
- cricket::AudioRenderer* renderer) = 0;
+ cricket::AudioSource* source) = 0;
// Sets the audio playout volume of a remote audio track with |ssrc|.
// |volume| is in the range of [0, 10].
diff --git a/webrtc/api/remoteaudiosource.h b/webrtc/api/remoteaudiosource.h
index 72ed17c..abb55f6 100644
--- a/webrtc/api/remoteaudiosource.h
+++ b/webrtc/api/remoteaudiosource.h
@@ -18,7 +18,6 @@
#include "webrtc/api/notifier.h"
#include "webrtc/audio_sink.h"
#include "webrtc/base/criticalsection.h"
-#include "webrtc/media/base/audiorenderer.h"
namespace rtc {
struct Message;
diff --git a/webrtc/api/rtpsender.cc b/webrtc/api/rtpsender.cc
index c5db929..822b7f4 100644
--- a/webrtc/api/rtpsender.cc
+++ b/webrtc/api/rtpsender.cc
@@ -36,7 +36,7 @@
}
}
-void LocalAudioSinkAdapter::SetSink(cricket::AudioRenderer::Sink* sink) {
+void LocalAudioSinkAdapter::SetSink(cricket::AudioSource::Sink* sink) {
rtc::CritScope lock(&lock_);
ASSERT(!sink || !sink_);
sink_ = sink;
@@ -194,9 +194,9 @@
}
#endif
- cricket::AudioRenderer* renderer = sink_adapter_.get();
- ASSERT(renderer != nullptr);
- provider_->SetAudioSend(ssrc_, track_->enabled(), options, renderer);
+ cricket::AudioSource* source = sink_adapter_.get();
+ ASSERT(source != nullptr);
+ provider_->SetAudioSend(ssrc_, track_->enabled(), options, source);
}
VideoRtpSender::VideoRtpSender(VideoTrackInterface* track,
diff --git a/webrtc/api/rtpsender.h b/webrtc/api/rtpsender.h
index 3305941..25cc4ed 100644
--- a/webrtc/api/rtpsender.h
+++ b/webrtc/api/rtpsender.h
@@ -23,14 +23,14 @@
#include "webrtc/base/basictypes.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/media/base/audiorenderer.h"
+#include "webrtc/media/base/audiosource.h"
namespace webrtc {
// LocalAudioSinkAdapter receives data callback as a sink to the local
-// AudioTrack, and passes the data to the sink of AudioRenderer.
+// AudioTrack, and passes the data to the sink of AudioSource.
class LocalAudioSinkAdapter : public AudioTrackSinkInterface,
- public cricket::AudioRenderer {
+ public cricket::AudioSource {
public:
LocalAudioSinkAdapter();
virtual ~LocalAudioSinkAdapter();
@@ -43,10 +43,10 @@
size_t number_of_channels,
size_t number_of_frames) override;
- // cricket::AudioRenderer implementation.
- void SetSink(cricket::AudioRenderer::Sink* sink) override;
+ // cricket::AudioSource implementation.
+ void SetSink(cricket::AudioSource::Sink* sink) override;
- cricket::AudioRenderer::Sink* sink_;
+ cricket::AudioSource::Sink* sink_;
// Critical section protecting |sink_|.
rtc::CriticalSection lock_;
};
@@ -113,7 +113,7 @@
bool stopped_ = false;
// Used to pass the data callback from the |track_| to the other end of
- // cricket::AudioRenderer.
+ // cricket::AudioSource.
rtc::scoped_ptr<LocalAudioSinkAdapter> sink_adapter_;
};
diff --git a/webrtc/api/rtpsenderreceiver_unittest.cc b/webrtc/api/rtpsenderreceiver_unittest.cc
index 5dc97c6..ecaa6e5 100644
--- a/webrtc/api/rtpsenderreceiver_unittest.cc
+++ b/webrtc/api/rtpsenderreceiver_unittest.cc
@@ -50,7 +50,7 @@
void(uint32_t ssrc,
bool enable,
const cricket::AudioOptions& options,
- cricket::AudioRenderer* renderer));
+ cricket::AudioSource* source));
MOCK_METHOD2(SetAudioPlayoutVolume, void(uint32_t ssrc, double volume));
void SetRawAudioSink(uint32_t,
diff --git a/webrtc/api/webrtcsession.cc b/webrtc/api/webrtcsession.cc
index 8cbcb97..2f3c911 100644
--- a/webrtc/api/webrtcsession.cc
+++ b/webrtc/api/webrtcsession.cc
@@ -1180,13 +1180,13 @@
void WebRtcSession::SetAudioSend(uint32_t ssrc,
bool enable,
const cricket::AudioOptions& options,
- cricket::AudioRenderer* renderer) {
+ cricket::AudioSource* source) {
ASSERT(signaling_thread()->IsCurrent());
if (!voice_channel_) {
LOG(LS_ERROR) << "SetAudioSend: No audio channel exists.";
return;
}
- if (!voice_channel_->SetAudioSend(ssrc, enable, &options, renderer)) {
+ if (!voice_channel_->SetAudioSend(ssrc, enable, &options, source)) {
LOG(LS_ERROR) << "SetAudioSend: ssrc is incorrect: " << ssrc;
}
}
diff --git a/webrtc/api/webrtcsession.h b/webrtc/api/webrtcsession.h
index 87379ab..27472c9 100644
--- a/webrtc/api/webrtcsession.h
+++ b/webrtc/api/webrtcsession.h
@@ -232,7 +232,7 @@
void SetAudioSend(uint32_t ssrc,
bool enable,
const cricket::AudioOptions& options,
- cricket::AudioRenderer* renderer) override;
+ cricket::AudioSource* source) override;
void SetAudioPlayoutVolume(uint32_t ssrc, double volume) override;
void SetRawAudioSink(uint32_t ssrc,
rtc::scoped_ptr<AudioSinkInterface> sink) override;
diff --git a/webrtc/api/webrtcsession_unittest.cc b/webrtc/api/webrtcsession_unittest.cc
index b0ee1be..87ff927 100644
--- a/webrtc/api/webrtcsession_unittest.cc
+++ b/webrtc/api/webrtcsession_unittest.cc
@@ -294,19 +294,20 @@
State state_;
};
-class FakeAudioRenderer : public cricket::AudioRenderer {
+class FakeAudioSource : public cricket::AudioSource {
public:
- FakeAudioRenderer() : sink_(NULL) {}
- virtual ~FakeAudioRenderer() {
+ FakeAudioSource() : sink_(NULL) {}
+ virtual ~FakeAudioSource() {
if (sink_)
sink_->OnClose();
}
void SetSink(Sink* sink) override { sink_ = sink; }
- cricket::AudioRenderer::Sink* sink() const { return sink_; }
+ const cricket::AudioSource::Sink* sink() const { return sink_; }
+
private:
- cricket::AudioRenderer::Sink* sink_;
+ cricket::AudioSource::Sink* sink_;
};
class WebRtcSessionTest
@@ -3337,20 +3338,20 @@
cricket::AudioOptions options;
options.echo_cancellation = rtc::Optional<bool>(true);
- rtc::scoped_ptr<FakeAudioRenderer> renderer(new FakeAudioRenderer());
- session_->SetAudioSend(send_ssrc, false, options, renderer.get());
+ rtc::scoped_ptr<FakeAudioSource> source(new FakeAudioSource());
+ session_->SetAudioSend(send_ssrc, false, options, source.get());
EXPECT_TRUE(channel->IsStreamMuted(send_ssrc));
EXPECT_EQ(rtc::Optional<bool>(), channel->options().echo_cancellation);
- EXPECT_TRUE(renderer->sink() != NULL);
+ EXPECT_TRUE(source->sink() != nullptr);
- // This will trigger SetSink(NULL) to the |renderer|.
- session_->SetAudioSend(send_ssrc, true, options, NULL);
+ // This will trigger SetSink(nullptr) to the |source|.
+ session_->SetAudioSend(send_ssrc, true, options, nullptr);
EXPECT_FALSE(channel->IsStreamMuted(send_ssrc));
EXPECT_EQ(rtc::Optional<bool>(true), channel->options().echo_cancellation);
- EXPECT_TRUE(renderer->sink() == NULL);
+ EXPECT_TRUE(source->sink() == nullptr);
}
-TEST_F(WebRtcSessionTest, AudioRendererForLocalStream) {
+TEST_F(WebRtcSessionTest, AudioSourceForLocalStream) {
Init();
SendAudioVideoStream1();
CreateAndSetRemoteOfferAndLocalAnswer();
@@ -3359,18 +3360,18 @@
ASSERT_EQ(1u, channel->send_streams().size());
uint32_t send_ssrc = channel->send_streams()[0].first_ssrc();
- rtc::scoped_ptr<FakeAudioRenderer> renderer(new FakeAudioRenderer());
+ rtc::scoped_ptr<FakeAudioSource> source(new FakeAudioSource());
cricket::AudioOptions options;
- session_->SetAudioSend(send_ssrc, true, options, renderer.get());
- EXPECT_TRUE(renderer->sink() != NULL);
+ session_->SetAudioSend(send_ssrc, true, options, source.get());
+ EXPECT_TRUE(source->sink() != nullptr);
- // Delete the |renderer| and it will trigger OnClose() to the sink, and this
- // will invalidate the |renderer_| pointer in the sink and prevent getting a
- // SetSink(NULL) callback afterwards.
- renderer.reset();
+ // Delete the |source| and it will trigger OnClose() to the sink, and this
+ // will invalidate the |source_| pointer in the sink and prevent getting a
+ // SetSink(nullptr) callback afterwards.
+ source.reset();
- // This will trigger SetSink(NULL) if no OnClose() callback.
- session_->SetAudioSend(send_ssrc, true, options, NULL);
+ // This will trigger SetSink(nullptr) if no OnClose() callback.
+ session_->SetAudioSend(send_ssrc, true, options, nullptr);
}
TEST_F(WebRtcSessionTest, SetVideoPlayout) {
diff --git a/webrtc/audio/audio_send_stream.cc b/webrtc/audio/audio_send_stream.cc
index c18331c..160a818 100644
--- a/webrtc/audio/audio_send_stream.cc
+++ b/webrtc/audio/audio_send_stream.cc
@@ -97,10 +97,20 @@
void AudioSendStream::Start() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ ScopedVoEInterface<VoEBase> base(voice_engine());
+ int error = base->StartSend(config_.voe_channel_id);
+ if (error != 0) {
+ LOG(LS_ERROR) << "AudioSendStream::Start failed with error: " << error;
+ }
}
void AudioSendStream::Stop() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ ScopedVoEInterface<VoEBase> base(voice_engine());
+ int error = base->StopSend(config_.voe_channel_id);
+ if (error != 0) {
+ LOG(LS_ERROR) << "AudioSendStream::Stop failed with error: " << error;
+ }
}
void AudioSendStream::SignalNetworkState(NetworkState state) {
diff --git a/webrtc/media/base/audiorenderer.h b/webrtc/media/base/audiorenderer.h
index 4544c7f..b611bc4 100644
--- a/webrtc/media/base/audiorenderer.h
+++ b/webrtc/media/base/audiorenderer.h
@@ -11,37 +11,6 @@
#ifndef WEBRTC_MEDIA_BASE_AUDIORENDERER_H_
#define WEBRTC_MEDIA_BASE_AUDIORENDERER_H_
-#include <cstddef>
-
-namespace cricket {
-
-// Abstract interface for rendering the audio data.
-class AudioRenderer {
- public:
- class Sink {
- public:
- // Callback to receive data from the AudioRenderer.
- virtual void OnData(const void* audio_data,
- int bits_per_sample,
- int sample_rate,
- size_t number_of_channels,
- size_t number_of_frames) = 0;
-
- // Called when the AudioRenderer is going away.
- virtual void OnClose() = 0;
-
- protected:
- virtual ~Sink() {}
- };
-
- // Sets a sink to the AudioRenderer. There can be only one sink connected
- // to the renderer at a time.
- virtual void SetSink(Sink* sink) {}
-
- protected:
- virtual ~AudioRenderer() {}
-};
-
-} // namespace cricket
+// TODO(deadbeef): Remove this header once it is no longer included by Chromium.
#endif // WEBRTC_MEDIA_BASE_AUDIORENDERER_H_
diff --git a/webrtc/media/base/audiosource.h b/webrtc/media/base/audiosource.h
new file mode 100644
index 0000000..97743bc
--- /dev/null
+++ b/webrtc/media/base/audiosource.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MEDIA_BASE_AUDIOSOURCE_H_
+#define WEBRTC_MEDIA_BASE_AUDIOSOURCE_H_
+
+#include <cstddef>
+
+namespace cricket {
+
+// Abstract interface for providing the audio data.
+// TODO(deadbeef): Rename this to AudioSourceInterface, and rename
+// webrtc::AudioSourceInterface to AudioTrackSourceInterface.
+class AudioSource {
+ public:
+ class Sink {
+ public:
+ // Callback to receive data from the AudioSource.
+ virtual void OnData(const void* audio_data,
+ int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames) = 0;
+
+ // Called when the AudioSource is going away.
+ virtual void OnClose() = 0;
+
+ protected:
+ virtual ~Sink() {}
+ };
+
+ // Sets a sink to the AudioSource. There can be only one sink connected
+ // to the source at a time.
+ virtual void SetSink(Sink* sink) = 0;
+
+ protected:
+ virtual ~AudioSource() {}
+};
+
+} // namespace cricket
+
+#endif // WEBRTC_MEDIA_BASE_AUDIOSOURCE_H_
diff --git a/webrtc/media/base/fakemediaengine.h b/webrtc/media/base/fakemediaengine.h
index afd262b..c3f1660 100644
--- a/webrtc/media/base/fakemediaengine.h
+++ b/webrtc/media/base/fakemediaengine.h
@@ -21,7 +21,7 @@
#include "webrtc/audio_sink.h"
#include "webrtc/base/buffer.h"
#include "webrtc/base/stringutils.h"
-#include "webrtc/media/base/audiorenderer.h"
+#include "webrtc/media/base/audiosource.h"
#include "webrtc/media/base/mediaengine.h"
#include "webrtc/media/base/rtputils.h"
#include "webrtc/media/base/streamparams.h"
@@ -253,14 +253,12 @@
set_playout(playout);
return true;
}
- virtual bool SetSend(SendFlags flag) {
- return set_sending(flag != SEND_NOTHING);
- }
+ virtual void SetSend(bool send) { set_sending(send); }
virtual bool SetAudioSend(uint32_t ssrc,
bool enable,
const AudioOptions* options,
- AudioRenderer* renderer) {
- if (!SetLocalRenderer(ssrc, renderer)) {
+ AudioSource* source) {
+ if (!SetLocalSource(ssrc, source)) {
return false;
}
if (!RtpHelper<VoiceMediaChannel>::MuteStream(ssrc, !enable)) {
@@ -338,15 +336,14 @@
}
private:
- class VoiceChannelAudioSink : public AudioRenderer::Sink {
+ class VoiceChannelAudioSink : public AudioSource::Sink {
public:
- explicit VoiceChannelAudioSink(AudioRenderer* renderer)
- : renderer_(renderer) {
- renderer_->SetSink(this);
+ explicit VoiceChannelAudioSink(AudioSource* source) : source_(source) {
+ source_->SetSink(this);
}
virtual ~VoiceChannelAudioSink() {
- if (renderer_) {
- renderer_->SetSink(NULL);
+ if (source_) {
+ source_->SetSink(nullptr);
}
}
void OnData(const void* audio_data,
@@ -354,11 +351,11 @@
int sample_rate,
size_t number_of_channels,
size_t number_of_frames) override {}
- void OnClose() override { renderer_ = NULL; }
- AudioRenderer* renderer() const { return renderer_; }
+ void OnClose() override { source_ = nullptr; }
+ AudioSource* source() const { return source_; }
private:
- AudioRenderer* renderer_;
+ AudioSource* source_;
};
bool SetRecvCodecs(const std::vector<AudioCodec>& codecs) {
@@ -383,19 +380,19 @@
options_.SetAll(options);
return true;
}
- bool SetLocalRenderer(uint32_t ssrc, AudioRenderer* renderer) {
- auto it = local_renderers_.find(ssrc);
- if (renderer) {
- if (it != local_renderers_.end()) {
- ASSERT(it->second->renderer() == renderer);
+ bool SetLocalSource(uint32_t ssrc, AudioSource* source) {
+ auto it = local_sinks_.find(ssrc);
+ if (source) {
+ if (it != local_sinks_.end()) {
+ ASSERT(it->second->source() == source);
} else {
- local_renderers_.insert(std::make_pair(
- ssrc, new VoiceChannelAudioSink(renderer)));
+ local_sinks_.insert(
+ std::make_pair(ssrc, new VoiceChannelAudioSink(source)));
}
} else {
- if (it != local_renderers_.end()) {
+ if (it != local_sinks_.end()) {
delete it->second;
- local_renderers_.erase(it);
+ local_sinks_.erase(it);
}
}
return true;
@@ -408,7 +405,7 @@
std::vector<DtmfInfo> dtmf_info_queue_;
int time_since_last_typing_;
AudioOptions options_;
- std::map<uint32_t, VoiceChannelAudioSink*> local_renderers_;
+ std::map<uint32_t, VoiceChannelAudioSink*> local_sinks_;
std::unique_ptr<webrtc::AudioSinkInterface> sink_;
};
diff --git a/webrtc/media/base/mediachannel.h b/webrtc/media/base/mediachannel.h
index bab72b2..d7aefe5 100644
--- a/webrtc/media/base/mediachannel.h
+++ b/webrtc/media/base/mediachannel.h
@@ -42,7 +42,7 @@
namespace cricket {
-class AudioRenderer;
+class AudioSource;
class ScreencastId;
class VideoCapturer;
class VideoFrame;
@@ -448,11 +448,6 @@
NetworkInterface* network_interface_;
};
-enum SendFlags {
- SEND_NOTHING,
- SEND_MICROPHONE
-};
-
// The stats information is structured as follows:
// Media are represented by either MediaSenderInfo or MediaReceiverInfo.
// Media contains a vector of SSRC infos that are exclusively used by this
@@ -901,12 +896,12 @@
// Starts or stops playout of received audio.
virtual bool SetPlayout(bool playout) = 0;
// Starts or stops sending (and potentially capture) of local audio.
- virtual bool SetSend(SendFlags flag) = 0;
+ virtual void SetSend(bool send) = 0;
// Configure stream for sending.
virtual bool SetAudioSend(uint32_t ssrc,
bool enable,
const AudioOptions* options,
- AudioRenderer* renderer) = 0;
+ AudioSource* source) = 0;
// Gets current energy levels for all incoming streams.
virtual bool GetActiveStreams(AudioInfo::StreamList* actives) = 0;
// Get the current energy level of the stream sent to the speaker.
diff --git a/webrtc/media/engine/fakewebrtccall.h b/webrtc/media/engine/fakewebrtccall.h
index 5a9ff30..89a644a 100644
--- a/webrtc/media/engine/fakewebrtccall.h
+++ b/webrtc/media/engine/fakewebrtccall.h
@@ -44,11 +44,12 @@
const webrtc::AudioSendStream::Config& GetConfig() const;
void SetStats(const webrtc::AudioSendStream::Stats& stats);
TelephoneEvent GetLatestTelephoneEvent() const;
+ bool IsSending() const { return sending_; }
private:
// webrtc::SendStream implementation.
- void Start() override {}
- void Stop() override {}
+ void Start() override { sending_ = true; }
+ void Stop() override { sending_ = false; }
void SignalNetworkState(webrtc::NetworkState state) override {}
bool DeliverRtcp(const uint8_t* packet, size_t length) override {
return true;
@@ -62,6 +63,7 @@
TelephoneEvent latest_telephone_event_;
webrtc::AudioSendStream::Config config_;
webrtc::AudioSendStream::Stats stats_;
+ bool sending_ = false;
};
class FakeAudioReceiveStream final : public webrtc::AudioReceiveStream {
diff --git a/webrtc/media/engine/fakewebrtcvoiceengine.h b/webrtc/media/engine/fakewebrtcvoiceengine.h
index 941dfef..926f247 100644
--- a/webrtc/media/engine/fakewebrtcvoiceengine.h
+++ b/webrtc/media/engine/fakewebrtcvoiceengine.h
@@ -130,7 +130,6 @@
struct Channel {
explicit Channel()
: external_transport(false),
- send(false),
playout(false),
volume_scale(1.0),
vad(false),
@@ -151,7 +150,6 @@
memset(&send_codec, 0, sizeof(send_codec));
}
bool external_transport;
- bool send;
bool playout;
float volume_scale;
bool vad;
@@ -193,7 +191,6 @@
agc_mode_(webrtc::kAgcDefault),
observer_(NULL),
playout_fail_channel_(-1),
- send_fail_channel_(-1),
recording_sample_rate_(-1),
playout_sample_rate_(-1) {
memset(&agc_config_, 0, sizeof(agc_config_));
@@ -213,9 +210,6 @@
bool GetPlayout(int channel) {
return channels_[channel]->playout;
}
- bool GetSend(int channel) {
- return channels_[channel]->send;
- }
bool GetVAD(int channel) {
return channels_[channel]->vad;
}
@@ -268,9 +262,6 @@
void set_playout_fail_channel(int channel) {
playout_fail_channel_ = channel;
}
- void set_send_fail_channel(int channel) {
- send_fail_channel_ = channel;
- }
void set_fail_create_channel(bool fail_create_channel) {
fail_create_channel_ = fail_create_channel;
}
@@ -347,28 +338,14 @@
return -1;
}
}
- WEBRTC_FUNC(StartSend, (int channel)) {
- if (send_fail_channel_ != channel) {
- WEBRTC_CHECK_CHANNEL(channel);
- channels_[channel]->send = true;
- return 0;
- } else {
- // When send_fail_channel_ == channel, fail the StartSend on this
- // channel.
- return -1;
- }
- }
+ WEBRTC_STUB(StartSend, (int channel));
WEBRTC_STUB(StopReceive, (int channel));
WEBRTC_FUNC(StopPlayout, (int channel)) {
WEBRTC_CHECK_CHANNEL(channel);
channels_[channel]->playout = false;
return 0;
}
- WEBRTC_FUNC(StopSend, (int channel)) {
- WEBRTC_CHECK_CHANNEL(channel);
- channels_[channel]->send = false;
- return 0;
- }
+ WEBRTC_STUB(StopSend, (int channel));
WEBRTC_STUB(GetVersion, (char version[1024]));
WEBRTC_STUB(LastError, ());
WEBRTC_FUNC(AssociateSendChannel, (int channel,
@@ -797,7 +774,6 @@
webrtc::AgcConfig agc_config_;
webrtc::VoiceEngineObserver* observer_;
int playout_fail_channel_;
- int send_fail_channel_;
int recording_sample_rate_;
int playout_sample_rate_;
FakeAudioProcessing audio_processing_;
diff --git a/webrtc/media/engine/webrtcvoiceengine.cc b/webrtc/media/engine/webrtcvoiceengine.cc
index 4116f42..e78e5e0 100644
--- a/webrtc/media/engine/webrtcvoiceengine.cc
+++ b/webrtc/media/engine/webrtcvoiceengine.cc
@@ -33,7 +33,7 @@
#include "webrtc/call/rtc_event_log.h"
#include "webrtc/common.h"
#include "webrtc/media/base/audioframe.h"
-#include "webrtc/media/base/audiorenderer.h"
+#include "webrtc/media/base/audiosource.h"
#include "webrtc/media/base/mediaconstants.h"
#include "webrtc/media/base/streamparams.h"
#include "webrtc/media/engine/webrtcmediaengine.h"
@@ -1138,7 +1138,7 @@
}
class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
- : public AudioRenderer::Sink {
+ : public AudioSource::Sink {
public:
WebRtcAudioSendStream(int ch, webrtc::AudioTransport* voe_audio_transport,
uint32_t ssrc, const std::string& c_name,
@@ -1160,7 +1160,7 @@
~WebRtcAudioSendStream() override {
RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
- Stop();
+ ClearSource();
call_->DestroyAudioSendStream(stream_);
}
@@ -1184,39 +1184,47 @@
return stream_->SendTelephoneEvent(payload_type, event, duration_ms);
}
+ void SetSend(bool send) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ send_ = send;
+ UpdateSendState();
+ }
+
webrtc::AudioSendStream::Stats GetStats() const {
RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
RTC_DCHECK(stream_);
return stream_->GetStats();
}
- // Starts the rendering by setting a sink to the renderer to get data
- // callback.
+ // Starts sending by setting ourselves as a sink to the AudioSource to get
+ // data callbacks.
// This method is called on the libjingle worker thread.
// TODO(xians): Make sure Start() is called only once.
- void Start(AudioRenderer* renderer) {
+ void SetSource(AudioSource* source) {
RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
- RTC_DCHECK(renderer);
- if (renderer_) {
- RTC_DCHECK(renderer_ == renderer);
+ RTC_DCHECK(source);
+ if (source_) {
+ RTC_DCHECK(source_ == source);
return;
}
- renderer->SetSink(this);
- renderer_ = renderer;
+ source->SetSink(this);
+ source_ = source;
+ UpdateSendState();
}
- // Stops rendering by setting the sink of the renderer to nullptr. No data
+ // Stops sending by setting the sink of the AudioSource to nullptr. No data
// callback will be received after this method.
// This method is called on the libjingle worker thread.
- void Stop() {
+ void ClearSource() {
RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
- if (renderer_) {
- renderer_->SetSink(nullptr);
- renderer_ = nullptr;
+ if (source_) {
+ source_->SetSink(nullptr);
+ source_ = nullptr;
}
+ UpdateSendState();
}
- // AudioRenderer::Sink implementation.
+ // AudioSource::Sink implementation.
// This method is called on the audio thread.
void OnData(const void* audio_data,
int bits_per_sample,
@@ -1234,13 +1242,14 @@
number_of_frames);
}
- // Callback from the |renderer_| when it is going away. In case Start() has
+ // Callback from the |source_| when it is going away. In case SetSource() has
// never been called, this callback won't be triggered.
void OnClose() override {
RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
- // Set |renderer_| to nullptr to make sure no more callback will get into
- // the renderer.
- renderer_ = nullptr;
+ // Set |source_| to nullptr to make sure no more callbacks will be made to
+ // the source.
+ source_ = nullptr;
+ UpdateSendState();
}
// Accessor to the VoE channel ID.
@@ -1250,6 +1259,16 @@
}
private:
+ void UpdateSendState() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(stream_);
+ if (send_ && source_ != nullptr) {
+ stream_->Start();
+ } else { // !send_ || source_ == nullptr
+ stream_->Stop();
+ }
+ }
+
rtc::ThreadChecker worker_thread_checker_;
rtc::ThreadChecker audio_capture_thread_checker_;
webrtc::AudioTransport* const voe_audio_transport_ = nullptr;
@@ -1259,10 +1278,11 @@
// configuration changes.
webrtc::AudioSendStream* stream_ = nullptr;
- // Raw pointer to AudioRenderer owned by LocalAudioTrackHandler.
+ // Raw pointer to AudioSource owned by LocalAudioTrackHandler.
// PeerConnection will make sure invalidating the pointer before the object
// goes away.
- AudioRenderer* renderer_ = nullptr;
+ AudioSource* source_ = nullptr;
+ bool send_ = false;
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(WebRtcAudioSendStream);
};
@@ -1827,68 +1847,32 @@
return true;
}
-bool WebRtcVoiceMediaChannel::SetSend(SendFlags send) {
- desired_send_ = send;
- if (!send_streams_.empty()) {
- return ChangeSend(desired_send_);
- }
- return true;
-}
-
-bool WebRtcVoiceMediaChannel::PauseSend() {
- return ChangeSend(SEND_NOTHING);
-}
-
-bool WebRtcVoiceMediaChannel::ResumeSend() {
- return ChangeSend(desired_send_);
-}
-
-bool WebRtcVoiceMediaChannel::ChangeSend(SendFlags send) {
+void WebRtcVoiceMediaChannel::SetSend(bool send) {
if (send_ == send) {
- return true;
+ return;
}
// Apply channel specific options when channel is enabled for sending.
- if (send == SEND_MICROPHONE) {
+ if (send) {
engine()->ApplyOptions(options_);
}
// Change the settings on each send channel.
- for (const auto& ch : send_streams_) {
- if (!ChangeSend(ch.second->channel(), send)) {
- return false;
- }
+ for (auto& kv : send_streams_) {
+ kv.second->SetSend(send);
}
send_ = send;
- return true;
-}
-
-bool WebRtcVoiceMediaChannel::ChangeSend(int channel, SendFlags send) {
- if (send == SEND_MICROPHONE) {
- if (engine()->voe()->base()->StartSend(channel) == -1) {
- LOG_RTCERR1(StartSend, channel);
- return false;
- }
- } else { // SEND_NOTHING
- RTC_DCHECK(send == SEND_NOTHING);
- if (engine()->voe()->base()->StopSend(channel) == -1) {
- LOG_RTCERR1(StopSend, channel);
- return false;
- }
- }
-
- return true;
}
bool WebRtcVoiceMediaChannel::SetAudioSend(uint32_t ssrc,
bool enable,
const AudioOptions* options,
- AudioRenderer* renderer) {
+ AudioSource* source) {
RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
// TODO(solenberg): The state change should be fully rolled back if any one of
// these calls fail.
- if (!SetLocalRenderer(ssrc, renderer)) {
+ if (!SetLocalSource(ssrc, source)) {
return false;
}
if (!MuteStream(ssrc, !enable)) {
@@ -1975,7 +1959,8 @@
}
}
- return ChangeSend(channel, desired_send_);
+ send_streams_[ssrc]->SetSend(send_);
+ return true;
}
bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32_t ssrc) {
@@ -1989,10 +1974,10 @@
return false;
}
- int channel = it->second->channel();
- ChangeSend(channel, SEND_NOTHING);
+ it->second->SetSend(false);
// Clean up and delete the send stream+channel.
+ int channel = it->second->channel();
LOG(LS_INFO) << "Removing audio send stream " << ssrc
<< " with VoiceEngine channel #" << channel << ".";
delete it->second;
@@ -2001,7 +1986,7 @@
return false;
}
if (send_streams_.empty()) {
- ChangeSend(SEND_NOTHING);
+ SetSend(false);
}
return true;
}
@@ -2110,13 +2095,13 @@
return DeleteVoEChannel(channel);
}
-bool WebRtcVoiceMediaChannel::SetLocalRenderer(uint32_t ssrc,
- AudioRenderer* renderer) {
+bool WebRtcVoiceMediaChannel::SetLocalSource(uint32_t ssrc,
+ AudioSource* source) {
auto it = send_streams_.find(ssrc);
if (it == send_streams_.end()) {
- if (renderer) {
- // Return an error if trying to set a valid renderer with an invalid ssrc.
- LOG(LS_ERROR) << "SetLocalRenderer failed with ssrc "<< ssrc;
+ if (source) {
+ // Return an error if trying to set a valid source with an invalid ssrc.
+ LOG(LS_ERROR) << "SetLocalSource failed with ssrc " << ssrc;
return false;
}
@@ -2124,10 +2109,10 @@
return true;
}
- if (renderer) {
- it->second->Start(renderer);
+ if (source) {
+ it->second->SetSource(source);
} else {
- it->second->Stop();
+ it->second->ClearSource();
}
return true;
@@ -2445,8 +2430,7 @@
sinfo.echo_delay_std_ms = stats.echo_delay_std_ms;
sinfo.echo_return_loss = stats.echo_return_loss;
sinfo.echo_return_loss_enhancement = stats.echo_return_loss_enhancement;
- sinfo.typing_noise_detected =
- (send_ == SEND_NOTHING ? false : stats.typing_noise_detected);
+ sinfo.typing_noise_detected = (send_ ? stats.typing_noise_detected : false);
info->senders.push_back(sinfo);
}
diff --git a/webrtc/media/engine/webrtcvoiceengine.h b/webrtc/media/engine/webrtcvoiceengine.h
index 1d45f6e..0ccc649 100644
--- a/webrtc/media/engine/webrtcvoiceengine.h
+++ b/webrtc/media/engine/webrtcvoiceengine.h
@@ -31,7 +31,7 @@
namespace cricket {
class AudioDeviceModule;
-class AudioRenderer;
+class AudioSource;
class VoEWrapper;
class WebRtcVoiceMediaChannel;
@@ -155,13 +155,13 @@
bool SetPlayout(bool playout) override;
bool PausePlayout();
bool ResumePlayout();
- bool SetSend(SendFlags send) override;
+ void SetSend(bool send) override;
bool PauseSend();
bool ResumeSend();
bool SetAudioSend(uint32_t ssrc,
bool enable,
const AudioOptions* options,
- AudioRenderer* renderer) override;
+ AudioSource* source) override;
bool AddSendStream(const StreamParams& sp) override;
bool RemoveSendStream(uint32_t ssrc) override;
bool AddRecvStream(const StreamParams& sp) override;
@@ -218,7 +218,7 @@
void SetNack(int channel, bool nack_enabled);
bool SetSendCodec(int channel, const webrtc::CodecInst& send_codec);
bool SetMaxSendBandwidth(int bps);
- bool SetLocalRenderer(uint32_t ssrc, AudioRenderer* renderer);
+ bool SetLocalSource(uint32_t ssrc, AudioSource* source);
bool MuteStream(uint32_t ssrc, bool mute);
WebRtcVoiceEngine* engine() { return engine_; }
@@ -226,8 +226,6 @@
int GetOutputLevel(int channel);
bool SetPlayout(int channel, bool playout);
bool ChangePlayout(bool playout);
- bool ChangeSend(SendFlags send);
- bool ChangeSend(int channel, SendFlags send);
int CreateVoEChannel();
bool DeleteVoEChannel(int channel);
bool IsDefaultRecvStream(uint32_t ssrc) {
@@ -249,8 +247,7 @@
bool desired_playout_ = false;
bool recv_transport_cc_enabled_ = false;
bool playout_ = false;
- SendFlags desired_send_ = SEND_NOTHING;
- SendFlags send_ = SEND_NOTHING;
+ bool send_ = false;
webrtc::Call* const call_ = nullptr;
// SSRC of unsignalled receive stream, or -1 if there isn't one.
diff --git a/webrtc/media/engine/webrtcvoiceengine_unittest.cc b/webrtc/media/engine/webrtcvoiceengine_unittest.cc
index 768673d..1fe9fc1 100644
--- a/webrtc/media/engine/webrtcvoiceengine_unittest.cc
+++ b/webrtc/media/engine/webrtcvoiceengine_unittest.cc
@@ -41,7 +41,8 @@
const cricket::AudioCodec kTelephoneEventCodec(106, "telephone-event", 8000, 0,
1, 0);
const uint32_t kSsrc1 = 0x99;
-const uint32_t kSsrc2 = 0x98;
+const uint32_t kSsrc2 = 2;
+const uint32_t kSsrc3 = 3;
const uint32_t kSsrcs4[] = { 1, 2, 3, 4 };
class FakeVoEWrapper : public cricket::VoEWrapper {
@@ -63,6 +64,10 @@
void OnData(const Data& audio) override {}
};
+class FakeAudioSource : public cricket::AudioSource {
+ void SetSink(Sink* sink) override {}
+};
+
class WebRtcVoiceEngineTestFake : public testing::Test {
public:
WebRtcVoiceEngineTestFake() : WebRtcVoiceEngineTestFake("") {}
@@ -94,8 +99,10 @@
if (!SetupEngine()) {
return false;
}
- return channel_->AddSendStream(
- cricket::StreamParams::CreateLegacy(kSsrc1));
+ if (!channel_->AddSendStream(cricket::StreamParams::CreateLegacy(kSsrc1))) {
+ return false;
+ }
+ return channel_->SetAudioSend(kSsrc1, true, nullptr, &fake_source_);
}
void SetupForMultiSendStream() {
EXPECT_TRUE(SetupEngineWithSendStream());
@@ -127,15 +134,11 @@
}
const webrtc::AudioSendStream::Config& GetSendStreamConfig(uint32_t ssrc) {
- const auto* send_stream = call_.GetAudioSendStream(ssrc);
- EXPECT_TRUE(send_stream);
- return send_stream->GetConfig();
+ return GetSendStream(ssrc).GetConfig();
}
const webrtc::AudioReceiveStream::Config& GetRecvStreamConfig(uint32_t ssrc) {
- const auto* recv_stream = call_.GetAudioReceiveStream(ssrc);
- EXPECT_TRUE(recv_stream);
- return recv_stream->GetConfig();
+ return GetRecvStream(ssrc).GetConfig();
}
void TestInsertDtmf(uint32_t ssrc, bool caller) {
@@ -152,7 +155,7 @@
// Test we can only InsertDtmf when the other side supports telephone-event.
EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
- EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ channel_->SetSend(true);
EXPECT_FALSE(channel_->CanInsertDtmf());
EXPECT_FALSE(channel_->InsertDtmf(ssrc, 1, 111));
send_parameters_.codecs.push_back(kTelephoneEventCodec);
@@ -401,6 +404,7 @@
cricket::VoiceMediaChannel* channel_;
cricket::AudioSendParameters send_parameters_;
cricket::AudioRecvParameters recv_parameters_;
+ FakeAudioSource fake_source_;
private:
webrtc::test::ScopedFieldTrials override_field_trials_;
@@ -2001,12 +2005,25 @@
// Test that we can create a channel and start sending on it.
TEST_F(WebRtcVoiceEngineTestFake, Send) {
EXPECT_TRUE(SetupEngineWithSendStream());
- int channel_num = voe_.GetLastChannel();
EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
- EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
- EXPECT_TRUE(voe_.GetSend(channel_num));
- EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
- EXPECT_FALSE(voe_.GetSend(channel_num));
+ channel_->SetSend(true);
+ EXPECT_TRUE(GetSendStream(kSsrc1).IsSending());
+ channel_->SetSend(false);
+ EXPECT_FALSE(GetSendStream(kSsrc1).IsSending());
+}
+
+// Test that a channel will send if and only if it has a source and is enabled
+// for sending.
+TEST_F(WebRtcVoiceEngineTestFake, SendStateWithAndWithoutSource) {
+ EXPECT_TRUE(SetupEngineWithSendStream());
+ EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
+ EXPECT_TRUE(channel_->SetAudioSend(kSsrc1, true, nullptr, nullptr));
+ channel_->SetSend(true);
+ EXPECT_FALSE(GetSendStream(kSsrc1).IsSending());
+ EXPECT_TRUE(channel_->SetAudioSend(kSsrc1, true, nullptr, &fake_source_));
+ EXPECT_TRUE(GetSendStream(kSsrc1).IsSending());
+ EXPECT_TRUE(channel_->SetAudioSend(kSsrc1, true, nullptr, nullptr));
+ EXPECT_FALSE(GetSendStream(kSsrc1).IsSending());
}
// Test that we can create a channel and start playing out on it.
@@ -2025,13 +2042,14 @@
SetupForMultiSendStream();
// Set the global state for sending.
- EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ channel_->SetSend(true);
for (uint32_t ssrc : kSsrcs4) {
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(ssrc)));
+ EXPECT_TRUE(channel_->SetAudioSend(ssrc, true, nullptr, &fake_source_));
// Verify that we are in a sending state for all the created streams.
- EXPECT_TRUE(voe_.GetSend(GetSendStreamConfig(ssrc).voe_channel_id));
+ EXPECT_TRUE(GetSendStream(ssrc).IsSending());
}
EXPECT_EQ(arraysize(kSsrcs4), call_.GetAudioSendStreams().size());
@@ -2086,28 +2104,26 @@
TEST_F(WebRtcVoiceEngineTestFake, SetSendWithMultipleSendStreams) {
SetupForMultiSendStream();
- // Create the send channels and they should be a SEND_NOTHING date.
+ // Create the send channels and they should be in a "not sending" state.
for (uint32_t ssrc : kSsrcs4) {
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(ssrc)));
- int channel_num = voe_.GetLastChannel();
- EXPECT_FALSE(voe_.GetSend(channel_num));
+ EXPECT_TRUE(channel_->SetAudioSend(ssrc, true, nullptr, &fake_source_));
+ EXPECT_FALSE(GetSendStream(ssrc).IsSending());
}
// Set the global state for starting sending.
- EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ channel_->SetSend(true);
for (uint32_t ssrc : kSsrcs4) {
// Verify that we are in a sending state for all the send streams.
- int channel_num = GetSendStreamConfig(ssrc).voe_channel_id;
- EXPECT_TRUE(voe_.GetSend(channel_num));
+ EXPECT_TRUE(GetSendStream(ssrc).IsSending());
}
// Set the global state for stopping sending.
- EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
+ channel_->SetSend(false);
for (uint32_t ssrc : kSsrcs4) {
// Verify that we are in a stop state for all the send streams.
- int channel_num = GetSendStreamConfig(ssrc).voe_channel_id;
- EXPECT_FALSE(voe_.GetSend(channel_num));
+ EXPECT_FALSE(GetSendStream(ssrc).IsSending());
}
}
@@ -2180,29 +2196,27 @@
EXPECT_FALSE(voe_.GetPlayout(channel_num1));
// Adding another stream should enable playout on the new stream only.
- EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
+ EXPECT_TRUE(
+ channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(kSsrc2)));
int channel_num2 = voe_.GetLastChannel();
- EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
- EXPECT_TRUE(voe_.GetSend(channel_num1));
- EXPECT_FALSE(voe_.GetSend(channel_num2));
+ channel_->SetSend(true);
+ EXPECT_TRUE(GetSendStream(kSsrc1).IsSending());
// Make sure only the new stream is played out.
EXPECT_FALSE(voe_.GetPlayout(channel_num1));
EXPECT_TRUE(voe_.GetPlayout(channel_num2));
// Adding yet another stream should have stream 2 and 3 enabled for playout.
- EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(3)));
+ EXPECT_TRUE(
+ channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(kSsrc3)));
int channel_num3 = voe_.GetLastChannel();
EXPECT_FALSE(voe_.GetPlayout(channel_num1));
EXPECT_TRUE(voe_.GetPlayout(channel_num2));
EXPECT_TRUE(voe_.GetPlayout(channel_num3));
- EXPECT_FALSE(voe_.GetSend(channel_num3));
// Stop sending.
- EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
- EXPECT_FALSE(voe_.GetSend(channel_num1));
- EXPECT_FALSE(voe_.GetSend(channel_num2));
- EXPECT_FALSE(voe_.GetSend(channel_num3));
+ channel_->SetSend(false);
+ EXPECT_FALSE(GetSendStream(kSsrc1).IsSending());
// Stop playout.
EXPECT_TRUE(channel_->SetPlayout(false));
@@ -2228,18 +2242,17 @@
EXPECT_TRUE(SetupEngineWithSendStream());
cricket::AudioOptions options_adjust_agc;
options_adjust_agc.adjust_agc_delta = rtc::Optional<int>(-10);
- int channel_num = voe_.GetLastChannel();
webrtc::AgcConfig agc_config;
EXPECT_EQ(0, voe_.GetAgcConfig(agc_config));
EXPECT_EQ(0, agc_config.targetLeveldBOv);
send_parameters_.options = options_adjust_agc;
EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
- EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
- EXPECT_TRUE(voe_.GetSend(channel_num));
+ channel_->SetSend(true);
+ EXPECT_TRUE(GetSendStream(kSsrc1).IsSending());
EXPECT_EQ(0, voe_.GetAgcConfig(agc_config));
EXPECT_EQ(agc_config.targetLeveldBOv, 10); // level was attenuated
- EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
- EXPECT_FALSE(voe_.GetSend(channel_num));
+ channel_->SetSend(false);
+ EXPECT_FALSE(GetSendStream(kSsrc1).IsSending());
EXPECT_EQ(0, voe_.GetAgcConfig(agc_config));
}
@@ -2315,7 +2328,7 @@
// Start sending - this affects some reported stats.
{
cricket::VoiceMediaInfo info;
- EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ channel_->SetSend(true);
EXPECT_EQ(true, channel_->GetStats(&info));
VerifyVoiceSenderInfo(info.senders[0], true);
}
@@ -2574,7 +2587,7 @@
TEST_F(WebRtcVoiceEngineTestFake, TestSetPlayoutError) {
EXPECT_TRUE(SetupEngineWithSendStream());
EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
- EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ channel_->SetSend(true);
EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(3)));
EXPECT_TRUE(channel_->SetPlayout(true));
@@ -2844,7 +2857,7 @@
EXPECT_TRUE(agc_enabled);
EXPECT_TRUE(ns_enabled);
- channel1->SetSend(cricket::SEND_MICROPHONE);
+ channel1->SetSend(true);
voe_.GetEcStatus(ec_enabled, ec_mode);
voe_.GetAgcStatus(agc_enabled, agc_mode);
voe_.GetNsStatus(ns_enabled, ns_mode);
@@ -2852,7 +2865,7 @@
EXPECT_TRUE(agc_enabled);
EXPECT_FALSE(ns_enabled);
- channel2->SetSend(cricket::SEND_MICROPHONE);
+ channel2->SetSend(true);
voe_.GetEcStatus(ec_enabled, ec_mode);
voe_.GetAgcStatus(agc_enabled, agc_mode);
voe_.GetNsStatus(ns_enabled, ns_mode);
@@ -2868,7 +2881,7 @@
rtc::Optional<bool>(false);
parameters_options_no_agc_nor_ns.options.noise_suppression =
rtc::Optional<bool>(false);
- channel2->SetSend(cricket::SEND_MICROPHONE);
+ channel2->SetSend(true);
channel2->SetSendParameters(parameters_options_no_agc_nor_ns);
expected_options.echo_cancellation = rtc::Optional<bool>(true);
expected_options.auto_gain_control = rtc::Optional<bool>(false);
diff --git a/webrtc/media/media.gyp b/webrtc/media/media.gyp
index 53716bd..5f8e559 100644
--- a/webrtc/media/media.gyp
+++ b/webrtc/media/media.gyp
@@ -32,7 +32,7 @@
},
'sources': [
'base/audioframe.h',
- 'base/audiorenderer.h',
+ 'base/audiosource.h',
'base/capturemanager.cc',
'base/capturemanager.h',
'base/codec.cc',
diff --git a/webrtc/pc/channel.cc b/webrtc/pc/channel.cc
index b05c0bc..c6553a0 100644
--- a/webrtc/pc/channel.cc
+++ b/webrtc/pc/channel.cc
@@ -1334,9 +1334,9 @@
bool VoiceChannel::SetAudioSend(uint32_t ssrc,
bool enable,
const AudioOptions* options,
- AudioRenderer* renderer) {
+ AudioSource* source) {
return InvokeOnWorker(Bind(&VoiceMediaChannel::SetAudioSend, media_channel(),
- ssrc, enable, options, renderer));
+ ssrc, enable, options, source));
}
// TODO(juberti): Handle early media the right way. We should get an explicit
@@ -1454,10 +1454,7 @@
// Send outgoing data if we're the active call, we have the remote content,
// and we have had some form of connectivity.
bool send = IsReadyToSend();
- SendFlags send_flag = send ? SEND_MICROPHONE : SEND_NOTHING;
- if (!media_channel()->SetSend(send_flag)) {
- LOG(LS_ERROR) << "Failed to SetSend " << send_flag << " on voice channel";
- }
+ media_channel()->SetSend(send);
LOG(LS_INFO) << "Changing voice state, recv=" << recv << " send=" << send;
}
diff --git a/webrtc/pc/channel.h b/webrtc/pc/channel.h
index e1cbf7e..a7c0a3e 100644
--- a/webrtc/pc/channel.h
+++ b/webrtc/pc/channel.h
@@ -336,7 +336,7 @@
bool SetAudioSend(uint32_t ssrc,
bool enable,
const AudioOptions* options,
- AudioRenderer* renderer);
+ AudioSource* source);
// downcasts a MediaChannel
virtual VoiceMediaChannel* media_channel() const {