Adds trunk/talk folder of revision 359 from libjingles google code to
trunk/talk
git-svn-id: http://webrtc.googlecode.com/svn/trunk@4318 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/talk/app/webrtc/audiotrack.cc b/talk/app/webrtc/audiotrack.cc
new file mode 100644
index 0000000..5bfca42
--- /dev/null
+++ b/talk/app/webrtc/audiotrack.cc
@@ -0,0 +1,53 @@
+/*
+ * libjingle
+ * Copyright 2004--2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "talk/app/webrtc/audiotrack.h"
+
+#include <string>
+
+namespace webrtc {
+
+static const char kAudioTrackKind[] = "audio";
+
+AudioTrack::AudioTrack(const std::string& label,
+ AudioSourceInterface* audio_source)
+ : MediaStreamTrack<AudioTrackInterface>(label),
+ audio_source_(audio_source),
+ renderer_(new AudioTrackRenderer()) {
+}
+
+std::string AudioTrack::kind() const {
+ return kAudioTrackKind;
+}
+
+talk_base::scoped_refptr<AudioTrack> AudioTrack::Create(
+ const std::string& id, AudioSourceInterface* source) {
+ talk_base::RefCountedObject<AudioTrack>* track =
+ new talk_base::RefCountedObject<AudioTrack>(id, source);
+ return track;
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/audiotrack.h b/talk/app/webrtc/audiotrack.h
new file mode 100644
index 0000000..48098f5
--- /dev/null
+++ b/talk/app/webrtc/audiotrack.h
@@ -0,0 +1,66 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_AUDIOTRACK_H_
+#define TALK_APP_WEBRTC_AUDIOTRACK_H_
+
+#include "talk/app/webrtc/audiotrackrenderer.h"
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/mediastreamtrack.h"
+#include "talk/app/webrtc/notifier.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/scoped_ref_ptr.h"
+
+namespace webrtc {
+
+class AudioTrack : public MediaStreamTrack<AudioTrackInterface> {
+ public:
+ static talk_base::scoped_refptr<AudioTrack> Create(
+ const std::string& id, AudioSourceInterface* source);
+
+ virtual AudioSourceInterface* GetSource() const {
+ return audio_source_.get();
+ }
+
+ virtual cricket::AudioRenderer* FrameInput() {
+ return renderer_.get();
+ }
+
+ // Implement MediaStreamTrack
+ virtual std::string kind() const;
+
+ protected:
+ AudioTrack(const std::string& label, AudioSourceInterface* audio_source);
+
+ private:
+ talk_base::scoped_refptr<AudioSourceInterface> audio_source_;
+ talk_base::scoped_ptr<AudioTrackRenderer> renderer_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_AUDIOTRACK_H_
diff --git a/talk/app/webrtc/audiotrackrenderer.cc b/talk/app/webrtc/audiotrackrenderer.cc
new file mode 100644
index 0000000..c8ad522
--- /dev/null
+++ b/talk/app/webrtc/audiotrackrenderer.cc
@@ -0,0 +1,48 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/audiotrackrenderer.h"
+#include "talk/base/common.h"
+
+namespace webrtc {
+
+AudioTrackRenderer::AudioTrackRenderer() : channel_id_(-1) {
+}
+
+AudioTrackRenderer::~AudioTrackRenderer() {
+}
+
+void AudioTrackRenderer::SetChannelId(int channel_id) {
+ ASSERT(channel_id_ == -1);
+ channel_id_ = channel_id;
+}
+
+int AudioTrackRenderer::GetChannelId() const {
+ return channel_id_;
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/audiotrackrenderer.h b/talk/app/webrtc/audiotrackrenderer.h
new file mode 100644
index 0000000..55de04e
--- /dev/null
+++ b/talk/app/webrtc/audiotrackrenderer.h
@@ -0,0 +1,55 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_AUDIOTRACKRENDERER_H_
+#define TALK_APP_WEBRTC_AUDIOTRACKRENDERER_H_
+
+#include "talk/media/base/audiorenderer.h"
+
+namespace webrtc {
+
+// Class used for AudioTrack to get the ID of the WebRtc voice channel that
+// the AudioTrack is connecting to.
+// Each AudioTrack owns an AudioTrackRenderer instance.
+// SetChannelId() should be called only when an AudioTrack is added to a
+// MediaStream and should not be changed afterwards.
+class AudioTrackRenderer : public cricket::AudioRenderer {
+ public:
+ AudioTrackRenderer();
+ ~AudioTrackRenderer();
+
+ // Implements cricket::AudioRenderer.
+ virtual void SetChannelId(int channel_id);
+ virtual int GetChannelId() const;
+
+ private:
+ int channel_id_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_AUDIOTRACKRENDERER_H_
diff --git a/talk/app/webrtc/datachannel.cc b/talk/app/webrtc/datachannel.cc
new file mode 100644
index 0000000..345cd5f
--- /dev/null
+++ b/talk/app/webrtc/datachannel.cc
@@ -0,0 +1,295 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "talk/app/webrtc/datachannel.h"
+
+#include <string>
+
+#include "talk/app/webrtc/webrtcsession.h"
+#include "talk/base/logging.h"
+#include "talk/base/refcount.h"
+
+namespace webrtc {
+
+static size_t kMaxQueuedDataPackets = 100;
+
+talk_base::scoped_refptr<DataChannel> DataChannel::Create(
+ WebRtcSession* session,
+ const std::string& label,
+ const DataChannelInit* config) {
+ talk_base::scoped_refptr<DataChannel> channel(
+ new talk_base::RefCountedObject<DataChannel>(session, label));
+ if (!channel->Init(config)) {
+ return NULL;
+ }
+ return channel;
+}
+
+DataChannel::DataChannel(WebRtcSession* session, const std::string& label)
+ : label_(label),
+ observer_(NULL),
+ state_(kConnecting),
+ was_ever_writable_(false),
+ session_(session),
+ data_session_(NULL),
+ send_ssrc_set_(false),
+ send_ssrc_(0),
+ receive_ssrc_set_(false),
+ receive_ssrc_(0) {
+}
+
+bool DataChannel::Init(const DataChannelInit* config) {
+ if (config) {
+ if (session_->data_channel_type() == cricket::DCT_RTP &&
+ (config->reliable ||
+ config->id != -1 ||
+ config->maxRetransmits != -1 ||
+ config->maxRetransmitTime != -1)) {
+ LOG(LS_ERROR) << "Failed to initialize the RTP data channel due to "
+ << "invalid DataChannelInit.";
+ return false;
+ } else if (session_->data_channel_type() == cricket::DCT_SCTP) {
+ if (config->id < -1 ||
+ config->maxRetransmits < -1 ||
+ config->maxRetransmitTime < -1) {
+ LOG(LS_ERROR) << "Failed to initialize the SCTP data channel due to "
+ << "invalid DataChannelInit.";
+ return false;
+ }
+ if (config->maxRetransmits != -1 && config->maxRetransmitTime != -1) {
+ LOG(LS_ERROR) <<
+ "maxRetransmits and maxRetransmitTime should not be both set.";
+ return false;
+ }
+ }
+ config_ = *config;
+ }
+ return true;
+}
+
+bool DataChannel::HasNegotiationCompleted() {
+ return send_ssrc_set_ == receive_ssrc_set_;
+}
+
+DataChannel::~DataChannel() {
+ ClearQueuedData();
+}
+
+void DataChannel::RegisterObserver(DataChannelObserver* observer) {
+ observer_ = observer;
+ DeliverQueuedData();
+}
+
+void DataChannel::UnregisterObserver() {
+ observer_ = NULL;
+}
+
+bool DataChannel::reliable() const {
+ if (session_->data_channel_type() == cricket::DCT_RTP) {
+ return false;
+ } else {
+ return config_.maxRetransmits == -1 &&
+ config_.maxRetransmitTime == -1;
+ }
+}
+
+uint64 DataChannel::buffered_amount() const {
+ return 0;
+}
+
+void DataChannel::Close() {
+ if (state_ == kClosed)
+ return;
+ send_ssrc_ = 0;
+ send_ssrc_set_ = false;
+ SetState(kClosing);
+ UpdateState();
+}
+
+bool DataChannel::Send(const DataBuffer& buffer) {
+ if (state_ != kOpen) {
+ return false;
+ }
+ cricket::SendDataParams send_params;
+
+ send_params.ssrc = send_ssrc_;
+ if (session_->data_channel_type() == cricket::DCT_SCTP) {
+ send_params.ordered = config_.ordered;
+ send_params.max_rtx_count = config_.maxRetransmits;
+ send_params.max_rtx_ms = config_.maxRetransmitTime;
+ }
+ send_params.type = buffer.binary ? cricket::DMT_BINARY : cricket::DMT_TEXT;
+
+ cricket::SendDataResult send_result;
+ // TODO(pthatcher): Use send_result.would_block for buffering.
+ return session_->data_channel()->SendData(
+ send_params, buffer.data, &send_result);
+}
+
+void DataChannel::SetReceiveSsrc(uint32 receive_ssrc) {
+ if (receive_ssrc_set_) {
+ ASSERT(session_->data_channel_type() == cricket::DCT_RTP ||
+ receive_ssrc_ == send_ssrc_);
+ return;
+ }
+ receive_ssrc_ = receive_ssrc;
+ receive_ssrc_set_ = true;
+ UpdateState();
+}
+
+// The remote peer requests that this channel be closed.
+void DataChannel::RemotePeerRequestClose() {
+ DoClose();
+}
+
+void DataChannel::SetSendSsrc(uint32 send_ssrc) {
+ if (send_ssrc_set_) {
+ ASSERT(session_->data_channel_type() == cricket::DCT_RTP ||
+ receive_ssrc_ == send_ssrc_);
+ return;
+ }
+ send_ssrc_ = send_ssrc;
+ send_ssrc_set_ = true;
+ UpdateState();
+}
+
+// The underlying data engine is closing.
+// This function makes sure the DataChannel is disconnected and changes state
+// to kClosed.
+void DataChannel::OnDataEngineClose() {
+ DoClose();
+}
+
+void DataChannel::DoClose() {
+ receive_ssrc_set_ = false;
+ send_ssrc_set_ = false;
+ SetState(kClosing);
+ UpdateState();
+}
+
+void DataChannel::UpdateState() {
+ switch (state_) {
+ case kConnecting: {
+ if (HasNegotiationCompleted()) {
+ if (!IsConnectedToDataSession()) {
+ ConnectToDataSession();
+ }
+ if (was_ever_writable_) {
+ SetState(kOpen);
+ // If we have received buffers before the channel got writable.
+ // Deliver them now.
+ DeliverQueuedData();
+ }
+ }
+ break;
+ }
+ case kOpen: {
+ break;
+ }
+ case kClosing: {
+ if (IsConnectedToDataSession()) {
+ DisconnectFromDataSession();
+ }
+ if (HasNegotiationCompleted()) {
+ SetState(kClosed);
+ }
+ break;
+ }
+ case kClosed:
+ break;
+ }
+}
+
+void DataChannel::SetState(DataState state) {
+ state_ = state;
+ if (observer_) {
+ observer_->OnStateChange();
+ }
+}
+
+void DataChannel::ConnectToDataSession() {
+ ASSERT(session_->data_channel() != NULL);
+ if (!session_->data_channel()) {
+ LOG(LS_ERROR) << "The DataEngine does not exist.";
+ return;
+ }
+
+ data_session_ = session_->data_channel();
+ data_session_->SignalReadyToSendData.connect(this,
+ &DataChannel::OnChannelReady);
+ data_session_->SignalDataReceived.connect(this, &DataChannel::OnDataReceived);
+}
+
+void DataChannel::DisconnectFromDataSession() {
+ data_session_->SignalReadyToSendData.disconnect(this);
+ data_session_->SignalDataReceived.disconnect(this);
+ data_session_ = NULL;
+}
+
+void DataChannel::DeliverQueuedData() {
+ if (was_ever_writable_ && observer_) {
+ while (!queued_data_.empty()) {
+ DataBuffer* buffer = queued_data_.front();
+ observer_->OnMessage(*buffer);
+ queued_data_.pop();
+ delete buffer;
+ }
+ }
+}
+
+void DataChannel::ClearQueuedData() {
+ while (!queued_data_.empty()) {
+ DataBuffer* buffer = queued_data_.front();
+ queued_data_.pop();
+ delete buffer;
+ }
+}
+
+void DataChannel::OnDataReceived(cricket::DataChannel* channel,
+ const cricket::ReceiveDataParams& params,
+ const talk_base::Buffer& payload) {
+ if (params.ssrc == receive_ssrc_) {
+ bool binary = false;
+ talk_base::scoped_ptr<DataBuffer> buffer(new DataBuffer(payload, binary));
+ if (was_ever_writable_ && observer_) {
+ observer_->OnMessage(*buffer.get());
+ } else {
+ if (queued_data_.size() > kMaxQueuedDataPackets) {
+ ClearQueuedData();
+ }
+ queued_data_.push(buffer.release());
+ }
+ }
+}
+
+void DataChannel::OnChannelReady(bool writable) {
+ if (!was_ever_writable_ && writable) {
+ was_ever_writable_ = true;
+ UpdateState();
+ }
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/datachannel.h b/talk/app/webrtc/datachannel.h
new file mode 100644
index 0000000..c79c491
--- /dev/null
+++ b/talk/app/webrtc/datachannel.h
@@ -0,0 +1,154 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_DATACHANNEL_H_
+#define TALK_APP_WEBRTC_DATACHANNEL_H_
+
+#include <string>
+#include <queue>
+
+#include "talk/app/webrtc/datachannelinterface.h"
+#include "talk/app/webrtc/proxy.h"
+#include "talk/base/scoped_ref_ptr.h"
+#include "talk/base/sigslot.h"
+#include "talk/session/media/channel.h"
+
+namespace webrtc {
+
+class WebRtcSession;
+
+// DataChannel is an implementation of the DataChannelInterface based on
+// libjingle's data engine. It provides an implementation of unreliable data
+// channels. Currently this class is specifically designed to use RtpDataEngine,
+// and will be changed to use SCTP in the future.
+
+// DataChannel states:
+// kConnecting: The channel has been created but SSRC for sending and receiving
+// has not yet been set and the transport might not yet be ready.
+// kOpen: The channel has a local SSRC set by a call to UpdateSendSsrc
+//        and a remote SSRC set by a call to UpdateReceiveSsrc and the
+//        transport has been writable once.
+// kClosing: DataChannelInterface::Close has been called or UpdateReceiveSsrc
+// has been called with SSRC==0
+// kClosed: Both UpdateReceiveSsrc and UpdateSendSsrc have been called with
+//          SSRC==0.
+class DataChannel : public DataChannelInterface,
+ public sigslot::has_slots<> {
+ public:
+ static talk_base::scoped_refptr<DataChannel> Create(
+ WebRtcSession* session,
+ const std::string& label,
+ const DataChannelInit* config);
+
+ virtual void RegisterObserver(DataChannelObserver* observer);
+ virtual void UnregisterObserver();
+
+ virtual std::string label() const { return label_; }
+ virtual bool reliable() const;
+ virtual int id() const { return config_.id; }
+ virtual uint64 buffered_amount() const;
+ virtual void Close();
+ virtual DataState state() const { return state_; }
+ virtual bool Send(const DataBuffer& buffer);
+
+ // Set the SSRC this channel should use to receive data from the
+ // underlying data engine.
+ void SetReceiveSsrc(uint32 receive_ssrc);
+  // The remote peer requests that this channel should be closed.
+ void RemotePeerRequestClose();
+
+ // Set the SSRC this channel should use to send data on the
+ // underlying data engine. |send_ssrc| == 0 means that the channel is no
+ // longer part of the session negotiation.
+ void SetSendSsrc(uint32 send_ssrc);
+
+ // Called if the underlying data engine is closing.
+ void OnDataEngineClose();
+
+ protected:
+ DataChannel(WebRtcSession* session, const std::string& label);
+ virtual ~DataChannel();
+
+ bool Init(const DataChannelInit* config);
+ bool HasNegotiationCompleted();
+
+ // Sigslots from cricket::DataChannel
+ void OnDataReceived(cricket::DataChannel* channel,
+ const cricket::ReceiveDataParams& params,
+ const talk_base::Buffer& payload);
+ void OnChannelReady(bool writable);
+
+ private:
+ void DoClose();
+ void UpdateState();
+ void SetState(DataState state);
+ void ConnectToDataSession();
+ void DisconnectFromDataSession();
+ bool IsConnectedToDataSession() { return data_session_ != NULL; }
+ void DeliverQueuedData();
+ void ClearQueuedData();
+
+ std::string label_;
+ DataChannelInit config_;
+ DataChannelObserver* observer_;
+ DataState state_;
+ bool was_ever_writable_;
+ WebRtcSession* session_;
+ cricket::DataChannel* data_session_;
+ bool send_ssrc_set_;
+ uint32 send_ssrc_;
+ bool receive_ssrc_set_;
+ uint32 receive_ssrc_;
+ std::queue<DataBuffer*> queued_data_;
+};
+
+class DataChannelFactory {
+ public:
+ virtual talk_base::scoped_refptr<DataChannel> CreateDataChannel(
+ const std::string& label,
+ const DataChannelInit* config) = 0;
+
+ protected:
+ virtual ~DataChannelFactory() {}
+};
+
+// Define proxy for DataChannelInterface.
+BEGIN_PROXY_MAP(DataChannel)
+ PROXY_METHOD1(void, RegisterObserver, DataChannelObserver*)
+ PROXY_METHOD0(void, UnregisterObserver)
+ PROXY_CONSTMETHOD0(std::string, label)
+ PROXY_CONSTMETHOD0(bool, reliable)
+ PROXY_CONSTMETHOD0(int, id)
+ PROXY_CONSTMETHOD0(DataState, state)
+ PROXY_CONSTMETHOD0(uint64, buffered_amount)
+ PROXY_METHOD0(void, Close)
+ PROXY_METHOD1(bool, Send, const DataBuffer&)
+END_PROXY()
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_DATACHANNEL_H_
diff --git a/talk/app/webrtc/datachannelinterface.h b/talk/app/webrtc/datachannelinterface.h
new file mode 100644
index 0000000..9c66a50
--- /dev/null
+++ b/talk/app/webrtc/datachannelinterface.h
@@ -0,0 +1,127 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file contains interfaces for DataChannels
+// http://dev.w3.org/2011/webrtc/editor/webrtc.html#rtcdatachannel
+
+#ifndef TALK_APP_WEBRTC_DATACHANNELINTERFACE_H_
+#define TALK_APP_WEBRTC_DATACHANNELINTERFACE_H_
+
+#include <string>
+
+#include "talk/base/basictypes.h"
+#include "talk/base/buffer.h"
+#include "talk/base/refcount.h"
+
+
+namespace webrtc {
+
+struct DataChannelInit {
+ DataChannelInit()
+ : reliable(false),
+ ordered(true),
+ maxRetransmitTime(-1),
+ maxRetransmits(-1),
+ negotiated(false),
+ id(-1) {
+ }
+
+ bool reliable; // Deprecated.
+ bool ordered; // True if ordered delivery is required.
+ int maxRetransmitTime; // The max period of time in milliseconds in which
+ // retransmissions will be sent. After this time, no
+ // more retransmissions will be sent. -1 if unset.
+ int maxRetransmits; // The max number of retransmissions. -1 if unset.
+ std::string protocol; // This is set by the application and opaque to the
+ // WebRTC implementation.
+ bool negotiated; // True if the channel has been externally negotiated
+ // and we do not send an in-band signalling in the
+ // form of an "open" message.
+ int id; // The stream id, or SID, for SCTP data channels. -1
+ // if unset.
+};
+
+struct DataBuffer {
+ DataBuffer(const talk_base::Buffer& data, bool binary)
+ : data(data),
+ binary(binary) {
+ }
+ // For convenience for unit tests.
+ explicit DataBuffer(const std::string& text)
+ : data(text.data(), text.length()),
+ binary(false) {
+ }
+ talk_base::Buffer data;
+ // Indicates if the received data contains UTF-8 or binary data.
+ // Note that the upper layers are left to verify the UTF-8 encoding.
+ // TODO(jiayl): prefer to use an enum instead of a bool.
+ bool binary;
+};
+
+class DataChannelObserver {
+ public:
+  // The data channel state has changed.
+ virtual void OnStateChange() = 0;
+ // A data buffer was successfully received.
+ virtual void OnMessage(const DataBuffer& buffer) = 0;
+
+ protected:
+ virtual ~DataChannelObserver() {}
+};
+
+class DataChannelInterface : public talk_base::RefCountInterface {
+ public:
+ enum DataState {
+ kConnecting,
+ kOpen, // The DataChannel is ready to send data.
+ kClosing,
+ kClosed
+ };
+
+ virtual void RegisterObserver(DataChannelObserver* observer) = 0;
+ virtual void UnregisterObserver() = 0;
+ // The label attribute represents a label that can be used to distinguish this
+ // DataChannel object from other DataChannel objects.
+ virtual std::string label() const = 0;
+ virtual bool reliable() const = 0;
+ virtual int id() const = 0;
+ virtual DataState state() const = 0;
+ // The buffered_amount returns the number of bytes of application data
+ // (UTF-8 text and binary data) that have been queued using SendBuffer but
+ // have not yet been transmitted to the network.
+ virtual uint64 buffered_amount() const = 0;
+ virtual void Close() = 0;
+ // Sends |data| to the remote peer.
+ virtual bool Send(const DataBuffer& buffer) = 0;
+
+ protected:
+ virtual ~DataChannelInterface() {}
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_DATACHANNELINTERFACE_H_
diff --git a/talk/app/webrtc/dtmfsender.cc b/talk/app/webrtc/dtmfsender.cc
new file mode 100644
index 0000000..6556acd
--- /dev/null
+++ b/talk/app/webrtc/dtmfsender.cc
@@ -0,0 +1,257 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/dtmfsender.h"
+
+#include <ctype.h>
+#include <string.h>
+
+#include <string>
+
+#include "talk/base/logging.h"
+#include "talk/base/thread.h"
+
+namespace webrtc {
+
+// Message id for the self-posted task that plays out the next queued tone.
+enum {
+  MSG_DO_INSERT_DTMF = 0,
+};
+
+// RFC4733
+// +-------+--------+------+---------+
+// | Event | Code   | Type | Volume? |
+// +-------+--------+------+---------+
+// | 0--9  | 0--9   | tone | yes     |
+// | *     | 10     | tone | yes     |
+// | #     | 11     | tone | yes     |
+// | A--D  | 12--15 | tone | yes     |
+// +-------+--------+------+---------+
+// The "," is a special event defined by the WebRTC spec. It means to delay for
+// 2 seconds before processing the next tone. We use -1 as its code.
+static const int kDtmfCodeTwoSecondDelay = -1;
+static const int kDtmfTwoSecondInMs = 2000;
+// Every character InsertDtmf() recognizes, including lowercase a--d.
+static const char kDtmfValidTones[] = ",0123456789*#ABCDabcd";
+// Lookup table: a character's index here, minus one, is its RFC4733 code
+// (',' maps to -1, i.e. kDtmfCodeTwoSecondDelay).
+static const char kDtmfTonesTable[] = ",0123456789*#ABCD";
+// The duration cannot be more than 6000ms or less than 70ms. The gap between
+// tones must be at least 50 ms.
+static const int kDtmfDefaultDurationMs = 100;
+static const int kDtmfMinDurationMs = 70;
+static const int kDtmfMaxDurationMs = 6000;
+static const int kDtmfDefaultGapMs = 50;
+static const int kDtmfMinGapMs = 50;
+
+// Get DTMF code from the DTMF event character.
+// Returns true and sets |*code| if |tone| is a recognized DTMF event
+// character (0-9, *, #, A-D case-insensitive, or ','); returns false
+// otherwise. ',' maps to kDtmfCodeTwoSecondDelay (-1).
+bool GetDtmfCode(char tone, int* code) {
+  if (tone == '\0') {
+    // Reject NUL explicitly: strchr() matches the terminating NUL of the
+    // table and would otherwise yield a bogus code.
+    return false;
+  }
+  // Convert a-d to A-D. Cast through unsigned char first: passing a
+  // negative char value to toupper() is undefined behavior.
+  char event = static_cast<char>(toupper(static_cast<unsigned char>(tone)));
+  const char* p = strchr(kDtmfTonesTable, event);
+  if (!p) {
+    return false;
+  }
+  // kDtmfTonesTable starts with ',', so the index is offset by one; this
+  // yields -1 (kDtmfCodeTwoSecondDelay) for ',' and 0--15 for the tones.
+  *code = p - kDtmfTonesTable - 1;
+  return true;
+}
+
+// Factory for DtmfSender. Returns NULL when |track| or |signaling_thread|
+// is missing; |provider| may be NULL (the sender then cannot send DTMF).
+talk_base::scoped_refptr<DtmfSender> DtmfSender::Create(
+    AudioTrackInterface* track,
+    talk_base::Thread* signaling_thread,
+    DtmfProviderInterface* provider) {
+  if (!track || !signaling_thread) {
+    return NULL;
+  }
+  return talk_base::scoped_refptr<DtmfSender>(
+      new talk_base::RefCountedObject<DtmfSender>(
+          track, signaling_thread, provider));
+}
+
+// Initializes duration/gap to their defaults (100ms / 50ms) and subscribes
+// to the provider's destruction signal so |provider_| can be cleared before
+// it dangles (see OnProviderDestroyed()).
+DtmfSender::DtmfSender(AudioTrackInterface* track,
+                       talk_base::Thread* signaling_thread,
+                       DtmfProviderInterface* provider)
+    : track_(track),
+      observer_(NULL),
+      signaling_thread_(signaling_thread),
+      provider_(provider),
+      duration_(kDtmfDefaultDurationMs),
+      inter_tone_gap_(kDtmfDefaultGapMs) {
+  ASSERT(track_ != NULL);
+  ASSERT(signaling_thread_ != NULL);
+  if (provider_) {
+    ASSERT(provider_->GetOnDestroyedSignal() != NULL);
+    provider_->GetOnDestroyedSignal()->connect(
+        this, &DtmfSender::OnProviderDestroyed);
+  }
+}
+
+DtmfSender::~DtmfSender() {
+  if (provider_) {
+    ASSERT(provider_->GetOnDestroyedSignal() != NULL);
+    provider_->GetOnDestroyedSignal()->disconnect(this);
+  }
+  // Drop any queued MSG_DO_INSERT_DTMF tasks so they cannot fire on a
+  // destroyed object.
+  StopSending();
+}
+
+// Sets the single observer notified via OnToneChange(). Not owned.
+void DtmfSender::RegisterObserver(DtmfSenderObserverInterface* observer) {
+  observer_ = observer;
+}
+
+void DtmfSender::UnregisterObserver() {
+  observer_ = NULL;
+}
+
+// Returns true if the provider can currently send DTMF on |track_|.
+// Always false once the provider has been destroyed.
+bool DtmfSender::CanInsertDtmf() {
+  ASSERT(signaling_thread_->IsCurrent());
+  if (!provider_) {
+    return false;
+  }
+  return provider_->CanInsertDtmf(track_->id());
+}
+
+// Validates |duration| (70--6000ms) and |inter_tone_gap| (>= 50ms), then
+// replaces any queued tones with |tones| and kicks off asynchronous playout
+// on the signaling thread. Returns false without touching state when the
+// parameters are invalid or DTMF cannot currently be sent.
+bool DtmfSender::InsertDtmf(const std::string& tones, int duration,
+                            int inter_tone_gap) {
+  ASSERT(signaling_thread_->IsCurrent());
+
+  if (duration > kDtmfMaxDurationMs ||
+      duration < kDtmfMinDurationMs ||
+      inter_tone_gap < kDtmfMinGapMs) {
+    LOG(LS_ERROR) << "InsertDtmf is called with invalid duration or tones gap. "
+        << "The duration cannot be more than " << kDtmfMaxDurationMs
+        << "ms or less than " << kDtmfMinDurationMs << "ms. "
+        << "The gap between tones must be at least " << kDtmfMinGapMs << "ms.";
+    return false;
+  }
+
+  if (!CanInsertDtmf()) {
+    LOG(LS_ERROR)
+        << "InsertDtmf is called on DtmfSender that can't send DTMF.";
+    return false;
+  }
+
+  tones_ = tones;
+  duration_ = duration;
+  inter_tone_gap_ = inter_tone_gap;
+  // Clear the previous queue.
+  signaling_thread_->Clear(this, MSG_DO_INSERT_DTMF);
+  // Kick off a new DTMF task queue.
+  signaling_thread_->Post(this, MSG_DO_INSERT_DTMF);
+  return true;
+}
+
+// Returns the track this sender was created with.
+const AudioTrackInterface* DtmfSender::track() const {
+  return track_;
+}
+
+// Returns the tones remaining to be played out.
+std::string DtmfSender::tones() const {
+  return tones_;
+}
+
+// Per-tone duration in ms; the value set by the last successful
+// InsertDtmf(), or the 100ms default.
+int DtmfSender::duration() const {
+  return duration_;
+}
+
+// Gap between tones in ms; the value set by the last successful
+// InsertDtmf(), or the 50ms default.
+int DtmfSender::inter_tone_gap() const {
+  return inter_tone_gap_;
+}
+
+// Dispatches posted messages; the only message this handler expects is the
+// DTMF playout task.
+void DtmfSender::OnMessage(talk_base::Message* msg) {
+  if (msg->message_id == MSG_DO_INSERT_DTMF) {
+    DoInsertDtmf();
+  } else {
+    ASSERT(false);
+  }
+}
+
+// Plays out the next tone in |tones_| (skipping unrecognized characters) and
+// re-posts itself after the appropriate delay until the buffer is exhausted.
+void DtmfSender::DoInsertDtmf() {
+  ASSERT(signaling_thread_->IsCurrent());
+
+  // Get the first DTMF tone from the tone buffer. Unrecognized characters will
+  // be ignored and skipped.
+  size_t first_tone_pos = tones_.find_first_of(kDtmfValidTones);
+  int code = 0;
+  if (first_tone_pos == std::string::npos) {
+    tones_.clear();
+    // Fire an "OnToneChange" event with an empty string and stop.
+    if (observer_) {
+      observer_->OnToneChange(std::string());
+    }
+    return;
+  } else {
+    char tone = tones_[first_tone_pos];
+    if (!GetDtmfCode(tone, &code)) {
+      // find_first_of(kDtmfValidTones) should have guaranteed that |tone| is
+      // a valid DTMF tone.
+      ASSERT(false);
+    }
+  }
+
+  int tone_gap = inter_tone_gap_;
+  if (code == kDtmfCodeTwoSecondDelay) {
+    // Special case defined by WebRTC - the character ',' indicates a delay of
+    // 2 seconds before processing the next character in the tones parameter.
+    tone_gap = kDtmfTwoSecondInMs;
+  } else {
+    if (!provider_) {
+      LOG(LS_ERROR) << "The DtmfProvider has been destroyed.";
+      return;
+    }
+    // The provider starts playout of the given tone on the
+    // associated RTP media stream, using the appropriate codec.
+    if (!provider_->InsertDtmf(track_->id(), code, duration_)) {
+      LOG(LS_ERROR) << "The DtmfProvider can no longer send DTMF.";
+      return;
+    }
+    // Wait for the number of milliseconds specified by |duration_| (the tone
+    // playout time) in addition to the inter-tone gap.
+    tone_gap += duration_;
+  }
+
+  // Fire an "OnToneChange" event with the tone that's just processed.
+  if (observer_) {
+    observer_->OnToneChange(tones_.substr(first_tone_pos, 1));
+  }
+
+  // Erase the unrecognized characters plus the tone that's just processed.
+  tones_.erase(0, first_tone_pos + 1);
+
+  // Continue with the next tone.
+  signaling_thread_->PostDelayed(tone_gap, this, MSG_DO_INSERT_DTMF);
+}
+
+// Slot connected to the provider's destruction signal. After this runs, no
+// further tones can be sent.
+void DtmfSender::OnProviderDestroyed() {
+  LOG(LS_INFO) << "The Dtmf provider is deleted. Clear the sending queue.";
+  StopSending();
+  provider_ = NULL;
+}
+
+// Cancels all pending messages (including queued DTMF tasks) for this sender.
+void DtmfSender::StopSending() {
+  signaling_thread_->Clear(this);
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/dtmfsender.h b/talk/app/webrtc/dtmfsender.h
new file mode 100644
index 0000000..f2bebde
--- /dev/null
+++ b/talk/app/webrtc/dtmfsender.h
@@ -0,0 +1,138 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_DTMFSENDER_H_
+#define TALK_APP_WEBRTC_DTMFSENDER_H_
+
+#include <string>
+
+#include "talk/app/webrtc/dtmfsenderinterface.h"
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/proxy.h"
+#include "talk/base/common.h"
+#include "talk/base/messagehandler.h"
+#include "talk/base/refcount.h"
+
+// DtmfSender is the native implementation of the RTCDTMFSender defined by
+// the WebRTC W3C Editor's Draft.
+// http://dev.w3.org/2011/webrtc/editor/webrtc.html
+
+namespace talk_base {
+class Thread;
+}
+
+namespace webrtc {
+
+// This interface is called by DtmfSender to talk to the actual audio channel
+// to send DTMF.
+class DtmfProviderInterface {
+ public:
+  // Returns true if the audio track with given id (|track_id|) is capable
+  // of sending DTMF. Otherwise returns false.
+  virtual bool CanInsertDtmf(const std::string& track_id) = 0;
+  // Sends DTMF |code| via the audio track with given id (|track_id|).
+  // The |duration| indicates the length of the DTMF tone in ms.
+  // Returns true on success and false on failure.
+  virtual bool InsertDtmf(const std::string& track_id,
+                          int code, int duration) = 0;
+  // Returns a |sigslot::signal0<>| signal. The signal should fire before
+  // the provider is destroyed.
+  virtual sigslot::signal0<>* GetOnDestroyedSignal() = 0;
+
+ protected:
+  // Providers are never deleted through this interface.
+  virtual ~DtmfProviderInterface() {}
+};
+
+// Concrete implementation of DtmfSenderInterface. Tones are played out
+// asynchronously via messages posted to |signaling_thread_|; CanInsertDtmf(),
+// InsertDtmf() and the playout task assert they run on that thread.
+class DtmfSender
+    : public DtmfSenderInterface,
+      public sigslot::has_slots<>,
+      public talk_base::MessageHandler {
+ public:
+  // Returns NULL if |track| or |signaling_thread| is NULL.
+  static talk_base::scoped_refptr<DtmfSender> Create(
+      AudioTrackInterface* track,
+      talk_base::Thread* signaling_thread,
+      DtmfProviderInterface* provider);
+
+  // Implements DtmfSenderInterface.
+  virtual void RegisterObserver(DtmfSenderObserverInterface* observer) OVERRIDE;
+  virtual void UnregisterObserver() OVERRIDE;
+  virtual bool CanInsertDtmf() OVERRIDE;
+  virtual bool InsertDtmf(const std::string& tones, int duration,
+                          int inter_tone_gap) OVERRIDE;
+  virtual const AudioTrackInterface* track() const OVERRIDE;
+  virtual std::string tones() const OVERRIDE;
+  virtual int duration() const OVERRIDE;
+  virtual int inter_tone_gap() const OVERRIDE;
+
+ protected:
+  DtmfSender(AudioTrackInterface* track,
+             talk_base::Thread* signaling_thread,
+             DtmfProviderInterface* provider);
+  virtual ~DtmfSender();
+
+ private:
+  DtmfSender();
+
+  // Implements MessageHandler.
+  virtual void OnMessage(talk_base::Message* msg);
+
+  // The DTMF sending task.
+  void DoInsertDtmf();
+
+  // Connected to |provider_|'s destruction signal; stops sending and clears
+  // |provider_|.
+  void OnProviderDestroyed();
+
+  // Cancels any pending messages for this sender on |signaling_thread_|.
+  void StopSending();
+
+  talk_base::scoped_refptr<AudioTrackInterface> track_;
+  DtmfSenderObserverInterface* observer_;  // Not owned.
+  talk_base::Thread* signaling_thread_;  // Not owned.
+  DtmfProviderInterface* provider_;  // Not owned; NULL after it is destroyed.
+  std::string tones_;  // Tones remaining to be played out.
+  int duration_;  // Per-tone duration in ms.
+  int inter_tone_gap_;  // Gap between tones in ms.
+
+  DISALLOW_COPY_AND_ASSIGN(DtmfSender);
+};
+
+// Define proxy for DtmfSenderInterface. The proxy class is generated by the
+// macros declared in talk/app/webrtc/proxy.h.
+BEGIN_PROXY_MAP(DtmfSender)
+    PROXY_METHOD1(void, RegisterObserver, DtmfSenderObserverInterface*)
+    PROXY_METHOD0(void, UnregisterObserver)
+    PROXY_METHOD0(bool, CanInsertDtmf)
+    PROXY_METHOD3(bool, InsertDtmf, const std::string&, int, int)
+    PROXY_CONSTMETHOD0(const AudioTrackInterface*, track)
+    PROXY_CONSTMETHOD0(std::string, tones)
+    PROXY_CONSTMETHOD0(int, duration)
+    PROXY_CONSTMETHOD0(int, inter_tone_gap)
+END_PROXY()
+
+// Get DTMF code from the DTMF event character. Declared here so unit tests
+// can exercise the tone-to-code mapping directly.
+bool GetDtmfCode(char tone, int* code);
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_DTMFSENDER_H_
diff --git a/talk/app/webrtc/dtmfsender_unittest.cc b/talk/app/webrtc/dtmfsender_unittest.cc
new file mode 100644
index 0000000..e1c3be9
--- /dev/null
+++ b/talk/app/webrtc/dtmfsender_unittest.cc
@@ -0,0 +1,356 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/dtmfsender.h"
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "talk/app/webrtc/audiotrack.h"
+#include "talk/base/gunit.h"
+#include "talk/base/logging.h"
+#include "talk/base/timeutils.h"
+
+using webrtc::AudioTrackInterface;
+using webrtc::AudioTrack;
+using webrtc::DtmfProviderInterface;
+using webrtc::DtmfSender;
+using webrtc::DtmfSenderObserverInterface;
+
+static const char kTestAudioLabel[] = "test_audio_track";
+static const int kMaxWaitMs = 3000;
+
+// Test observer that records every OnToneChange() callback; the trailing
+// empty-string callback marks completion of the tone sequence.
+class FakeDtmfObserver : public DtmfSenderObserverInterface {
+ public:
+  FakeDtmfObserver() : completed_(false) {}
+
+  // Implements DtmfSenderObserverInterface.
+  virtual void OnToneChange(const std::string& tone) OVERRIDE {
+    LOG(LS_VERBOSE) << "FakeDtmfObserver::OnToneChange '" << tone << "'.";
+    tones_.push_back(tone);
+    if (tone.empty()) {
+      completed_ = true;
+    }
+  }
+
+  // getters
+  const std::vector<std::string>& tones() const {
+    return tones_;
+  }
+  bool completed() const {
+    return completed_;
+  }
+
+ private:
+  std::vector<std::string> tones_;  // Every tone received, in order.
+  bool completed_;  // True once the empty end-of-sequence tone arrives.
+};
+
+// Test provider that records each InsertDtmf() call (code, duration, and the
+// measured wall-clock gap since the previous call) and fires its destruction
+// signal from the destructor, as the DtmfProviderInterface contract requires.
+class FakeDtmfProvider : public DtmfProviderInterface {
+ public:
+  // One recorded InsertDtmf() call.
+  struct DtmfInfo {
+    DtmfInfo(int code, int duration, int gap)
+      : code(code),
+        duration(duration),
+        gap(gap) {}
+    int code;
+    int duration;
+    int gap;  // ms elapsed since the previous InsertDtmf() call; 0 for first.
+  };
+
+  FakeDtmfProvider() : last_insert_dtmf_call_(0) {}
+
+  ~FakeDtmfProvider() {
+    // Notify listeners (e.g. DtmfSender) before members are torn down.
+    SignalDestroyed();
+  }
+
+  // Implements DtmfProviderInterface.
+  virtual bool CanInsertDtmf(const std::string& track_label) OVERRIDE {
+    return (can_insert_dtmf_tracks_.count(track_label) != 0);
+  }
+
+  virtual bool InsertDtmf(const std::string& track_label,
+                          int code, int duration) OVERRIDE {
+    int gap = 0;
+    // TODO(ronghuawu): Make the timer (basically the talk_base::TimeNanos)
+    // mockable and use a fake timer in the unit tests.
+    if (last_insert_dtmf_call_ > 0) {
+      gap = static_cast<int>(talk_base::Time() - last_insert_dtmf_call_);
+    }
+    last_insert_dtmf_call_ = talk_base::Time();
+
+    LOG(LS_VERBOSE) << "FakeDtmfProvider::InsertDtmf code=" << code
+                    << " duration=" << duration
+                    << " gap=" << gap << ".";
+    dtmf_info_queue_.push_back(DtmfInfo(code, duration, gap));
+    return true;
+  }
+
+  virtual sigslot::signal0<>* GetOnDestroyedSignal() {
+    return &SignalDestroyed;
+  }
+
+  // getter and setter
+  const std::vector<DtmfInfo>& dtmf_info_queue() const {
+    return dtmf_info_queue_;
+  }
+
+  // helper functions
+  void AddCanInsertDtmfTrack(const std::string& label) {
+    can_insert_dtmf_tracks_.insert(label);
+  }
+  void RemoveCanInsertDtmfTrack(const std::string& label) {
+    can_insert_dtmf_tracks_.erase(label);
+  }
+
+ private:
+  std::set<std::string> can_insert_dtmf_tracks_;  // Labels allowed to send.
+  std::vector<DtmfInfo> dtmf_info_queue_;  // All recorded InsertDtmf() calls.
+  int64 last_insert_dtmf_call_;  // Timestamp (ms) of the previous call.
+  sigslot::signal0<> SignalDestroyed;
+};
+
+// Fixture wiring a DtmfSender to a fake observer and fake provider, with
+// helpers to build expected call lists and compare them (gaps are checked
+// with ~20ms slack since real wall-clock timers are used).
+class DtmfSenderTest : public testing::Test {
+ protected:
+  DtmfSenderTest()
+      : track_(AudioTrack::Create(kTestAudioLabel, NULL)),
+        observer_(new talk_base::RefCountedObject<FakeDtmfObserver>()),
+        provider_(new FakeDtmfProvider()) {
+    provider_->AddCanInsertDtmfTrack(kTestAudioLabel);
+    dtmf_ = DtmfSender::Create(track_, talk_base::Thread::Current(),
+                               provider_.get());
+    dtmf_->RegisterObserver(observer_.get());
+  }
+
+  ~DtmfSenderTest() {
+    if (dtmf_.get()) {
+      dtmf_->UnregisterObserver();
+    }
+  }
+
+  // Constructs a list of DtmfInfo from |tones|, |duration| and
+  // |inter_tone_gap|.
+  void GetDtmfInfoFromString(const std::string& tones, int duration,
+                             int inter_tone_gap,
+                             std::vector<FakeDtmfProvider::DtmfInfo>* dtmfs) {
+    // Init extra_delay as -inter_tone_gap - duration to ensure the first
+    // DtmfInfo's gap field will be 0.
+    int extra_delay = -1 * (inter_tone_gap + duration);
+
+    std::string::const_iterator it = tones.begin();
+    for (; it != tones.end(); ++it) {
+      char tone = *it;
+      int code = 0;
+      webrtc::GetDtmfCode(tone, &code);
+      if (tone == ',') {
+        // ',' produces no InsertDtmf call; it delays the next one instead.
+        extra_delay = 2000;  // 2 seconds
+      } else {
+        dtmfs->push_back(FakeDtmfProvider::DtmfInfo(code, duration,
+                         duration + inter_tone_gap + extra_delay));
+        extra_delay = 0;
+      }
+    }
+  }
+
+  // Asserts the sender's accessors match the given expected values.
+  void VerifyExpectedState(AudioTrackInterface* track,
+                          const std::string& tones,
+                          int duration, int inter_tone_gap) {
+    EXPECT_EQ(track, dtmf_->track());
+    EXPECT_EQ(tones, dtmf_->tones());
+    EXPECT_EQ(duration, dtmf_->duration());
+    EXPECT_EQ(inter_tone_gap, dtmf_->inter_tone_gap());
+  }
+
+  // Verify the provider got all the expected calls.
+  void VerifyOnProvider(const std::string& tones, int duration,
+                        int inter_tone_gap) {
+    std::vector<FakeDtmfProvider::DtmfInfo> dtmf_queue_ref;
+    GetDtmfInfoFromString(tones, duration, inter_tone_gap, &dtmf_queue_ref);
+    VerifyOnProvider(dtmf_queue_ref);
+  }
+
+  void VerifyOnProvider(
+      const std::vector<FakeDtmfProvider::DtmfInfo>& dtmf_queue_ref) {
+    const std::vector<FakeDtmfProvider::DtmfInfo>& dtmf_queue =
+        provider_->dtmf_info_queue();
+    ASSERT_EQ(dtmf_queue_ref.size(), dtmf_queue.size());
+    std::vector<FakeDtmfProvider::DtmfInfo>::const_iterator it_ref =
+        dtmf_queue_ref.begin();
+    std::vector<FakeDtmfProvider::DtmfInfo>::const_iterator it =
+        dtmf_queue.begin();
+    while (it_ref != dtmf_queue_ref.end() && it != dtmf_queue.end()) {
+      EXPECT_EQ(it_ref->code, it->code);
+      EXPECT_EQ(it_ref->duration, it->duration);
+      // Allow ~20ms error.
+      EXPECT_GE(it_ref->gap, it->gap - 20);
+      EXPECT_LE(it_ref->gap, it->gap + 20);
+      ++it_ref;
+      ++it;
+    }
+  }
+
+  // Verify the observer got all the expected callbacks.
+  void VerifyOnObserver(const std::string& tones_ref) {
+    const std::vector<std::string>& tones = observer_->tones();
+    // The observer will get an empty string at the end.
+    EXPECT_EQ(tones_ref.size() + 1, tones.size());
+    EXPECT_TRUE(tones.back().empty());
+    std::string::const_iterator it_ref = tones_ref.begin();
+    std::vector<std::string>::const_iterator it = tones.begin();
+    while (it_ref != tones_ref.end() && it != tones.end()) {
+      EXPECT_EQ(*it_ref, it->at(0));
+      ++it_ref;
+      ++it;
+    }
+  }
+
+  talk_base::scoped_refptr<AudioTrackInterface> track_;
+  talk_base::scoped_ptr<FakeDtmfObserver> observer_;
+  talk_base::scoped_ptr<FakeDtmfProvider> provider_;
+  talk_base::scoped_refptr<DtmfSender> dtmf_;
+};
+
+// CanInsertDtmf() tracks whether the provider accepts the track's label.
+TEST_F(DtmfSenderTest, CanInsertDtmf) {
+  EXPECT_TRUE(dtmf_->CanInsertDtmf());
+  provider_->RemoveCanInsertDtmfTrack(kTestAudioLabel);
+  EXPECT_FALSE(dtmf_->CanInsertDtmf());
+}
+
+// Unrecognized characters in the tone string are silently skipped.
+TEST_F(DtmfSenderTest, InsertDtmf) {
+  std::string tones = "@1%a&*$";
+  int duration = 100;
+  int inter_tone_gap = 50;
+  EXPECT_TRUE(dtmf_->InsertDtmf(tones, duration, inter_tone_gap));
+  EXPECT_TRUE_WAIT(observer_->completed(), kMaxWaitMs);
+
+  // The unrecognized characters should be ignored.
+  std::string known_tones = "1a*";
+  VerifyOnProvider(known_tones, duration, inter_tone_gap);
+  VerifyOnObserver(known_tones);
+}
+
+// A second InsertDtmf() call cancels the remainder of the first sequence.
+TEST_F(DtmfSenderTest, InsertDtmfTwice) {
+  std::string tones1 = "12";
+  std::string tones2 = "ab";
+  int duration = 100;
+  int inter_tone_gap = 50;
+  EXPECT_TRUE(dtmf_->InsertDtmf(tones1, duration, inter_tone_gap));
+  VerifyExpectedState(track_, tones1, duration, inter_tone_gap);
+  // Wait until the first tone got sent.
+  EXPECT_TRUE_WAIT(observer_->tones().size() == 1, kMaxWaitMs);
+  VerifyExpectedState(track_, "2", duration, inter_tone_gap);
+  // Insert with another tone buffer.
+  EXPECT_TRUE(dtmf_->InsertDtmf(tones2, duration, inter_tone_gap));
+  VerifyExpectedState(track_, tones2, duration, inter_tone_gap);
+  // Wait until it's completed.
+  EXPECT_TRUE_WAIT(observer_->completed(), kMaxWaitMs);
+
+  std::vector<FakeDtmfProvider::DtmfInfo> dtmf_queue_ref;
+  GetDtmfInfoFromString("1", duration, inter_tone_gap, &dtmf_queue_ref);
+  GetDtmfInfoFromString("ab", duration, inter_tone_gap, &dtmf_queue_ref);
+  VerifyOnProvider(dtmf_queue_ref);
+  VerifyOnObserver("1ab");
+}
+
+// Destroying the provider mid-sequence must stop further tone callbacks.
+TEST_F(DtmfSenderTest, InsertDtmfWhileProviderIsDeleted) {
+  std::string tones = "@1%a&*$";
+  int duration = 100;
+  int inter_tone_gap = 50;
+  EXPECT_TRUE(dtmf_->InsertDtmf(tones, duration, inter_tone_gap));
+  // Wait until the first tone got sent.
+  EXPECT_TRUE_WAIT(observer_->tones().size() == 1, kMaxWaitMs);
+  // Delete provider.
+  provider_.reset();
+  // The queue should be discontinued so no more tone callbacks.
+  WAIT(false, 200);
+  EXPECT_EQ(1U, observer_->tones().size());
+}
+
+// Releasing the sender mid-sequence must stop further tone callbacks.
+TEST_F(DtmfSenderTest, InsertDtmfWhileSenderIsDeleted) {
+  std::string tones = "@1%a&*$";
+  int duration = 100;
+  int inter_tone_gap = 50;
+  EXPECT_TRUE(dtmf_->InsertDtmf(tones, duration, inter_tone_gap));
+  // Wait until the first tone got sent.
+  EXPECT_TRUE_WAIT(observer_->tones().size() == 1, kMaxWaitMs);
+  // Delete the sender.
+  dtmf_ = NULL;
+  // The queue should be discontinued so no more tone callbacks.
+  WAIT(false, 200);
+  EXPECT_EQ(1U, observer_->tones().size());
+}
+
+// An empty tone string cancels the pending sequence immediately.
+TEST_F(DtmfSenderTest, InsertEmptyTonesToCancelPreviousTask) {
+  std::string tones1 = "12";
+  std::string tones2 = "";
+  int duration = 100;
+  int inter_tone_gap = 50;
+  EXPECT_TRUE(dtmf_->InsertDtmf(tones1, duration, inter_tone_gap));
+  // Wait until the first tone got sent.
+  EXPECT_TRUE_WAIT(observer_->tones().size() == 1, kMaxWaitMs);
+  // Insert with another tone buffer.
+  EXPECT_TRUE(dtmf_->InsertDtmf(tones2, duration, inter_tone_gap));
+  // Wait until it's completed.
+  EXPECT_TRUE_WAIT(observer_->completed(), kMaxWaitMs);
+
+  std::vector<FakeDtmfProvider::DtmfInfo> dtmf_queue_ref;
+  GetDtmfInfoFromString("1", duration, inter_tone_gap, &dtmf_queue_ref);
+  VerifyOnProvider(dtmf_queue_ref);
+  VerifyOnObserver("1");
+}
+
+// ',' inserts a 2-second delay between tones without an InsertDtmf call.
+TEST_F(DtmfSenderTest, InsertDtmfWithCommaAsDelay) {
+  std::string tones = "3,4";
+  int duration = 100;
+  int inter_tone_gap = 50;
+  EXPECT_TRUE(dtmf_->InsertDtmf(tones, duration, inter_tone_gap));
+  EXPECT_TRUE_WAIT(observer_->completed(), kMaxWaitMs);
+
+  VerifyOnProvider(tones, duration, inter_tone_gap);
+  VerifyOnObserver(tones);
+}
+
+// InsertDtmf() fails when the provider rejects the track.
+TEST_F(DtmfSenderTest, TryInsertDtmfWhenItDoesNotWork) {
+  std::string tones = "3,4";
+  int duration = 100;
+  int inter_tone_gap = 50;
+  provider_->RemoveCanInsertDtmfTrack(kTestAudioLabel);
+  EXPECT_FALSE(dtmf_->InsertDtmf(tones, duration, inter_tone_gap));
+}
+
+// Duration must be within [70, 6000] ms and the gap at least 50 ms.
+TEST_F(DtmfSenderTest, InsertDtmfWithInvalidDurationOrGap) {
+  std::string tones = "3,4";
+  int duration = 100;
+  int inter_tone_gap = 50;
+
+  EXPECT_FALSE(dtmf_->InsertDtmf(tones, 6001, inter_tone_gap));
+  EXPECT_FALSE(dtmf_->InsertDtmf(tones, 69, inter_tone_gap));
+  EXPECT_FALSE(dtmf_->InsertDtmf(tones, duration, 49));
+
+  EXPECT_TRUE(dtmf_->InsertDtmf(tones, duration, inter_tone_gap));
+}
diff --git a/talk/app/webrtc/dtmfsenderinterface.h b/talk/app/webrtc/dtmfsenderinterface.h
new file mode 100644
index 0000000..46f3924
--- /dev/null
+++ b/talk/app/webrtc/dtmfsenderinterface.h
@@ -0,0 +1,105 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_DTMFSENDERINTERFACE_H_
+#define TALK_APP_WEBRTC_DTMFSENDERINTERFACE_H_
+
+#include <string>
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/base/common.h"
+#include "talk/base/refcount.h"
+
+// This file contains interfaces for DtmfSender.
+
+namespace webrtc {
+
+// DtmfSender callback interface. Application should implement this interface
+// to get notifications from the DtmfSender.
+class DtmfSenderObserverInterface {
+ public:
+  // Triggered when DTMF |tone| is sent.
+  // If |tone| is empty that means the DtmfSender has sent out all the given
+  // tones.
+  virtual void OnToneChange(const std::string& tone) = 0;
+
+ protected:
+  // Observers are never deleted through this interface.
+  virtual ~DtmfSenderObserverInterface() {}
+};
+
+// The interface of native implementation of the RTCDTMFSender defined by the
+// WebRTC W3C Editor's Draft. Reference counted (held via scoped_refptr).
+class DtmfSenderInterface : public talk_base::RefCountInterface {
+ public:
+  // Sets/clears the single observer notified via OnToneChange(). Not owned.
+  virtual void RegisterObserver(DtmfSenderObserverInterface* observer) = 0;
+  virtual void UnregisterObserver() = 0;
+
+  // Returns true if this DtmfSender is capable of sending DTMF.
+  // Otherwise returns false.
+  virtual bool CanInsertDtmf() = 0;
+
+  // Queues a task that sends the DTMF |tones|. The |tones| parameter is treated
+  // as a series of characters. The characters 0 through 9, A through D, #, and
+  // * generate the associated DTMF tones. The characters a to d are equivalent
+  // to A to D. The character ',' indicates a delay of 2 seconds before
+  // processing the next character in the tones parameter.
+  // Unrecognized characters are ignored.
+  // The |duration| parameter indicates the duration in ms to use for each
+  // character passed in the |tones| parameter.
+  // The duration cannot be more than 6000 or less than 70.
+  // The |inter_tone_gap| parameter indicates the gap between tones in ms.
+  // The |inter_tone_gap| must be at least 50 ms but should be as short as
+  // possible.
+  // If InsertDtmf is called on the same object while an existing task for this
+  // object to generate DTMF is still running, the previous task is canceled.
+  // Returns true on success and false on failure.
+  virtual bool InsertDtmf(const std::string& tones, int duration,
+                          int inter_tone_gap) = 0;
+
+  // Returns the track given as argument to the constructor.
+  virtual const AudioTrackInterface* track() const = 0;
+
+  // Returns the tones remaining to be played out.
+  virtual std::string tones() const = 0;
+
+  // Returns the current tone duration value in ms.
+  // This value will be the value last set via the InsertDtmf() method, or the
+  // default value of 100 ms if InsertDtmf() was never called.
+  virtual int duration() const = 0;
+
+  // Returns the current value of the between-tone gap in ms.
+  // This value will be the value last set via the InsertDtmf() method, or the
+  // default value of 50 ms if InsertDtmf() was never called.
+  virtual int inter_tone_gap() const = 0;
+
+ protected:
+  // Protected: lifetime is managed by reference counting.
+  virtual ~DtmfSenderInterface() {}
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_DTMFSENDERINTERFACE_H_
diff --git a/talk/app/webrtc/fakeportallocatorfactory.h b/talk/app/webrtc/fakeportallocatorfactory.h
new file mode 100644
index 0000000..c1727ae
--- /dev/null
+++ b/talk/app/webrtc/fakeportallocatorfactory.h
@@ -0,0 +1,74 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file defines a fake port allocator factory used for testing.
+// This implementation creates instances of cricket::FakePortAllocator.
+
+#ifndef TALK_APP_WEBRTC_FAKEPORTALLOCATORFACTORY_H_
+#define TALK_APP_WEBRTC_FAKEPORTALLOCATORFACTORY_H_
+
+#include "talk/app/webrtc/peerconnectioninterface.h"
+#include "talk/p2p/client/fakeportallocator.h"
+
+namespace webrtc {
+
+// Test-only factory that hands out cricket::FakePortAllocator instances and
+// records the STUN/TURN configurations passed to CreatePortAllocator() so
+// tests can inspect them afterwards.
+class FakePortAllocatorFactory : public PortAllocatorFactoryInterface {
+ public:
+ // Returns a new instance wrapped in talk_base::RefCountedObject; callers
+ // are expected to hold it via a ref-counting pointer.
+ static FakePortAllocatorFactory* Create() {
+ talk_base::RefCountedObject<FakePortAllocatorFactory>* allocator =
+ new talk_base::RefCountedObject<FakePortAllocatorFactory>();
+ return allocator;
+ }
+
+ // Stores copies of |stun_configurations| and |turn_configurations| for the
+ // accessors below, then returns a new FakePortAllocator bound to the
+ // current thread. The caller owns the returned allocator.
+ virtual cricket::PortAllocator* CreatePortAllocator(
+ const std::vector<StunConfiguration>& stun_configurations,
+ const std::vector<TurnConfiguration>& turn_configurations) {
+ stun_configs_ = stun_configurations;
+ turn_configs_ = turn_configurations;
+ return new cricket::FakePortAllocator(talk_base::Thread::Current(), NULL);
+ }
+
+ // Configurations captured by the most recent CreatePortAllocator() call.
+ const std::vector<StunConfiguration>& stun_configs() const {
+ return stun_configs_;
+ }
+
+ const std::vector<TurnConfiguration>& turn_configs() const {
+ return turn_configs_;
+ }
+
+ protected:
+ FakePortAllocatorFactory() {}
+ // Non-public: destruction happens through RefCountedObject's Release().
+ ~FakePortAllocatorFactory() {}
+
+ private:
+ std::vector<PortAllocatorFactoryInterface::StunConfiguration> stun_configs_;
+ std::vector<PortAllocatorFactoryInterface::TurnConfiguration> turn_configs_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_FAKEPORTALLOCATORFACTORY_H_
diff --git a/talk/app/webrtc/java/README b/talk/app/webrtc/java/README
new file mode 100644
index 0000000..454046c
--- /dev/null
+++ b/talk/app/webrtc/java/README
@@ -0,0 +1,23 @@
+This directory holds a Java implementation of the webrtc::PeerConnection API, as
+well as the JNI glue C++ code that lets the Java implementation reuse the C++
+implementation of the same API.
+
+To build the Java API and related tests, build with
+OS=linux or OS=android and include
+build_with_libjingle=1 build_with_chromium=0
+in $GYP_DEFINES.
+
+To use the Java API, start by looking at the public interface of
+org.webrtc.PeerConnection{,Factory} and the org.webrtc.PeerConnectionTest.
+
+To understand the implementation of the API, see the native code in jni/.
+
+An example command-line to build & run the unittest:
+cd path/to/trunk
+GYP_DEFINES="build_with_libjingle=1 build_with_chromium=0 java_home=path/to/JDK" gclient runhooks && \
+ ninja -C out/Debug libjingle_peerconnection_java_unittest && \
+ ./out/Debug/libjingle_peerconnection_java_unittest
+(where path/to/JDK should contain include/jni.h)
+
+During development it can be helpful to run the JVM with the -Xcheck:jni flag.
+
diff --git a/talk/app/webrtc/java/jni/peerconnection_jni.cc b/talk/app/webrtc/java/jni/peerconnection_jni.cc
new file mode 100644
index 0000000..6b5a6a4
--- /dev/null
+++ b/talk/app/webrtc/java/jni/peerconnection_jni.cc
@@ -0,0 +1,1359 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Hints for future visitors:
+// This entire file is an implementation detail of the org.webrtc Java package,
+// the most interesting bits of which are org.webrtc.PeerConnection{,Factory}.
+// The layout of this file is roughly:
+// - various helper C++ functions & classes that wrap Java counterparts and
+// expose a C++ interface that can be passed to the C++ PeerConnection APIs
+// - implementations of methods declared "static" in the Java package (named
+// things like Java_org_webrtc_OMG_Can_This_Name_Be_Any_Longer, prescribed by
+// the JNI spec).
+//
+// Lifecycle notes: objects are owned where they will be called; in other words
+// FooObservers are owned by C++-land, and user-callable objects (e.g.
+// PeerConnection and VideoTrack) are owned by Java-land.
+// When this file allocates C++ RefCountInterfaces it AddRef()s an artificial
+// ref simulating the jlong held in Java-land, and then Release()s the ref in
+// the respective free call. Sometimes this AddRef is implicit in the
+// construction of a scoped_refptr<> which is then .release()d.
+// Any persistent (non-local) references from C++ to Java must be global or weak
+// (in which case they must be checked before use)!
+//
+// Exception notes: pretty much all JNI calls can throw Java exceptions, so each
+// call through a JNIEnv* pointer needs to be followed by an ExceptionCheck()
+// call. In this file this is done in CHECK_EXCEPTION, making for much easier
+// debugging in case of failure (the alternative is to wait for control to
+// return to the Java frame that called code in this file, at which point it's
+// impossible to tell which JNI call broke).
+
+#include <jni.h>
+#undef JNIEXPORT
+#define JNIEXPORT __attribute__((visibility("default")))
+
+#include <map>
+
+#include "talk/app/webrtc/mediaconstraintsinterface.h"
+#include "talk/app/webrtc/peerconnectioninterface.h"
+#include "talk/app/webrtc/videosourceinterface.h"
+#include "talk/base/logging.h"
+#include "talk/base/ssladapter.h"
+#include "talk/media/base/videocapturer.h"
+#include "talk/media/base/videorenderer.h"
+#include "talk/media/devices/videorendererfactory.h"
+#include "talk/media/webrtc/webrtcvideocapturer.h"
+#include "third_party/icu/public/common/unicode/unistr.h"
+#include "third_party/webrtc/system_wrappers/interface/trace.h"
+#include "third_party/webrtc/video_engine/include/vie_base.h"
+#include "third_party/webrtc/voice_engine/include/voe_base.h"
+
+using icu::UnicodeString;
+using webrtc::AudioSourceInterface;
+using webrtc::AudioTrackInterface;
+using webrtc::AudioTrackVector;
+using webrtc::CreateSessionDescriptionObserver;
+using webrtc::IceCandidateInterface;
+using webrtc::MediaConstraintsInterface;
+using webrtc::MediaSourceInterface;
+using webrtc::MediaStreamInterface;
+using webrtc::MediaStreamTrackInterface;
+using webrtc::PeerConnectionFactoryInterface;
+using webrtc::PeerConnectionInterface;
+using webrtc::PeerConnectionObserver;
+using webrtc::SessionDescriptionInterface;
+using webrtc::SetSessionDescriptionObserver;
+using webrtc::StatsObserver;
+using webrtc::StatsReport;
+using webrtc::VideoRendererInterface;
+using webrtc::VideoSourceInterface;
+using webrtc::VideoTrackInterface;
+using webrtc::VideoTrackVector;
+using webrtc::VideoRendererInterface;
+
+// Abort the process if |x| is false, emitting |msg|.
+// The |if (x) {} else| shape (instead of |if (!(x))|) keeps the macro safe
+// when used unbraced under an if/else (no dangling-else capture) and lets
+// |msg| be a streamed expression.
+#define CHECK(x, msg) \
+ if (x) {} else { \
+ LOG(LS_ERROR) << __FILE__ << ":" << __LINE__ << ": " << msg; \
+ abort(); \
+ }
+// Abort the process if |jni| has a Java exception pending, emitting |msg|.
+// ExceptionDescribe() dumps the exception and its stack trace to the log
+// before the exception is cleared and the process aborted via CHECK.
+#define CHECK_EXCEPTION(jni, msg) \
+ if (0) {} else { \
+ if (jni->ExceptionCheck()) { \
+ jni->ExceptionDescribe(); \
+ jni->ExceptionClear(); \
+ CHECK(0, msg); \
+ } \
+ }
+
+namespace {
+
+static JavaVM* g_jvm = NULL; // Set in JNI_OnLoad().
+
+static pthread_once_t g_jni_ptr_once = PTHREAD_ONCE_INIT;
+static pthread_key_t g_jni_ptr; // Key for per-thread JNIEnv* data.
+
+// pthread TLS destructor: detaches the exiting thread from the JVM, undoing
+// the attach done by AttachCurrentThreadIfNeeded(). |unused| is the stored
+// TLS value (the thread's JNIEnv*), which needs no explicit cleanup here.
+static void ThreadDestructor(void* unused) {
+ jint status = g_jvm->DetachCurrentThread();
+ CHECK(status == JNI_OK, "Failed to detach thread: " << status);
+}
+
+// One-time initializer (run via pthread_once) that creates the TLS key used
+// to cache a per-thread JNIEnv*; ThreadDestructor runs at thread exit.
+static void CreateJNIPtrKey() {
+ CHECK(!pthread_key_create(&g_jni_ptr, &ThreadDestructor),
+ "pthread_key_create");
+}
+
+// Deal with difference in signatures between Oracle's jni.h and Android's.
+// Returns a JNIEnv* valid on the calling thread, attaching the thread to the
+// JVM on first use and caching the env in TLS (so ThreadDestructor can
+// detach it at thread exit).
+static JNIEnv* AttachCurrentThreadIfNeeded() {
+ CHECK(!pthread_once(&g_jni_ptr_once, &CreateJNIPtrKey),
+ "pthread_once");
+ JNIEnv* jni = reinterpret_cast<JNIEnv*>(pthread_getspecific(g_jni_ptr));
+ if (jni == NULL) {
+#ifdef _JAVASOFT_JNI_H_ // Oracle's jni.h violates the JNI spec!
+ void* env;
+#else
+ JNIEnv* env;
+#endif
+ CHECK(!g_jvm->AttachCurrentThread(&env, NULL), "Failed to attach thread");
+ CHECK(env, "AttachCurrentThread handed back NULL!");
+ jni = reinterpret_cast<JNIEnv*>(env);
+ CHECK(!pthread_setspecific(g_jni_ptr, jni), "pthread_setspecific");
+ }
+ return jni;
+}
+
+// Android's FindClass() is trickier than usual because the app-specific
+// ClassLoader is not consulted when there is no app-specific frame on the
+// stack. Consequently, we only look up classes once in JNI_OnLoad.
+// http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
+class ClassReferenceHolder {
+ public:
+ // Eagerly loads (and takes global refs on) every Java class this file
+ // needs, while the app ClassLoader is still reachable from the
+ // JNI_OnLoad stack frame.
+ explicit ClassReferenceHolder(JNIEnv* jni) {
+ LoadClass(jni, "java/nio/ByteBuffer");
+ LoadClass(jni, "org/webrtc/AudioTrack");
+ LoadClass(jni, "org/webrtc/IceCandidate");
+ LoadClass(jni, "org/webrtc/MediaSource$State");
+ LoadClass(jni, "org/webrtc/MediaStream");
+ LoadClass(jni, "org/webrtc/MediaStreamTrack$State");
+ LoadClass(jni, "org/webrtc/PeerConnection$SignalingState");
+ LoadClass(jni, "org/webrtc/PeerConnection$IceConnectionState");
+ LoadClass(jni, "org/webrtc/PeerConnection$IceGatheringState");
+ LoadClass(jni, "org/webrtc/SessionDescription");
+ LoadClass(jni, "org/webrtc/SessionDescription$Type");
+ LoadClass(jni, "org/webrtc/StatsReport");
+ LoadClass(jni, "org/webrtc/StatsReport$Value");
+ LoadClass(jni, "org/webrtc/VideoRenderer$I420Frame");
+ LoadClass(jni, "org/webrtc/VideoTrack");
+ }
+
+ ~ClassReferenceHolder() {
+ CHECK(classes_.empty(), "Must call FreeReferences() before dtor!");
+ }
+
+ // Releases all held global refs; must be called (with a valid JNIEnv)
+ // before destruction, e.g. from JNI_OnUnLoad.
+ void FreeReferences(JNIEnv* jni) {
+ for (std::map<std::string, jclass>::const_iterator it = classes_.begin();
+ it != classes_.end(); ++it) {
+ jni->DeleteGlobalRef(it->second);
+ }
+ classes_.clear();
+ }
+
+ // Returns the cached jclass for |name|; aborts if |name| was not loaded
+ // by the constructor.
+ jclass GetClass(const std::string& name) {
+ std::map<std::string, jclass>::iterator it = classes_.find(name);
+ CHECK(it != classes_.end(), "Unexpected GetClass() call for: " << name);
+ return it->second;
+ }
+
+ private:
+ // Finds |name|, promotes the local ref to a global one and caches it.
+ void LoadClass(JNIEnv* jni, const std::string& name) {
+ jclass localRef = jni->FindClass(name.c_str());
+ CHECK_EXCEPTION(jni, "error during FindClass: " << name);
+ CHECK(localRef, name);
+ jclass globalRef = reinterpret_cast<jclass>(jni->NewGlobalRef(localRef));
+ CHECK_EXCEPTION(jni, "error during NewGlobalRef: " << name);
+ CHECK(globalRef, name);
+ bool inserted = classes_.insert(std::make_pair(name, globalRef)).second;
+ CHECK(inserted, "Duplicate class name: " << name);
+ }
+
+ std::map<std::string, jclass> classes_;
+};
+
+// Allocated in JNI_OnLoad(), freed in JNI_OnUnLoad().
+static ClassReferenceHolder* g_class_reference_holder = NULL;
+
+// JNIEnv-helper methods that CHECK success: no Java exception thrown and found
+// object/class/method/field is non-null.
+// Instance-method lookup that aborts on a pending Java exception or on a
+// null result, so callers never have to check.
+jmethodID GetMethodID(
+ JNIEnv* jni, jclass c, const std::string& name, const char* signature) {
+ jmethodID method = jni->GetMethodID(c, name.c_str(), signature);
+ CHECK_EXCEPTION(jni,
+ "error during GetMethodID: " << name << ", " << signature);
+ CHECK(method, name << ", " << signature);
+ return method;
+}
+
+// Like GetMethodID above, but for static (class) methods.
+jmethodID GetStaticMethodID(
+ JNIEnv* jni, jclass c, const char* name, const char* signature) {
+ jmethodID m = jni->GetStaticMethodID(c, name, signature);
+ CHECK_EXCEPTION(jni,
+ "error during GetStaticMethodID: "
+ << name << ", " << signature);
+ CHECK(m, name << ", " << signature);
+ return m;
+}
+
+// Field-id lookup that aborts on a pending Java exception or null result.
+jfieldID GetFieldID(
+ JNIEnv* jni, jclass c, const char* name, const char* signature) {
+ jfieldID f = jni->GetFieldID(c, name, signature);
+ // Include the field name & signature in the abort message, for parity
+ // with GetMethodID/GetStaticMethodID above.
+ CHECK_EXCEPTION(jni,
+ "error during GetFieldID: " << name << ", " << signature);
+ CHECK(f, name << ", " << signature);
+ return f;
+}
+
+// Returns the pre-loaded global jclass for |name|. The JNIEnv* parameter is
+// unused: lookups go through g_class_reference_holder (see the ClassLoader
+// note above) and abort on an unknown name.
+jclass FindClass(JNIEnv* jni, const char* name) {
+ return g_class_reference_holder->GetClass(name);
+}
+
+// jni->GetObjectClass() wrapper that aborts on exception or a null class.
+jclass GetObjectClass(JNIEnv* jni, jobject object) {
+ jclass c = jni->GetObjectClass(object);
+ CHECK_EXCEPTION(jni, "error during GetObjectClass");
+ CHECK(c, "");
+ return c;
+}
+
+// jni->GetObjectField() wrapper that aborts on exception or a null value, so
+// it must only be used for fields known to be non-null.
+jobject GetObjectField(JNIEnv* jni, jobject object, jfieldID id) {
+ jobject o = jni->GetObjectField(object, id);
+ CHECK_EXCEPTION(jni, "error during GetObjectField");
+ CHECK(o, "");
+ return o;
+}
+
+// jni->GetLongField() wrapper that aborts on exception or a zero value.
+// Note CHECK(l, ...) treats 0 as failure, so this is only suitable for
+// fields that are never legitimately 0 (e.g. native pointers stored in
+// jlongs).
+jlong GetLongField(JNIEnv* jni, jobject object, jfieldID id) {
+ jlong l = jni->GetLongField(object, id);
+ CHECK_EXCEPTION(jni, "error during GetLongField");
+ CHECK(l, "");
+ return l;
+}
+
+// Promotes |o| to a global ref, aborting on exception or a null result.
+jobject NewGlobalRef(JNIEnv* jni, jobject o) {
+ jobject global_ref = jni->NewGlobalRef(o);
+ CHECK_EXCEPTION(jni, "error during NewGlobalRef");
+ CHECK(global_ref, "");
+ return global_ref;
+}
+
+// jni->DeleteGlobalRef() wrapper that aborts if the deletion raised an
+// exception.
+void DeleteGlobalRef(JNIEnv* jni, jobject o) {
+ jni->DeleteGlobalRef(o);
+ CHECK_EXCEPTION(jni, "error during DeleteGlobalRef");
+}
+
+// Given a jweak reference, allocate a (strong) local reference scoped to the
+// lifetime of this object if the weak reference is still valid, or NULL
+// otherwise.
+class WeakRef {
+ public:
+ // NewLocalRef yields NULL when the weak referent has been collected.
+ WeakRef(JNIEnv* jni, jweak ref)
+ : jni_(jni), obj_(jni_->NewLocalRef(ref)) {
+ CHECK_EXCEPTION(jni, "error during NewLocalRef");
+ }
+ ~WeakRef() {
+ if (obj_) {
+ jni_->DeleteLocalRef(obj_);
+ CHECK_EXCEPTION(jni_, "error during DeleteLocalRef");
+ }
+ }
+ // The strong local ref, or NULL if the referent was already gone.
+ jobject obj() { return obj_; }
+
+ private:
+ JNIEnv* const jni_;
+ jobject const obj_;
+};
+
+// Given a local ref, take ownership of it and delete the ref when this goes out
+// of scope.
+template<class T> // T is jclass, jobject, jintArray, etc.
+class ScopedLocalRef {
+ public:
+ ScopedLocalRef(JNIEnv* jni, T obj)
+ : jni_(jni), obj_(obj) {}
+ ~ScopedLocalRef() {
+ jni_->DeleteLocalRef(obj_);
+ }
+ // Returns the wrapped ref; ownership stays with this object.
+ T operator*() const {
+ return obj_;
+ }
+ private:
+ // NOTE(review): copy ctor/assignment are not disabled, so copying would
+ // double-DeleteLocalRef; current callers never copy — consider DISALLOW
+ // macros here.
+ JNIEnv* jni_;
+ T obj_;
+};
+
+// Scoped holder for global Java refs.
+// Scoped holder for global Java refs.
+class ScopedGlobalRef {
+ public:
+ explicit ScopedGlobalRef(JNIEnv* jni, jobject obj)
+ : obj_(jni->NewGlobalRef(obj)) {}
+ // Uses the destroying thread's JNIEnv (global refs are valid across
+ // threads, but a JNIEnv* is not), hence no cached env member.
+ ~ScopedGlobalRef() {
+ DeleteGlobalRef(AttachCurrentThreadIfNeeded(), obj_);
+ }
+ jobject operator*() const {
+ return obj_;
+ }
+ private:
+ jobject obj_;
+};
+
+// Return the (singleton) Java Enum object corresponding to |index|;
+// |state_class_fragment| is something like "MediaSource$State".
+jobject JavaEnumFromIndex(
+ JNIEnv* jni, const std::string& state_class_fragment, int index) {
+ std::string state_class_name = "org/webrtc/" + state_class_fragment;
+ jclass state_class = FindClass(jni, state_class_name.c_str());
+ // Enum.values() returns the singleton constants in declaration order, so
+ // indexing the array with |index| maps the C++ enum value to its Java
+ // counterpart (the two declaration orders must match).
+ jmethodID state_values_id = GetStaticMethodID(
+ jni, state_class, "values", ("()[L" + state_class_name + ";").c_str());
+ ScopedLocalRef<jobjectArray> state_values(
+ jni,
+ (jobjectArray)jni->CallStaticObjectMethod(state_class, state_values_id));
+ CHECK_EXCEPTION(jni, "error during CallStaticObjectMethod");
+ // Returned as an unscoped local ref; the caller decides its lifetime.
+ jobject ret = jni->GetObjectArrayElement(*state_values, index);
+ CHECK_EXCEPTION(jni, "error during GetObjectArrayElement");
+ return ret;
+}
+
+// Given a UTF-8 encoded |native| string return a new (UTF-16) jstring.
+// Given a UTF-8 encoded |native| string return a new (UTF-16) jstring.
+// Conversion goes through ICU since jstrings are UTF-16 code units.
+static jstring JavaStringFromStdString(JNIEnv* jni, const std::string& native) {
+ UnicodeString ustr(UnicodeString::fromUTF8(native));
+ jstring jstr = jni->NewString(ustr.getBuffer(), ustr.length());
+ CHECK_EXCEPTION(jni, "error during NewString");
+ return jstr;
+}
+
+// Given a (UTF-16) jstring return a new UTF-8 native string.
+// Given a (UTF-16) jstring return a new UTF-8 native string. The jchar
+// buffer is copied into the UnicodeString before being released back to the
+// JVM.
+static std::string JavaToStdString(JNIEnv* jni, const jstring& j_string) {
+ const jchar* jchars = jni->GetStringChars(j_string, NULL);
+ CHECK_EXCEPTION(jni, "Error during GetStringChars");
+ UnicodeString ustr(jchars, jni->GetStringLength(j_string));
+ CHECK_EXCEPTION(jni, "Error during GetStringLength");
+ jni->ReleaseStringChars(j_string, jchars);
+ CHECK_EXCEPTION(jni, "Error during ReleaseStringChars");
+ std::string ret;
+ return ustr.toUTF8String(ret);
+}
+
+class ConstraintsWrapper;
+
+// Adapter between the C++ PeerConnectionObserver interface and the Java
+// PeerConnection.Observer interface. Wraps an instance of the Java interface
+// and dispatches C++ callbacks to Java.
+class PCOJava : public PeerConnectionObserver {
+ public:
+ // Caches a global ref to the Java observer plus the wrapper classes/ctors
+ // needed to translate native streams & tracks into Java objects.
+ PCOJava(JNIEnv* jni, jobject j_observer)
+ : j_observer_global_(jni, j_observer),
+ j_observer_class_((jclass)NewGlobalRef(
+ jni, GetObjectClass(jni, *j_observer_global_))),
+ j_media_stream_class_((jclass)NewGlobalRef(
+ jni, FindClass(jni, "org/webrtc/MediaStream"))),
+ j_media_stream_ctor_(GetMethodID(jni,
+ j_media_stream_class_, "<init>", "(J)V")),
+ j_audio_track_class_((jclass)NewGlobalRef(
+ jni, FindClass(jni, "org/webrtc/AudioTrack"))),
+ j_audio_track_ctor_(GetMethodID(
+ jni, j_audio_track_class_, "<init>", "(J)V")),
+ j_video_track_class_((jclass)NewGlobalRef(
+ jni, FindClass(jni, "org/webrtc/VideoTrack"))),
+ j_video_track_ctor_(GetMethodID(jni,
+ j_video_track_class_, "<init>", "(J)V")) {
+ }
+
+ virtual ~PCOJava() {}
+
+ // Forwards a newly gathered ICE candidate to the Java observer.
+ virtual void OnIceCandidate(const IceCandidateInterface* candidate) {
+ std::string sdp;
+ CHECK(candidate->ToString(&sdp), "got so far: " << sdp);
+ jclass candidate_class = FindClass(jni(), "org/webrtc/IceCandidate");
+ jmethodID ctor = GetMethodID(jni(), candidate_class,
+ "<init>", "(Ljava/lang/String;ILjava/lang/String;)V");
+ ScopedLocalRef<jstring> j_mid(
+ jni(), JavaStringFromStdString(jni(), candidate->sdp_mid()));
+ ScopedLocalRef<jstring> j_sdp(jni(), JavaStringFromStdString(jni(), sdp));
+ ScopedLocalRef<jobject> j_candidate(jni(), jni()->NewObject(
+ candidate_class, ctor, *j_mid, candidate->sdp_mline_index(), *j_sdp));
+ CHECK_EXCEPTION(jni(), "error during NewObject");
+ jmethodID m = GetMethodID(jni(), j_observer_class_,
+ "onIceCandidate", "(Lorg/webrtc/IceCandidate;)V");
+ jni()->CallVoidMethod(*j_observer_global_, m, *j_candidate);
+ CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+ }
+
+ virtual void OnError() {
+ // BUG FIX: the signature was "(V)V", which is not a valid JNI method
+ // descriptor (V is only legal as a return type); GetMethodID would fail
+ // and abort. A no-arg void method is "()V".
+ jmethodID m = GetMethodID(jni(), j_observer_class_, "onError", "()V");
+ jni()->CallVoidMethod(*j_observer_global_, m);
+ CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+ }
+
+ virtual void OnSignalingChange(
+ PeerConnectionInterface::SignalingState new_state) {
+ jmethodID m = GetMethodID(
+ jni(), j_observer_class_, "onSignalingChange",
+ "(Lorg/webrtc/PeerConnection$SignalingState;)V");
+ ScopedLocalRef<jobject> new_state_enum(jni(), JavaEnumFromIndex(
+ jni(), "PeerConnection$SignalingState", new_state));
+ jni()->CallVoidMethod(*j_observer_global_, m, *new_state_enum);
+ CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+ }
+
+ virtual void OnIceConnectionChange(
+ PeerConnectionInterface::IceConnectionState new_state) {
+ jmethodID m = GetMethodID(
+ jni(), j_observer_class_, "onIceConnectionChange",
+ "(Lorg/webrtc/PeerConnection$IceConnectionState;)V");
+ ScopedLocalRef<jobject> new_state_enum(jni(), JavaEnumFromIndex(
+ jni(), "PeerConnection$IceConnectionState", new_state));
+ jni()->CallVoidMethod(*j_observer_global_, m, *new_state_enum);
+ CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+ }
+
+ virtual void OnIceGatheringChange(
+ PeerConnectionInterface::IceGatheringState new_state) {
+ jmethodID m = GetMethodID(
+ jni(), j_observer_class_, "onIceGatheringChange",
+ "(Lorg/webrtc/PeerConnection$IceGatheringState;)V");
+ ScopedLocalRef<jobject> new_state_enum(jni(), JavaEnumFromIndex(
+ jni(), "PeerConnection$IceGatheringState", new_state));
+ jni()->CallVoidMethod(*j_observer_global_, m, *new_state_enum);
+ CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+ }
+
+ // Builds a Java MediaStream mirroring |stream| (including its audio/video
+ // tracks), remembers it in |streams_| via a weak ref, and hands it to the
+ // Java observer.
+ virtual void OnAddStream(MediaStreamInterface* stream) {
+ ScopedLocalRef<jobject> j_stream(jni(), jni()->NewObject(
+ j_media_stream_class_, j_media_stream_ctor_, (jlong)stream));
+ CHECK_EXCEPTION(jni(), "error during NewObject");
+
+ AudioTrackVector audio_tracks = stream->GetAudioTracks();
+ for (size_t i = 0; i < audio_tracks.size(); ++i) {
+ AudioTrackInterface* track = audio_tracks[i];
+ ScopedLocalRef<jstring> id(
+ jni(), JavaStringFromStdString(jni(), track->id()));
+ // NOTE(review): j_audio_track_ctor_ was looked up with signature
+ // "(J)V" yet two arguments are passed; confirm the Java AudioTrack
+ // ctor signature (likely "(JLjava/lang/String;)V").
+ ScopedLocalRef<jobject> j_track(jni(), jni()->NewObject(
+ j_audio_track_class_, j_audio_track_ctor_, (jlong)track, *id));
+ CHECK_EXCEPTION(jni(), "error during NewObject");
+ jfieldID audio_tracks_id = GetFieldID(jni(),
+ j_media_stream_class_, "audioTracks", "Ljava/util/List;");
+ // Renamed local (was |audio_tracks|) to stop shadowing the native
+ // vector driving this loop.
+ ScopedLocalRef<jobject> j_audio_list(jni(), GetObjectField(
+ jni(), *j_stream, audio_tracks_id));
+ jmethodID add = GetMethodID(jni(),
+ GetObjectClass(jni(), *j_audio_list), "add", "(Ljava/lang/Object;)Z");
+ jboolean added = jni()->CallBooleanMethod(*j_audio_list, add, *j_track);
+ CHECK_EXCEPTION(jni(), "error during CallBooleanMethod");
+ CHECK(added, "");
+ }
+
+ VideoTrackVector video_tracks = stream->GetVideoTracks();
+ for (size_t i = 0; i < video_tracks.size(); ++i) {
+ VideoTrackInterface* track = video_tracks[i];
+ ScopedLocalRef<jstring> id(
+ jni(), JavaStringFromStdString(jni(), track->id()));
+ ScopedLocalRef<jobject> j_track(jni(), jni()->NewObject(
+ j_video_track_class_, j_video_track_ctor_, (jlong)track, *id));
+ CHECK_EXCEPTION(jni(), "error during NewObject");
+ jfieldID video_tracks_id = GetFieldID(jni(),
+ j_media_stream_class_, "videoTracks", "Ljava/util/List;");
+ // Renamed local (was |video_tracks|) to stop shadowing the native
+ // vector driving this loop.
+ ScopedLocalRef<jobject> j_video_list(jni(), GetObjectField(
+ jni(), *j_stream, video_tracks_id));
+ jmethodID add = GetMethodID(jni(),
+ GetObjectClass(jni(), *j_video_list), "add", "(Ljava/lang/Object;)Z");
+ jboolean added = jni()->CallBooleanMethod(*j_video_list, add, *j_track);
+ CHECK_EXCEPTION(jni(), "error during CallBooleanMethod");
+ CHECK(added, "");
+ }
+ // Weak ref: Java-land owns the stream object; we only need to find it
+ // again in OnRemoveStream.
+ streams_[stream] = jni()->NewWeakGlobalRef(*j_stream);
+ CHECK_EXCEPTION(jni(), "error during NewWeakGlobalRef");
+
+ jmethodID m = GetMethodID(jni(),
+ j_observer_class_, "onAddStream", "(Lorg/webrtc/MediaStream;)V");
+ jni()->CallVoidMethod(*j_observer_global_, m, *j_stream);
+ CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+ }
+
+ virtual void OnRemoveStream(MediaStreamInterface* stream) {
+ NativeToJavaStreamsMap::iterator it = streams_.find(stream);
+ CHECK(it != streams_.end(), "unexpected stream: " << std::hex << stream);
+
+ WeakRef s(jni(), it->second);
+ streams_.erase(it);
+ // The Java wrapper may already have been collected; nothing to notify.
+ if (!s.obj())
+ return;
+
+ jmethodID m = GetMethodID(jni(),
+ j_observer_class_, "onRemoveStream", "(Lorg/webrtc/MediaStream;)V");
+ jni()->CallVoidMethod(*j_observer_global_, m, s.obj());
+ CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+ }
+
+ // Takes ownership of |constraints|; may be set at most once.
+ void SetConstraints(ConstraintsWrapper* constraints) {
+ CHECK(!constraints_.get(), "constraints already set!");
+ constraints_.reset(constraints);
+ }
+
+ const ConstraintsWrapper* constraints() { return constraints_.get(); }
+
+ private:
+ // Callbacks can arrive on any signaling thread, so always fetch the
+ // current thread's env.
+ JNIEnv* jni() {
+ return AttachCurrentThreadIfNeeded();
+ }
+
+ const ScopedGlobalRef j_observer_global_;
+ const jclass j_observer_class_;
+ const jclass j_media_stream_class_;
+ const jmethodID j_media_stream_ctor_;
+ const jclass j_audio_track_class_;
+ const jmethodID j_audio_track_ctor_;
+ const jclass j_video_track_class_;
+ const jmethodID j_video_track_ctor_;
+ typedef std::map<void*, jweak> NativeToJavaStreamsMap;
+ NativeToJavaStreamsMap streams_; // C++ -> Java streams.
+ talk_base::scoped_ptr<ConstraintsWrapper> constraints_;
+};
+
+// Wrapper for a Java MediaConstraints object. Copies all needed data so when
+// the constructor returns the Java object is no longer needed.
+class ConstraintsWrapper : public MediaConstraintsInterface {
+ public:
+ // Reads the |mandatory| and |optional| lists out of the Java object; no
+ // reference to |j_constraints| is retained afterwards.
+ ConstraintsWrapper(JNIEnv* jni, jobject j_constraints) {
+ PopulateConstraintsFromJavaPairList(
+ jni, j_constraints, "mandatory", &mandatory_);
+ PopulateConstraintsFromJavaPairList(
+ jni, j_constraints, "optional", &optional_);
+ }
+
+ virtual ~ConstraintsWrapper() {}
+
+ // MediaConstraintsInterface.
+ virtual const Constraints& GetMandatory() const { return mandatory_; }
+ virtual const Constraints& GetOptional() const { return optional_; }
+
+ private:
+ // Helper for translating a List<Pair<String, String>> to a Constraints.
+ // Iterates |field_name| via the Java Iterator protocol, copying each
+ // key/value pair into |field|.
+ static void PopulateConstraintsFromJavaPairList(
+ JNIEnv* jni, jobject j_constraints,
+ const char* field_name, Constraints* field) {
+ jfieldID j_id = GetFieldID(jni,
+ GetObjectClass(jni, j_constraints), field_name, "Ljava/util/List;");
+ jobject j_list = GetObjectField(jni, j_constraints, j_id);
+ jmethodID j_iterator_id = GetMethodID(jni,
+ GetObjectClass(jni, j_list), "iterator", "()Ljava/util/Iterator;");
+ jobject j_iterator = jni->CallObjectMethod(j_list, j_iterator_id);
+ CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+ jmethodID j_has_next = GetMethodID(jni,
+ GetObjectClass(jni, j_iterator), "hasNext", "()Z");
+ jmethodID j_next = GetMethodID(jni,
+ GetObjectClass(jni, j_iterator), "next", "()Ljava/lang/Object;");
+ // Each hasNext() call is exception-checked at the top of the body; the
+ // final (false-returning) call is checked after the loop.
+ while (jni->CallBooleanMethod(j_iterator, j_has_next)) {
+ CHECK_EXCEPTION(jni, "error during CallBooleanMethod");
+ jobject entry = jni->CallObjectMethod(j_iterator, j_next);
+ CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+ jmethodID get_key = GetMethodID(jni,
+ GetObjectClass(jni, entry), "getKey", "()Ljava/lang/String;");
+ jstring j_key = reinterpret_cast<jstring>(
+ jni->CallObjectMethod(entry, get_key));
+ CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+ jmethodID get_value = GetMethodID(jni,
+ GetObjectClass(jni, entry), "getValue", "()Ljava/lang/String;");
+ jstring j_value = reinterpret_cast<jstring>(
+ jni->CallObjectMethod(entry, get_value));
+ CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+ field->push_back(Constraint(JavaToStdString(jni, j_key),
+ JavaToStdString(jni, j_value)));
+ }
+ CHECK_EXCEPTION(jni, "error during CallBooleanMethod");
+ }
+
+ Constraints mandatory_;
+ Constraints optional_;
+};
+
+// Builds a Java SessionDescription (type + serialized SDP string) from the
+// native |desc|; aborts if |desc| fails to serialize. Returns an unscoped
+// local ref owned by the caller.
+static jobject JavaSdpFromNativeSdp(
+ JNIEnv* jni, const SessionDescriptionInterface* desc) {
+ std::string sdp;
+ CHECK(desc->ToString(&sdp), "got so far: " << sdp);
+ ScopedLocalRef<jstring> j_description(jni, JavaStringFromStdString(jni, sdp));
+
+ jclass j_type_class = FindClass(
+ jni, "org/webrtc/SessionDescription$Type");
+ // Map the native type string ("offer"/"answer"/...) to the Java enum.
+ jmethodID j_type_from_canonical = GetStaticMethodID(
+ jni, j_type_class, "fromCanonicalForm",
+ "(Ljava/lang/String;)Lorg/webrtc/SessionDescription$Type;");
+ ScopedLocalRef<jstring> j_type_string(
+ jni, JavaStringFromStdString(jni, desc->type()));
+ jobject j_type = jni->CallStaticObjectMethod(
+ j_type_class, j_type_from_canonical, *j_type_string);
+ CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+
+ jclass j_sdp_class = FindClass(jni, "org/webrtc/SessionDescription");
+ jmethodID j_sdp_ctor = GetMethodID(
+ jni, j_sdp_class, "<init>",
+ "(Lorg/webrtc/SessionDescription$Type;Ljava/lang/String;)V");
+ jobject j_sdp = jni->NewObject(
+ j_sdp_class, j_sdp_ctor, j_type, *j_description);
+ CHECK_EXCEPTION(jni, "error during NewObject");
+ return j_sdp;
+}
+
+template <class T> // T is one of {Create,Set}SessionDescriptionObserver.
+class SdpObserverWrapper : public T {
+ public:
+ // Takes ownership of |constraints| (held only to pin its lifetime to this
+ // observer's); holds global refs to |j_observer| and its class.
+ SdpObserverWrapper(JNIEnv* jni, jobject j_observer,
+ ConstraintsWrapper* constraints)
+ : constraints_(constraints),
+ j_observer_global_(NewGlobalRef(jni, j_observer)),
+ j_observer_class_((jclass)NewGlobalRef(
+ jni, GetObjectClass(jni, j_observer))) {
+ }
+
+ virtual ~SdpObserverWrapper() {
+ DeleteGlobalRef(jni(), j_observer_global_);
+ DeleteGlobalRef(jni(), j_observer_class_);
+ }
+
+ // Set-type success callback (no payload); only an override when T is
+ // SetSessionDescriptionObserver.
+ virtual void OnSuccess() {
+ jmethodID m = GetMethodID(jni(), j_observer_class_, "onSetSuccess", "()V");
+ jni()->CallVoidMethod(j_observer_global_, m);
+ CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+ }
+
+ // Create-type success callback: hands the new SessionDescription to Java;
+ // only an override when T is CreateSessionDescriptionObserver.
+ virtual void OnSuccess(SessionDescriptionInterface* desc) {
+ jmethodID m = GetMethodID(
+ jni(), j_observer_class_, "onCreateSuccess",
+ "(Lorg/webrtc/SessionDescription;)V");
+ ScopedLocalRef<jobject> j_sdp(jni(), JavaSdpFromNativeSdp(jni(), desc));
+ jni()->CallVoidMethod(j_observer_global_, m, *j_sdp);
+ CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+ }
+
+ protected:
+ // Common implementation for failure of Set & Create types, distinguished by
+ // |op| being "Set" or "Create".
+ void OnFailure(const std::string& op, const std::string& error) {
+ jmethodID m = GetMethodID(jni(),
+ j_observer_class_, "on" + op + "Failure", "(Ljava/lang/String;)V");
+ ScopedLocalRef<jstring> j_error_string(
+ jni(), JavaStringFromStdString(jni(), error));
+ jni()->CallVoidMethod(j_observer_global_, m, *j_error_string);
+ CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+ }
+
+ private:
+ // Callbacks may fire on any thread; fetch that thread's env.
+ JNIEnv* jni() {
+ return AttachCurrentThreadIfNeeded();
+ }
+
+ talk_base::scoped_ptr<ConstraintsWrapper> constraints_;
+ const jobject j_observer_global_;
+ const jclass j_observer_class_;
+};
+
+// Create-flavored observer: failures surface as Java onCreateFailure().
+class CreateSdpObserverWrapper
+ : public SdpObserverWrapper<CreateSessionDescriptionObserver> {
+ public:
+ CreateSdpObserverWrapper(JNIEnv* jni, jobject j_observer,
+ ConstraintsWrapper* constraints)
+ : SdpObserverWrapper(jni, j_observer, constraints) {}
+
+ virtual void OnFailure(const std::string& error) {
+ // Delegates to the two-arg base helper; "Create" selects the Java method.
+ SdpObserverWrapper::OnFailure("Create", error);
+ }
+};
+
+// Set-flavored observer: failures surface as Java onSetFailure().
+class SetSdpObserverWrapper
+ : public SdpObserverWrapper<SetSessionDescriptionObserver> {
+ public:
+ SetSdpObserverWrapper(JNIEnv* jni, jobject j_observer,
+ ConstraintsWrapper* constraints)
+ : SdpObserverWrapper(jni, j_observer, constraints) {}
+
+ virtual void OnFailure(const std::string& error) {
+ // Delegates to the two-arg base helper; "Set" selects the Java method.
+ SdpObserverWrapper::OnFailure("Set", error);
+ }
+};
+
+// Adapter for a Java StatsObserver presenting a C++ StatsObserver and
+// dispatching the callback from C++ back to Java.  Converts each native
+// StatsReport (and its Values) into the corresponding org.webrtc.StatsReport
+// Java objects before invoking the Java observer's onComplete().
+class StatsObserverWrapper : public StatsObserver {
+ public:
+  StatsObserverWrapper(JNIEnv* jni, jobject j_observer)
+      : j_observer_global_(NewGlobalRef(jni, j_observer)),
+        j_observer_class_((jclass)NewGlobalRef(
+            jni, GetObjectClass(jni, j_observer))),
+        j_stats_report_class_(FindClass(jni, "org/webrtc/StatsReport")),
+        j_stats_report_ctor_(GetMethodID(
+            jni, j_stats_report_class_, "<init>",
+            "(Ljava/lang/String;Ljava/lang/String;D"
+            "[Lorg/webrtc/StatsReport$Value;)V")),
+        j_value_class_(FindClass(
+            jni, "org/webrtc/StatsReport$Value")),
+        j_value_ctor_(GetMethodID(
+            jni, j_value_class_, "<init>",
+            "(Ljava/lang/String;Ljava/lang/String;)V")) {
+  }
+
+  virtual ~StatsObserverWrapper() {
+    // Release the global refs taken in the constructor.
+    DeleteGlobalRef(jni(), j_observer_global_);
+    DeleteGlobalRef(jni(), j_observer_class_);
+  }
+
+  // Called by the native stats machinery; forwards the converted reports to
+  // the Java observer.
+  virtual void OnComplete(const std::vector<StatsReport>& reports) {
+    ScopedLocalRef<jobjectArray> j_reports(jni(),
+                                           ReportsToJava(jni(), reports));
+    jmethodID m = GetMethodID(
+        jni(), j_observer_class_, "onComplete", "([Lorg/webrtc/StatsReport;)V");
+    jni()->CallVoidMethod(j_observer_global_, m, *j_reports);
+    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+  }
+
+ private:
+  // Builds a Java StatsReport[] mirroring |reports|.
+  jobjectArray ReportsToJava(
+      JNIEnv* jni, const std::vector<StatsReport>& reports) {
+    jobjectArray reports_array = jni->NewObjectArray(
+        reports.size(), j_stats_report_class_, NULL);
+    // size_t avoids the signed/unsigned comparison of the original int loop.
+    for (size_t i = 0; i < reports.size(); ++i) {
+      const StatsReport& report = reports[i];
+      ScopedLocalRef<jstring> j_id(
+          jni, JavaStringFromStdString(jni, report.id));
+      ScopedLocalRef<jstring> j_type(
+          jni, JavaStringFromStdString(jni, report.type));
+      ScopedLocalRef<jobjectArray> j_values(
+          jni, ValuesToJava(jni, report.values));
+      ScopedLocalRef<jobject> j_report(jni, jni->NewObject(
+          j_stats_report_class_, j_stats_report_ctor_, *j_id, *j_type,
+          report.timestamp, *j_values));
+      jni->SetObjectArrayElement(reports_array, i, *j_report);
+    }
+    return reports_array;
+  }
+
+  // Builds a Java StatsReport.Value[] mirroring |values|.
+  jobjectArray ValuesToJava(JNIEnv* jni, const StatsReport::Values& values) {
+    jobjectArray j_values = jni->NewObjectArray(
+        values.size(), j_value_class_, NULL);
+    for (size_t i = 0; i < values.size(); ++i) {
+      const StatsReport::Value& value = values[i];
+      ScopedLocalRef<jstring> j_name(
+          jni, JavaStringFromStdString(jni, value.name));
+      ScopedLocalRef<jstring> j_value(
+          jni, JavaStringFromStdString(jni, value.value));
+      ScopedLocalRef<jobject> j_element_value(jni, jni->NewObject(
+          j_value_class_, j_value_ctor_, *j_name, *j_value));
+      jni->SetObjectArrayElement(j_values, i, *j_element_value);
+    }
+    return j_values;
+  }
+
+  // Callbacks may arrive on any thread, so (re)attach before JNI use.
+  JNIEnv* jni() {
+    return AttachCurrentThreadIfNeeded();
+  }
+
+  const jobject j_observer_global_;
+  const jclass j_observer_class_;
+  const jclass j_stats_report_class_;
+  const jmethodID j_stats_report_ctor_;
+  const jclass j_value_class_;
+  const jmethodID j_value_ctor_;
+};
+
+// Adapter presenting a cricket::VideoRenderer as a
+// webrtc::VideoRendererInterface.  Takes ownership of the wrapped renderer.
+class VideoRendererWrapper : public VideoRendererInterface {
+ public:
+  // Returns NULL when |renderer| is NULL, otherwise a wrapper owning it.
+  static VideoRendererWrapper* Create(cricket::VideoRenderer* renderer) {
+    return renderer ? new VideoRendererWrapper(renderer) : NULL;
+  }
+
+  virtual ~VideoRendererWrapper() {}
+
+  virtual void SetSize(int width, int height) {
+    const bool kNotReserved = false;  // What does this param mean??
+    renderer_->SetSize(width, height, kNotReserved);
+  }
+
+  virtual void RenderFrame(const cricket::VideoFrame* frame) {
+    renderer_->RenderFrame(frame);
+  }
+
+ private:
+  explicit VideoRendererWrapper(cricket::VideoRenderer* renderer)
+      : renderer_(renderer) {}
+
+  talk_base::scoped_ptr<cricket::VideoRenderer> renderer_;
+};
+
+// Wrapper dispatching webrtc::VideoRendererInterface to a Java VideoRenderer
+// instance.
+class JavaVideoRendererWrapper : public VideoRendererInterface {
+ public:
+  // Caches the method/class IDs needed later; |j_callbacks| is retained via
+  // a global ref held by |j_callbacks_|.
+  JavaVideoRendererWrapper(JNIEnv* jni, jobject j_callbacks)
+      : j_callbacks_(jni, j_callbacks) {
+    j_set_size_id_ = GetMethodID(
+        jni, GetObjectClass(jni, j_callbacks), "setSize", "(II)V");
+    j_render_frame_id_ = GetMethodID(
+        jni, GetObjectClass(jni, j_callbacks), "renderFrame",
+        "(Lorg/webrtc/VideoRenderer$I420Frame;)V");
+    j_frame_class_ = FindClass(jni, "org/webrtc/VideoRenderer$I420Frame");
+    j_frame_ctor_id_ = GetMethodID(
+        jni, j_frame_class_, "<init>", "(II[I[Ljava/nio/ByteBuffer;)V");
+    j_byte_buffer_class_ = FindClass(jni, "java/nio/ByteBuffer");
+    CHECK_EXCEPTION(jni, "");
+  }
+
+  virtual void SetSize(int width, int height) {
+    jni()->CallVoidMethod(*j_callbacks_, j_set_size_id_, width, height);
+    CHECK_EXCEPTION(jni(), "");
+  }
+
+  // Converts |frame| to a Java I420Frame and hands it to renderFrame().
+  virtual void RenderFrame(const cricket::VideoFrame* frame) {
+    ScopedLocalRef<jobject> j_frame(jni(), CricketToJavaFrame(frame));
+    jni()->CallVoidMethod(*j_callbacks_, j_render_frame_id_, *j_frame);
+    CHECK_EXCEPTION(jni(), "");
+  }
+
+ private:
+  // Return a VideoRenderer.I420Frame referring to the data in |frame|.
+  // NOTE(review): the ByteBuffers wrap |frame|'s planes directly (no copy),
+  // so they are only valid while |frame| is alive — presumably for the
+  // duration of the renderFrame() call; confirm on the Java side.
+  jobject CricketToJavaFrame(const cricket::VideoFrame* frame) {
+    ScopedLocalRef<jintArray> strides(jni(), jni()->NewIntArray(3));
+    jint* strides_array = jni()->GetIntArrayElements(*strides, NULL);
+    strides_array[0] = frame->GetYPitch();
+    strides_array[1] = frame->GetUPitch();
+    strides_array[2] = frame->GetVPitch();
+    jni()->ReleaseIntArrayElements(*strides, strides_array, 0);
+    ScopedLocalRef<jobjectArray> planes(
+        jni(), jni()->NewObjectArray(3, j_byte_buffer_class_, NULL));
+    ScopedLocalRef<jobject> y_buffer(jni(), jni()->NewDirectByteBuffer(
+        const_cast<uint8*>(frame->GetYPlane()),
+        frame->GetYPitch() * frame->GetHeight()));
+    ScopedLocalRef<jobject> u_buffer(jni(), jni()->NewDirectByteBuffer(
+        const_cast<uint8*>(frame->GetUPlane()), frame->GetChromaSize()));
+    ScopedLocalRef<jobject> v_buffer(jni(), jni()->NewDirectByteBuffer(
+        const_cast<uint8*>(frame->GetVPlane()), frame->GetChromaSize()));
+    jni()->SetObjectArrayElement(*planes, 0, *y_buffer);
+    jni()->SetObjectArrayElement(*planes, 1, *u_buffer);
+    jni()->SetObjectArrayElement(*planes, 2, *v_buffer);
+    return jni()->NewObject(
+        j_frame_class_, j_frame_ctor_id_,
+        frame->GetWidth(), frame->GetHeight(), *strides, *planes);
+  }
+
+  // Render callbacks may arrive on any thread; (re)attach before JNI use.
+  JNIEnv* jni() {
+    return AttachCurrentThreadIfNeeded();
+  }
+
+  ScopedGlobalRef j_callbacks_;
+  jmethodID j_set_size_id_;
+  jmethodID j_render_frame_id_;
+  jclass j_frame_class_;
+  jmethodID j_frame_ctor_id_;
+  jclass j_byte_buffer_class_;
+};
+
+}  // anonymous namespace
+
+
+// Convenience macro defining JNI-accessible methods in the org.webrtc package.
+// Eliminates unnecessary boilerplate and line-wraps, reducing visual clutter.
+// JOW(jint, Foo_bar) expands to the exported symbol Java_org_webrtc_Foo_bar.
+#define JOW(rettype, name) extern "C" rettype JNIEXPORT JNICALL \
+  Java_org_webrtc_##name
+
+// Library entry point invoked by the JVM on System.loadLibrary().  Stashes
+// the JavaVM*, initializes SSL, and loads the Java classes this file needs.
+// Returns the JNI version required, or -1 on failure.
+extern "C" jint JNIEXPORT JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) {
+  CHECK(!g_jvm, "JNI_OnLoad called more than once!");
+  g_jvm = jvm;
+  CHECK(g_jvm, "JNI_OnLoad handed NULL?");
+
+  CHECK(talk_base::InitializeSSL(), "Failed to InitializeSSL()");
+
+  JNIEnv* jni;
+  if (jvm->GetEnv(reinterpret_cast<void**>(&jni), JNI_VERSION_1_6) != JNI_OK)
+    return -1;
+  // Classes must be looked up now, while the class loader of the calling
+  // code is available; g_class_reference_holder caches global refs to them.
+  g_class_reference_holder = new ClassReferenceHolder(jni);
+
+#ifdef ANDROID
+  webrtc::Trace::CreateTrace();
+  // SetTraceFile/SetLevelFilter return 0 on success, hence the negations.
+  CHECK(!webrtc::Trace::SetTraceFile("/sdcard/trace.txt", false),
+        "SetTraceFile failed");
+  CHECK(!webrtc::Trace::SetLevelFilter(webrtc::kTraceAll),
+        "SetLevelFilter failed");
+#endif  // ANDROID
+
+  // Uncomment to get sensitive logs emitted (to stderr or logcat).
+  // talk_base::LogMessage::LogToDebug(talk_base::LS_SENSITIVE);
+
+  return JNI_VERSION_1_6;
+}
+
+// Library teardown hook.  NOTE: the JVM looks this symbol up by the exact
+// name "JNI_OnUnload"; the previous spelling "JNI_OnUnLoad" (capital L) was
+// never invoked.
+extern "C" void JNIEXPORT JNICALL JNI_OnUnload(JavaVM *jvm, void *reserved) {
+  webrtc::Trace::ReturnTrace();
+  delete g_class_reference_holder;
+  g_class_reference_holder = NULL;
+  CHECK(talk_base::CleanupSSL(), "Failed to CleanupSSL()");
+}
+
+// Drops the reference to the native PeerConnection held by the Java peer.
+JOW(void, PeerConnection_freePeerConnection)(JNIEnv*, jclass, jlong j_p) {
+  reinterpret_cast<PeerConnectionInterface*>(j_p)->Release();
+}
+
+// Deletes the C++ observer wrapper created by nativeCreateObserver().
+JOW(void, PeerConnection_freeObserver)(JNIEnv*, jclass, jlong j_p) {
+  PCOJava* p = reinterpret_cast<PCOJava*>(j_p);
+  delete p;
+}
+
+// Drops the reference to the native MediaSource held by the Java peer.
+JOW(void, MediaSource_free)(JNIEnv*, jclass, jlong j_p) {
+  reinterpret_cast<MediaSourceInterface*>(j_p)->Release();
+}
+
+// Deletes the native VideoCapturer (plain ownership, not ref-counted).
+JOW(void, VideoCapturer_free)(JNIEnv*, jclass, jlong j_p) {
+  delete reinterpret_cast<cricket::VideoCapturer*>(j_p);
+}
+
+// Deletes the native renderer wrapper (plain ownership, not ref-counted).
+JOW(void, VideoRenderer_free)(JNIEnv*, jclass, jlong j_p) {
+  delete reinterpret_cast<VideoRendererWrapper*>(j_p);
+}
+
+// Drops the reference to the native track held by the Java peer.
+JOW(void, MediaStreamTrack_free)(JNIEnv*, jclass, jlong j_p) {
+  reinterpret_cast<MediaStreamTrackInterface*>(j_p)->Release();
+}
+
+// Adds the audio track to the stream; returns success.  The scoped_refptrs
+// only pin the objects for the duration of the call (AddRef+Release nets 0).
+JOW(jboolean, MediaStream_nativeAddAudioTrack)(
+    JNIEnv* jni, jclass, jlong pointer, jlong j_audio_track_pointer) {
+  talk_base::scoped_refptr<MediaStreamInterface> stream(
+      reinterpret_cast<MediaStreamInterface*>(pointer));
+  talk_base::scoped_refptr<AudioTrackInterface> track(
+      reinterpret_cast<AudioTrackInterface*>(j_audio_track_pointer));
+  return stream->AddTrack(track);
+}
+
+// Adds the video track to the stream; returns success.
+JOW(jboolean, MediaStream_nativeAddVideoTrack)(
+    JNIEnv* jni, jclass, jlong pointer, jlong j_video_track_pointer) {
+  talk_base::scoped_refptr<MediaStreamInterface> stream(
+      reinterpret_cast<MediaStreamInterface*>(pointer));
+  talk_base::scoped_refptr<VideoTrackInterface> track(
+      reinterpret_cast<VideoTrackInterface*>(j_video_track_pointer));
+  return stream->AddTrack(track);
+}
+
+// Removes the audio track from the stream; returns success.
+JOW(jboolean, MediaStream_nativeRemoveAudioTrack)(
+    JNIEnv* jni, jclass, jlong pointer, jlong j_audio_track_pointer) {
+  talk_base::scoped_refptr<MediaStreamInterface> stream(
+      reinterpret_cast<MediaStreamInterface*>(pointer));
+  talk_base::scoped_refptr<AudioTrackInterface> track(
+      reinterpret_cast<AudioTrackInterface*>(j_audio_track_pointer));
+  return stream->RemoveTrack(track);
+}
+
+// Removes the video track from the stream; returns success.
+JOW(jboolean, MediaStream_nativeRemoveVideoTrack)(
+    JNIEnv* jni, jclass, jlong pointer, jlong j_video_track_pointer) {
+  talk_base::scoped_refptr<MediaStreamInterface> stream(
+      reinterpret_cast<MediaStreamInterface*>(pointer));
+  talk_base::scoped_refptr<VideoTrackInterface> track(
+      reinterpret_cast<VideoTrackInterface*>(j_video_track_pointer));
+  return stream->RemoveTrack(track);
+}
+
+// Returns the stream's label as a Java String.
+JOW(jstring, MediaStream_nativeLabel)(JNIEnv* jni, jclass, jlong j_p) {
+  return JavaStringFromStdString(
+      jni, reinterpret_cast<MediaStreamInterface*>(j_p)->label());
+}
+
+// Drops the reference to the native MediaStream held by the Java peer.
+JOW(void, MediaStream_free)(JNIEnv*, jclass, jlong j_p) {
+  reinterpret_cast<MediaStreamInterface*>(j_p)->Release();
+}
+
+// Allocates the C++ observer adapting |j_observer|; ownership passes to the
+// Java side, which must call freeObserver() to release it.
+JOW(jlong, PeerConnectionFactory_nativeCreateObserver)(
+    JNIEnv * jni, jclass, jobject j_observer) {
+  return (jlong)new PCOJava(jni, j_observer);
+}
+
+#ifdef ANDROID
+// Hands the Android application context to the video/voice engines.
+// Returns true on success (the Set*Objects calls return nonzero on failure).
+JOW(jboolean, PeerConnectionFactory_initializeAndroidGlobals)(
+    JNIEnv* jni, jclass, jobject context) {
+  CHECK(g_jvm, "JNI_OnLoad failed to run?");
+  bool failure = false;
+  failure |= webrtc::VideoEngine::SetAndroidObjects(g_jvm, context);
+  failure |= webrtc::VoiceEngine::SetAndroidObjects(g_jvm, jni, context);
+  return !failure;
+}
+#endif  // ANDROID
+
+// Creates the factory and transfers its reference to the Java caller, which
+// must balance it with freeFactory().
+JOW(jlong, PeerConnectionFactory_nativeCreatePeerConnectionFactory)(
+    JNIEnv* jni, jclass) {
+  talk_base::scoped_refptr<PeerConnectionFactoryInterface> factory(
+      webrtc::CreatePeerConnectionFactory());
+  return (jlong)factory.release();
+}
+
+// Drops the reference to the factory held by the Java peer.
+JOW(void, PeerConnectionFactory_freeFactory)(JNIEnv*, jclass, jlong j_p) {
+  reinterpret_cast<PeerConnectionFactoryInterface*>(j_p)->Release();
+}
+
+// Creates a local MediaStream with |label|; the returned reference is owned
+// by the Java caller (balanced by MediaStream_free()).
+JOW(jlong, PeerConnectionFactory_nativeCreateLocalMediaStream)(
+    JNIEnv* jni, jclass, jlong native_factory, jstring label) {
+  talk_base::scoped_refptr<PeerConnectionFactoryInterface> factory(
+      reinterpret_cast<PeerConnectionFactoryInterface*>(native_factory));
+  talk_base::scoped_refptr<MediaStreamInterface> stream(
+      factory->CreateLocalMediaStream(JavaToStdString(jni, label)));
+  return (jlong)stream.release();
+}
+
+// Creates a VideoSource from a capturer and constraints; the returned
+// reference is owned by the Java caller (balanced by MediaSource_free()).
+// NOTE(review): |native_capturer| is handed to CreateVideoSource — presumably
+// the source takes ownership of the capturer; confirm against the factory API.
+JOW(jlong, PeerConnectionFactory_nativeCreateVideoSource)(
+    JNIEnv* jni, jclass, jlong native_factory, jlong native_capturer,
+    jobject j_constraints) {
+  talk_base::scoped_ptr<ConstraintsWrapper> constraints(
+      new ConstraintsWrapper(jni, j_constraints));
+  talk_base::scoped_refptr<PeerConnectionFactoryInterface> factory(
+      reinterpret_cast<PeerConnectionFactoryInterface*>(native_factory));
+  talk_base::scoped_refptr<VideoSourceInterface> source(
+      factory->CreateVideoSource(
+          reinterpret_cast<cricket::VideoCapturer*>(native_capturer),
+          constraints.get()));
+  return (jlong)source.release();
+}
+
+// Creates a VideoTrack backed by |native_source|; the returned reference is
+// owned by the Java caller (balanced by MediaStreamTrack_free()).
+JOW(jlong, PeerConnectionFactory_nativeCreateVideoTrack)(
+    JNIEnv* jni, jclass, jlong native_factory, jstring id,
+    jlong native_source) {
+  talk_base::scoped_refptr<PeerConnectionFactoryInterface> factory(
+      reinterpret_cast<PeerConnectionFactoryInterface*>(native_factory));
+  talk_base::scoped_refptr<VideoTrackInterface> track(
+      factory->CreateVideoTrack(
+          JavaToStdString(jni, id),
+          reinterpret_cast<VideoSourceInterface*>(native_source)));
+  return (jlong)track.release();
+}
+
+// Creates an AudioTrack with no explicit source (NULL); the returned
+// reference is owned by the Java caller (balanced by MediaStreamTrack_free()).
+JOW(jlong, PeerConnectionFactory_nativeCreateAudioTrack)(
+    JNIEnv* jni, jclass, jlong native_factory, jstring id) {
+  talk_base::scoped_refptr<PeerConnectionFactoryInterface> factory(
+      reinterpret_cast<PeerConnectionFactoryInterface*>(native_factory));
+  talk_base::scoped_refptr<AudioTrackInterface> track(
+      factory->CreateAudioTrack(JavaToStdString(jni, id), NULL));
+  return (jlong)track.release();
+}
+
+// Converts a java.util.List<PeerConnection.IceServer> into the native
+// IceServers vector by iterating the list reflectively and reading the
+// uri/username/password String fields of each element.
+static void JavaIceServersToJsepIceServers(
+    JNIEnv* jni, jobject j_ice_servers,
+    PeerConnectionInterface::IceServers* ice_servers) {
+  jclass list_class = GetObjectClass(jni, j_ice_servers);
+  jmethodID iterator_id = GetMethodID(
+      jni, list_class, "iterator", "()Ljava/util/Iterator;");
+  jobject iterator = jni->CallObjectMethod(j_ice_servers, iterator_id);
+  CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+  jmethodID iterator_has_next = GetMethodID(
+      jni, GetObjectClass(jni, iterator), "hasNext", "()Z");
+  jmethodID iterator_next = GetMethodID(
+      jni, GetObjectClass(jni, iterator), "next", "()Ljava/lang/Object;");
+  while (jni->CallBooleanMethod(iterator, iterator_has_next)) {
+    // Exception from the hasNext() call above is checked here (and once more
+    // after the loop for the final call that returned false).
+    CHECK_EXCEPTION(jni, "error during CallBooleanMethod");
+    jobject j_ice_server = jni->CallObjectMethod(iterator, iterator_next);
+    CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+    // NOTE(review): field IDs are looked up per element; could be hoisted if
+    // all elements are known to share one class.
+    jclass j_ice_server_class = GetObjectClass(jni, j_ice_server);
+    jfieldID j_ice_server_uri_id =
+        GetFieldID(jni, j_ice_server_class, "uri", "Ljava/lang/String;");
+    jfieldID j_ice_server_username_id =
+        GetFieldID(jni, j_ice_server_class, "username", "Ljava/lang/String;");
+    jfieldID j_ice_server_password_id =
+        GetFieldID(jni, j_ice_server_class, "password", "Ljava/lang/String;");
+    jstring uri = reinterpret_cast<jstring>(
+        GetObjectField(jni, j_ice_server, j_ice_server_uri_id));
+    jstring username = reinterpret_cast<jstring>(
+        GetObjectField(jni, j_ice_server, j_ice_server_username_id));
+    jstring password = reinterpret_cast<jstring>(
+        GetObjectField(jni, j_ice_server, j_ice_server_password_id));
+    PeerConnectionInterface::IceServer server;
+    server.uri = JavaToStdString(jni, uri);
+    server.username = JavaToStdString(jni, username);
+    server.password = JavaToStdString(jni, password);
+    ice_servers->push_back(server);
+  }
+  CHECK_EXCEPTION(jni, "error during CallBooleanMethod");
+}
+
+// Creates the native PeerConnection; the returned reference is owned by the
+// Java caller (balanced by freePeerConnection()).  The constraints are
+// stored on |observer| so they outlive this call.
+JOW(jlong, PeerConnectionFactory_nativeCreatePeerConnection)(
+    JNIEnv *jni, jclass, jlong factory, jobject j_ice_servers,
+    jobject j_constraints, jlong observer_p) {
+  talk_base::scoped_refptr<PeerConnectionFactoryInterface> f(
+      reinterpret_cast<PeerConnectionFactoryInterface*>(factory));
+  PeerConnectionInterface::IceServers servers;
+  JavaIceServersToJsepIceServers(jni, j_ice_servers, &servers);
+  PCOJava* observer = reinterpret_cast<PCOJava*>(observer_p);
+  observer->SetConstraints(new ConstraintsWrapper(jni, j_constraints));
+  talk_base::scoped_refptr<PeerConnectionInterface> pc(f->CreatePeerConnection(
+      servers, observer->constraints(), NULL, observer));
+  return (jlong)pc.release();
+}
+
+// Reads the nativePeerConnection field of the Java PeerConnection and wraps
+// it in a scoped_refptr, pinning the object for the caller's scope.
+static talk_base::scoped_refptr<PeerConnectionInterface> ExtractNativePC(
+    JNIEnv* jni, jobject j_pc) {
+  jfieldID native_pc_id = GetFieldID(jni,
+      GetObjectClass(jni, j_pc), "nativePeerConnection", "J");
+  jlong j_p = GetLongField(jni, j_pc, native_pc_id);
+  return talk_base::scoped_refptr<PeerConnectionInterface>(
+      reinterpret_cast<PeerConnectionInterface*>(j_p));
+}
+
+// Returns the local SessionDescription as a Java object, or null if unset.
+JOW(jobject, PeerConnection_getLocalDescription)(JNIEnv* jni, jobject j_pc) {
+  const SessionDescriptionInterface* sdp =
+      ExtractNativePC(jni, j_pc)->local_description();
+  return sdp ? JavaSdpFromNativeSdp(jni, sdp) : NULL;
+}
+
+// Returns the remote SessionDescription as a Java object, or null if unset.
+JOW(jobject, PeerConnection_getRemoteDescription)(JNIEnv* jni, jobject j_pc) {
+  const SessionDescriptionInterface* sdp =
+      ExtractNativePC(jni, j_pc)->remote_description();
+  return sdp ? JavaSdpFromNativeSdp(jni, sdp) : NULL;
+}
+
+// Kicks off asynchronous offer creation; the result is delivered through
+// |j_observer|.  The ConstraintsWrapper is owned by the observer wrapper
+// (stored in its scoped_ptr member), so it lives as long as needed.
+JOW(void, PeerConnection_createOffer)(
+    JNIEnv* jni, jobject j_pc, jobject j_observer, jobject j_constraints) {
+  ConstraintsWrapper* constraints =
+      new ConstraintsWrapper(jni, j_constraints);
+  talk_base::scoped_refptr<CreateSdpObserverWrapper> observer(
+      new talk_base::RefCountedObject<CreateSdpObserverWrapper>(
+          jni, j_observer, constraints));
+  ExtractNativePC(jni, j_pc)->CreateOffer(observer, constraints);
+}
+
+// Kicks off asynchronous answer creation; the result is delivered through
+// |j_observer|.  Constraint ownership mirrors createOffer() above.
+JOW(void, PeerConnection_createAnswer)(
+    JNIEnv* jni, jobject j_pc, jobject j_observer, jobject j_constraints) {
+  ConstraintsWrapper* constraints =
+      new ConstraintsWrapper(jni, j_constraints);
+  talk_base::scoped_refptr<CreateSdpObserverWrapper> observer(
+      new talk_base::RefCountedObject<CreateSdpObserverWrapper>(
+          jni, j_observer, constraints));
+  ExtractNativePC(jni, j_pc)->CreateAnswer(observer, constraints);
+}
+
+// Helper to create a SessionDescriptionInterface from a SessionDescription.
+// Reads the Java object's |type| enum (via its canonicalForm() string) and
+// |description| field; the caller owns the returned object.
+static SessionDescriptionInterface* JavaSdpToNativeSdp(
+    JNIEnv* jni, jobject j_sdp) {
+  jfieldID j_type_id = GetFieldID(
+      jni, GetObjectClass(jni, j_sdp), "type",
+      "Lorg/webrtc/SessionDescription$Type;");
+  jobject j_type = GetObjectField(jni, j_sdp, j_type_id);
+  jmethodID j_canonical_form_id = GetMethodID(
+      jni, GetObjectClass(jni, j_type), "canonicalForm",
+      "()Ljava/lang/String;");
+  jstring j_type_string = (jstring)jni->CallObjectMethod(
+      j_type, j_canonical_form_id);
+  CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+  std::string std_type = JavaToStdString(jni, j_type_string);
+
+  jfieldID j_description_id = GetFieldID(
+      jni, GetObjectClass(jni, j_sdp), "description", "Ljava/lang/String;");
+  jstring j_description = (jstring)GetObjectField(jni, j_sdp, j_description_id);
+  std::string std_description = JavaToStdString(jni, j_description);
+
+  // NULL error argument: parse failures are reported as a NULL return.
+  return webrtc::CreateSessionDescription(
+      std_type, std_description, NULL);
+}
+
+// Applies a local description asynchronously; the outcome is reported via
+// |j_observer|.  Set operations take no constraints (hence the NULL).
+JOW(void, PeerConnection_setLocalDescription)(
+    JNIEnv* jni, jobject j_pc,
+    jobject j_observer, jobject j_sdp) {
+  talk_base::scoped_refptr<SetSdpObserverWrapper> observer(
+      new talk_base::RefCountedObject<SetSdpObserverWrapper>(
+          jni, j_observer, reinterpret_cast<ConstraintsWrapper*>(NULL)));
+  ExtractNativePC(jni, j_pc)->SetLocalDescription(
+      observer, JavaSdpToNativeSdp(jni, j_sdp));
+}
+
+// Applies a remote description asynchronously; the outcome is reported via
+// |j_observer|.  Set operations take no constraints (hence the NULL).
+JOW(void, PeerConnection_setRemoteDescription)(
+    JNIEnv* jni, jobject j_pc,
+    jobject j_observer, jobject j_sdp) {
+  talk_base::scoped_refptr<SetSdpObserverWrapper> observer(
+      new talk_base::RefCountedObject<SetSdpObserverWrapper>(
+          jni, j_observer, reinterpret_cast<ConstraintsWrapper*>(NULL)));
+  ExtractNativePC(jni, j_pc)->SetRemoteDescription(
+      observer, JavaSdpToNativeSdp(jni, j_sdp));
+}
+
+// Updates ICE servers/constraints on the live connection; returns success.
+JOW(jboolean, PeerConnection_updateIce)(
+    JNIEnv* jni, jobject j_pc, jobject j_ice_servers, jobject j_constraints) {
+  PeerConnectionInterface::IceServers ice_servers;
+  JavaIceServersToJsepIceServers(jni, j_ice_servers, &ice_servers);
+  talk_base::scoped_ptr<ConstraintsWrapper> constraints(
+      new ConstraintsWrapper(jni, j_constraints));
+  return ExtractNativePC(jni, j_pc)->UpdateIce(ice_servers, constraints.get());
+}
+
+// Parses and adds a remote ICE candidate; returns false if parsing or the
+// add fails (NULL error argument suppresses parse diagnostics).
+JOW(jboolean, PeerConnection_nativeAddIceCandidate)(
+    JNIEnv* jni, jobject j_pc, jstring j_sdp_mid,
+    jint j_sdp_mline_index, jstring j_candidate_sdp) {
+  std::string sdp_mid = JavaToStdString(jni, j_sdp_mid);
+  std::string sdp = JavaToStdString(jni, j_candidate_sdp);
+  talk_base::scoped_ptr<IceCandidateInterface> candidate(
+      webrtc::CreateIceCandidate(sdp_mid, j_sdp_mline_index, sdp, NULL));
+  return ExtractNativePC(jni, j_pc)->AddIceCandidate(candidate.get());
+}
+
+// Attaches a local stream to the connection; returns success.
+JOW(jboolean, PeerConnection_nativeAddLocalStream)(
+    JNIEnv* jni, jobject j_pc, jlong native_stream, jobject j_constraints) {
+  talk_base::scoped_ptr<ConstraintsWrapper> constraints(
+      new ConstraintsWrapper(jni, j_constraints));
+  return ExtractNativePC(jni, j_pc)->AddStream(
+      reinterpret_cast<MediaStreamInterface*>(native_stream),
+      constraints.get());
+}
+
+// Detaches a local stream from the connection.
+JOW(void, PeerConnection_nativeRemoveLocalStream)(
+    JNIEnv* jni, jobject j_pc, jlong native_stream) {
+  ExtractNativePC(jni, j_pc)->RemoveStream(
+      reinterpret_cast<MediaStreamInterface*>(native_stream));
+}
+
+// Requests stats for |native_track| (or all tracks if 0); results are
+// delivered asynchronously through |j_observer|.  Returns whether the
+// request was accepted.  Declared jboolean (not C++ bool) so the exported
+// signature uses the JNI ABI type, consistent with every other boolean
+// export in this file.
+JOW(jboolean, PeerConnection_nativeGetStats)(
+    JNIEnv* jni, jobject j_pc, jobject j_observer, jlong native_track) {
+  talk_base::scoped_refptr<StatsObserverWrapper> observer(
+      new talk_base::RefCountedObject<StatsObserverWrapper>(jni, j_observer));
+  return ExtractNativePC(jni, j_pc)->GetStats(
+      observer, reinterpret_cast<MediaStreamTrackInterface*>(native_track));
+}
+
+// Maps the native signaling state to the Java SignalingState enum constant.
+JOW(jobject, PeerConnection_signalingState)(JNIEnv* jni, jobject j_pc) {
+  PeerConnectionInterface::SignalingState state =
+      ExtractNativePC(jni, j_pc)->signaling_state();
+  return JavaEnumFromIndex(jni, "PeerConnection$SignalingState", state);
+}
+
+// Maps the native ICE connection state to the Java enum constant.
+JOW(jobject, PeerConnection_iceConnectionState)(JNIEnv* jni, jobject j_pc) {
+  PeerConnectionInterface::IceConnectionState state =
+      ExtractNativePC(jni, j_pc)->ice_connection_state();
+  return JavaEnumFromIndex(jni, "PeerConnection$IceConnectionState", state);
+}
+
+// Maps the native ICE gathering state to the Java enum constant.
+// NOTE(review): exported under "PeerGathering" rather than "PeerConnection" —
+// verify this matches the Java-side native method declaration.
+JOW(jobject, PeerGathering_iceGatheringState)(JNIEnv* jni, jobject j_pc) {
+  PeerConnectionInterface::IceGatheringState state =
+      ExtractNativePC(jni, j_pc)->ice_gathering_state();
+  return JavaEnumFromIndex(jni, "PeerGathering$IceGatheringState", state);
+}
+
+// Closes the connection; the native object is freed separately via
+// freePeerConnection().
+JOW(void, PeerConnection_close)(JNIEnv* jni, jobject j_pc) {
+  ExtractNativePC(jni, j_pc)->Close();
+}
+
+// Maps the native MediaSource state to the Java MediaSource.State enum.
+JOW(jobject, MediaSource_nativeState)(JNIEnv* jni, jclass, jlong j_p) {
+  talk_base::scoped_refptr<MediaSourceInterface> p(
+      reinterpret_cast<MediaSourceInterface*>(j_p));
+  return JavaEnumFromIndex(jni, "MediaSource$State", p->state());
+}
+
+// Looks up the capture device named |j_device_name| and creates a capturer
+// for it.  Returns 0 if the device is not found; otherwise ownership of the
+// capturer passes to the Java caller (balanced by VideoCapturer_free()).
+JOW(jlong, VideoCapturer_nativeCreateVideoCapturer)(
+    JNIEnv* jni, jclass, jstring j_device_name) {
+  std::string device_name = JavaToStdString(jni, j_device_name);
+  talk_base::scoped_ptr<cricket::DeviceManagerInterface> device_manager(
+      cricket::DeviceManagerFactory::Create());
+  CHECK(device_manager->Init(), "DeviceManager::Init() failed");
+  cricket::Device device;
+  if (!device_manager->GetVideoCaptureDevice(device_name, &device)) {
+    LOG(LS_ERROR) << "GetVideoCaptureDevice failed";
+    return 0;
+  }
+  talk_base::scoped_ptr<cricket::VideoCapturer> capturer(
+      device_manager->CreateVideoCapturer(device));
+  return (jlong)capturer.release();
+}
+
+// Creates a native GUI renderer window positioned at (x, y); ownership
+// passes to the Java caller (balanced by VideoRenderer_free()).  Parameters
+// use jint (not plain int), matching the JNI types used by every other
+// export in this file; jint is a 32-bit type so behavior is unchanged on
+// common platforms.
+JOW(jlong, VideoRenderer_nativeCreateGuiVideoRenderer)(
+    JNIEnv* jni, jclass, jint x, jint y) {
+  talk_base::scoped_ptr<VideoRendererWrapper> renderer(
+      VideoRendererWrapper::Create(
+          cricket::VideoRendererFactory::CreateGuiVideoRenderer(x, y)));
+  return (jlong)renderer.release();
+}
+
+// Wraps the Java callbacks object in a native renderer; ownership passes to
+// the Java caller (balanced by VideoRenderer_free()).
+JOW(jlong, VideoRenderer_nativeWrapVideoRenderer)(
+    JNIEnv* jni, jclass, jobject j_callbacks) {
+  talk_base::scoped_ptr<JavaVideoRendererWrapper> renderer(
+      new JavaVideoRendererWrapper(jni, j_callbacks));
+  return (jlong)renderer.release();
+}
+
+// Returns the track's id as a Java String.
+JOW(jstring, MediaStreamTrack_nativeId)(JNIEnv* jni, jclass, jlong j_p) {
+  talk_base::scoped_refptr<MediaStreamTrackInterface> p(
+      reinterpret_cast<MediaStreamTrackInterface*>(j_p));
+  return JavaStringFromStdString(jni, p->id());
+}
+
+// Returns the track's kind (e.g. "audio"/"video") as a Java String.
+JOW(jstring, MediaStreamTrack_nativeKind)(JNIEnv* jni, jclass, jlong j_p) {
+  talk_base::scoped_refptr<MediaStreamTrackInterface> p(
+      reinterpret_cast<MediaStreamTrackInterface*>(j_p));
+  return JavaStringFromStdString(jni, p->kind());
+}
+
+// Returns whether the track is currently enabled.
+JOW(jboolean, MediaStreamTrack_nativeEnabled)(JNIEnv* jni, jclass, jlong j_p) {
+  talk_base::scoped_refptr<MediaStreamTrackInterface> p(
+      reinterpret_cast<MediaStreamTrackInterface*>(j_p));
+  return p->enabled();
+}
+
+// Maps the native track state to the Java MediaStreamTrack.State enum.
+JOW(jobject, MediaStreamTrack_nativeState)(JNIEnv* jni, jclass, jlong j_p) {
+  talk_base::scoped_refptr<MediaStreamTrackInterface> p(
+      reinterpret_cast<MediaStreamTrackInterface*>(j_p));
+  return JavaEnumFromIndex(jni, "MediaStreamTrack$State", p->state());
+}
+
+// Sets the track state from the Java enum ordinal; returns success.
+// |j_new_state| must be a valid TrackState ordinal — no range check is done.
+JOW(jboolean, MediaStreamTrack_nativeSetState)(
+    JNIEnv* jni, jclass, jlong j_p, jint j_new_state) {
+  talk_base::scoped_refptr<MediaStreamTrackInterface> p(
+      reinterpret_cast<MediaStreamTrackInterface*>(j_p));
+  MediaStreamTrackInterface::TrackState new_state =
+      (MediaStreamTrackInterface::TrackState)j_new_state;
+  return p->set_state(new_state);
+}
+
+// Enables or disables the track; returns success.
+JOW(jboolean, MediaStreamTrack_nativeSetEnabled)(
+    JNIEnv* jni, jclass, jlong j_p, jboolean enabled) {
+  talk_base::scoped_refptr<MediaStreamTrackInterface> p(
+      reinterpret_cast<MediaStreamTrackInterface*>(j_p));
+  return p->set_enabled(enabled);
+}
+
+// Attaches a renderer to the video track.  The renderer is not owned by the
+// track; the Java side frees it via VideoRenderer_free().
+JOW(void, VideoTrack_nativeAddRenderer)(
+    JNIEnv* jni, jclass,
+    jlong j_video_track_pointer, jlong j_renderer_pointer) {
+  talk_base::scoped_refptr<VideoTrackInterface> track(
+      reinterpret_cast<VideoTrackInterface*>(j_video_track_pointer));
+  track->AddRenderer(
+      reinterpret_cast<VideoRendererInterface*>(j_renderer_pointer));
+}
+
+// Detaches a previously added renderer from the video track.
+JOW(void, VideoTrack_nativeRemoveRenderer)(
+    JNIEnv* jni, jclass,
+    jlong j_video_track_pointer, jlong j_renderer_pointer) {
+  talk_base::scoped_refptr<VideoTrackInterface> track(
+      reinterpret_cast<VideoTrackInterface*>(j_video_track_pointer));
+  track->RemoveRenderer(
+      reinterpret_cast<VideoRendererInterface*>(j_renderer_pointer));
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/AudioSource.java b/talk/app/webrtc/java/src/org/webrtc/AudioSource.java
new file mode 100644
index 0000000..8b7a8f7
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/AudioSource.java
@@ -0,0 +1,38 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+/**
+ * Java wrapper for a C++ AudioSourceInterface. Used as the source for one or
+ * more {@code AudioTrack} objects.
+ */
+public class AudioSource extends MediaSource {
+  /** Wraps the native AudioSourceInterface pointed to by {@code nativeSource}. */
+  public AudioSource(long nativeSource) {
+    super(nativeSource);
+  }
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/AudioTrack.java b/talk/app/webrtc/java/src/org/webrtc/AudioTrack.java
new file mode 100644
index 0000000..35d7c41
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/AudioTrack.java
@@ -0,0 +1,35 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+/** Java wrapper for a C++ AudioTrackInterface */
+public class AudioTrack extends MediaStreamTrack {
+  /** Wraps the native AudioTrackInterface pointed to by {@code nativeTrack}. */
+  public AudioTrack(long nativeTrack) {
+    super(nativeTrack);
+  }
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/IceCandidate.java b/talk/app/webrtc/java/src/org/webrtc/IceCandidate.java
new file mode 100644
index 0000000..b5d2dc9
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/IceCandidate.java
@@ -0,0 +1,48 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+/**
+ * Representation of a single ICE Candidate, mirroring
+ * {@code IceCandidateInterface} in the C++ API.
+ */
+public class IceCandidate {
+  /** Identifier of the m-line this candidate is associated with. */
+  public final String sdpMid;
+  /** Index (starting at zero) of the m-line in the SDP this candidate is for. */
+  public final int sdpMLineIndex;
+  /** SDP string representation of this candidate. */
+  public final String sdp;
+
+  public IceCandidate(String sdpMid, int sdpMLineIndex, String sdp) {
+    this.sdpMid = sdpMid;
+    this.sdpMLineIndex = sdpMLineIndex;
+    this.sdp = sdp;
+  }
+
+  // @Override added: this overrides Object.toString(), and the annotation
+  // lets the compiler enforce it.
+  @Override
+  public String toString() {
+    return sdpMid + ":" + sdpMLineIndex + ":" + sdp;
+  }
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/MediaConstraints.java b/talk/app/webrtc/java/src/org/webrtc/MediaConstraints.java
new file mode 100644
index 0000000..ef30301
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/MediaConstraints.java
@@ -0,0 +1,85 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * Description of media constraints for {@code MediaStream} and
+ * {@code PeerConnection}.
+ */
+public class MediaConstraints {
+ /** Simple String key/value pair. */
+ public static class KeyValuePair {
+ private final String key;
+ private final String value;
+
+ public KeyValuePair(String key, String value) {
+ this.key = key;
+ this.value = value;
+ }
+
+ public String getKey() {
+ return key;
+ }
+
+ public String getValue() {
+ return value;
+ }
+
+ public String toString() {
+ return key + ": " + value;
+ }
+ }
+
+
+ public final List<KeyValuePair> mandatory;
+ public final List<KeyValuePair> optional;
+
+ public MediaConstraints() {
+ mandatory = new LinkedList<KeyValuePair>();
+ optional = new LinkedList<KeyValuePair>();
+ }
+
+ private static String stringifyKeyValuePairList(List<KeyValuePair> list) {
+ StringBuilder builder = new StringBuilder("[");
+ for (KeyValuePair pair : list) {
+ if (builder.length() > 1) {
+ builder.append(", ");
+ }
+ builder.append(pair.toString());
+ }
+ return builder.append("]").toString();
+ }
+
+ public String toString() {
+ return "mandatory: " + stringifyKeyValuePairList(mandatory) +
+ ", optional: " + stringifyKeyValuePairList(optional);
+ }
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/MediaSource.java b/talk/app/webrtc/java/src/org/webrtc/MediaSource.java
new file mode 100644
index 0000000..2949049
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/MediaSource.java
@@ -0,0 +1,55 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+package org.webrtc;
+
+/** Java wrapper for a C++ MediaSourceInterface. */
+public class MediaSource {
+  /** Tracks MediaSourceInterface.SourceState */
+  public enum State {
+    INITIALIZING, LIVE, ENDED, MUTED
+  }
+
+  // Raw pointer to the native source, read by PeerConnectionFactory when
+  // creating tracks from this source.
+  final long nativeSource; // Package-protected for PeerConnectionFactory.
+
+  public MediaSource(long nativeSource) {
+    this.nativeSource = nativeSource;
+  }
+
+  /** Returns the native source's current state. */
+  public State state() {
+    return nativeState(nativeSource);
+  }
+
+  // Frees the native source. NOTE(review): calling state() after dispose()
+  // would pass a freed pointer to JNI -- confirm callers never do this.
+  void dispose() {
+    free(nativeSource);
+  }
+
+  private static native State nativeState(long pointer);
+
+  private static native void free(long nativeSource);
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/MediaStream.java b/talk/app/webrtc/java/src/org/webrtc/MediaStream.java
new file mode 100644
index 0000000..431c561
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/MediaStream.java
@@ -0,0 +1,114 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+import java.util.LinkedList;
+import java.util.List;
+
+/** Java wrapper for a C++ MediaStreamInterface. */
+public class MediaStream {
+  // Java-side mirrors of the tracks added to the native stream; kept in sync
+  // by the addTrack()/removeTrack() methods below.
+  public final List<AudioTrack> audioTracks;
+  public final List<VideoTrack> videoTracks;
+  // Package-protected for LocalMediaStream and PeerConnection.
+  final long nativeStream;
+
+  public MediaStream(long nativeStream) {
+    audioTracks = new LinkedList<AudioTrack>();
+    videoTracks = new LinkedList<VideoTrack>();
+    this.nativeStream = nativeStream;
+  }
+
+  /**
+   * Adds |track| to the native stream; the Java-side list is updated only if
+   * the native add succeeds. Returns true on success.
+   */
+  public boolean addTrack(AudioTrack track) {
+    if (nativeAddAudioTrack(nativeStream, track.nativeTrack)) {
+      audioTracks.add(track);
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Adds |track| to the native stream; the Java-side list is updated only if
+   * the native add succeeds. Returns true on success.
+   */
+  public boolean addTrack(VideoTrack track) {
+    if (nativeAddVideoTrack(nativeStream, track.nativeTrack)) {
+      videoTracks.add(track);
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Removes |track| from the native stream; the Java-side list is updated
+   * only if the native removal succeeds. Returns true on success.
+   */
+  public boolean removeTrack(AudioTrack track) {
+    if (nativeRemoveAudioTrack(nativeStream, track.nativeTrack)) {
+      audioTracks.remove(track);
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Removes |track| from the native stream; the Java-side list is updated
+   * only if the native removal succeeds. Returns true on success.
+   */
+  public boolean removeTrack(VideoTrack track) {
+    if (nativeRemoveVideoTrack(nativeStream, track.nativeTrack)) {
+      videoTracks.remove(track);
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Disposes every track in the Java-side lists and then frees the native
+   * stream. NOTE(review): this assumes the stream owns its tracks; a track
+   * shared with another stream would be disposed here too -- confirm with
+   * callers.
+   */
+  public void dispose() {
+    for (AudioTrack track : audioTracks) {
+      track.dispose();
+    }
+    audioTracks.clear();
+    for (VideoTrack track : videoTracks) {
+      track.dispose();
+    }
+    videoTracks.clear();
+    free(nativeStream);
+  }
+
+  /** Returns the native stream's label. */
+  public String label() {
+    return nativeLabel(nativeStream);
+  }
+
+  /** Debug form, e.g. "[label:A=1:V=2]". */
+  public String toString() {
+    return "[" + label() + ":A=" + audioTracks.size() +
+        ":V=" + videoTracks.size() + "]";
+  }
+
+  private static native boolean nativeAddAudioTrack(
+      long nativeStream, long nativeAudioTrack);
+
+  private static native boolean nativeAddVideoTrack(
+      long nativeStream, long nativeVideoTrack);
+
+  private static native boolean nativeRemoveAudioTrack(
+      long nativeStream, long nativeAudioTrack);
+
+  private static native boolean nativeRemoveVideoTrack(
+      long nativeStream, long nativeVideoTrack);
+
+  private static native String nativeLabel(long nativeStream);
+
+  private static native void free(long nativeStream);
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/MediaStreamTrack.java b/talk/app/webrtc/java/src/org/webrtc/MediaStreamTrack.java
new file mode 100644
index 0000000..5cd2f4c
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/MediaStreamTrack.java
@@ -0,0 +1,86 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+/** Java wrapper for a C++ MediaStreamTrackInterface. */
+public class MediaStreamTrack {
+  /** Tracks MediaStreamTrackInterface.TrackState */
+  public enum State {
+    INITIALIZING, LIVE, ENDED, FAILED
+  }
+
+  // Raw pointer to the native track; package-protected so MediaStream and
+  // PeerConnection can pass it over JNI.
+  final long nativeTrack;
+
+  public MediaStreamTrack(long nativeTrack) {
+    this.nativeTrack = nativeTrack;
+  }
+
+  /** Returns the native track's id string. */
+  public String id() {
+    return nativeId(nativeTrack);
+  }
+
+  /** Returns the native track's kind string (e.g. audio vs video). */
+  public String kind() {
+    return nativeKind(nativeTrack);
+  }
+
+  /** Returns whether the native track is currently enabled. */
+  public boolean enabled() {
+    return nativeEnabled(nativeTrack);
+  }
+
+  /** Enables or disables the native track; returns success. */
+  public boolean setEnabled(boolean enable) {
+    return nativeSetEnabled(nativeTrack, enable);
+  }
+
+  /** Returns the native track's current state. */
+  public State state() {
+    return nativeState(nativeTrack);
+  }
+
+  /**
+   * Requests a state change; returns success. The int passed over JNI is
+   * newState.ordinal(), so the Java State ordering must match the native
+   * TrackState ordering.
+   */
+  public boolean setState(State newState) {
+    return nativeSetState(nativeTrack, newState.ordinal());
+  }
+
+  // Frees the native track. NOTE(review): further calls on this object after
+  // dispose() would pass a freed pointer to JNI -- confirm callers never do
+  // this.
+  public void dispose() {
+    free(nativeTrack);
+  }
+
+  private static native String nativeId(long nativeTrack);
+
+  private static native String nativeKind(long nativeTrack);
+
+  private static native boolean nativeEnabled(long nativeTrack);
+
+  private static native boolean nativeSetEnabled(
+      long nativeTrack, boolean enabled);
+
+  private static native State nativeState(long nativeTrack);
+
+  private static native boolean nativeSetState(
+      long nativeTrack, int newState);
+
+  private static native void free(long nativeTrack);
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/PeerConnection.java b/talk/app/webrtc/java/src/org/webrtc/PeerConnection.java
new file mode 100644
index 0000000..5d08c04
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/PeerConnection.java
@@ -0,0 +1,194 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+package org.webrtc;
+
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * Java-land version of the PeerConnection APIs; wraps the C++ API
+ * http://www.webrtc.org/reference/native-apis, which in turn is inspired by the
+ * JS APIs: http://dev.w3.org/2011/webrtc/editor/webrtc.html and
+ * http://www.w3.org/TR/mediacapture-streams/
+ */
+public class PeerConnection {
+  // Load the JNI library implementing the native methods below before any
+  // instance can be used.
+  static {
+    System.loadLibrary("jingle_peerconnection_so");
+  }
+
+  /** Tracks PeerConnectionInterface::IceGatheringState */
+  public enum IceGatheringState { NEW, GATHERING, COMPLETE };
+
+
+  /** Tracks PeerConnectionInterface::IceConnectionState */
+  public enum IceConnectionState {
+    NEW, CHECKING, CONNECTED, COMPLETED, FAILED, DISCONNECTED, CLOSED
+  };
+
+  /** Tracks PeerConnectionInterface::SignalingState */
+  public enum SignalingState {
+    STABLE, HAVE_LOCAL_OFFER, HAVE_LOCAL_PRANSWER, HAVE_REMOTE_OFFER,
+    HAVE_REMOTE_PRANSWER, CLOSED
+  };
+
+  /** Java version of PeerConnectionObserver. */
+  public static interface Observer {
+    /** Triggered when the SignalingState changes. */
+    public void onSignalingChange(SignalingState newState);
+
+    /** Triggered when the IceConnectionState changes. */
+    public void onIceConnectionChange(IceConnectionState newState);
+
+    /** Triggered when the IceGatheringState changes. */
+    public void onIceGatheringChange(IceGatheringState newState);
+
+    /** Triggered when a new ICE candidate has been found. */
+    public void onIceCandidate(IceCandidate candidate);
+
+    /** Triggered on any error. */
+    public void onError();
+
+    /** Triggered when media is received on a new stream from remote peer. */
+    public void onAddStream(MediaStream stream);
+
+    /** Triggered when a remote peer close a stream. */
+    public void onRemoveStream(MediaStream stream);
+  }
+
+  /** Java version of PeerConnectionInterface.IceServer. */
+  public static class IceServer {
+    public final String uri;
+    public final String username;
+    public final String password;
+
+    /** Convenience constructor for STUN servers. */
+    public IceServer(String uri) {
+      this(uri, "", "");
+    }
+
+    public IceServer(String uri, String username, String password) {
+      this.uri = uri;
+      this.username = username;
+      this.password = password;
+    }
+
+    /** Debug form: "uri[username:password]". */
+    public String toString() {
+      return uri + "[" + username + ":" + password + "]";
+    }
+  }
+
+  // Java-side list of streams added via addStream(); disposed in dispose().
+  private final List<MediaStream> localStreams;
+  // Raw pointers to the native PeerConnection and its observer; freed in
+  // dispose().
+  private final long nativePeerConnection;
+  private final long nativeObserver;
+
+  // Package-protected: instances are created by PeerConnectionFactory, which
+  // supplies the native pointers.
+  PeerConnection(long nativePeerConnection, long nativeObserver) {
+    this.nativePeerConnection = nativePeerConnection;
+    this.nativeObserver = nativeObserver;
+    localStreams = new LinkedList<MediaStream>();
+  }
+
+  // JsepInterface.
+  public native SessionDescription getLocalDescription();
+
+  public native SessionDescription getRemoteDescription();
+
+  public native void createOffer(
+      SdpObserver observer, MediaConstraints constraints);
+
+  public native void createAnswer(
+      SdpObserver observer, MediaConstraints constraints);
+
+  public native void setLocalDescription(
+      SdpObserver observer, SessionDescription sdp);
+
+  public native void setRemoteDescription(
+      SdpObserver observer, SessionDescription sdp);
+
+  public native boolean updateIce(
+      List<IceServer> iceServers, MediaConstraints constraints);
+
+  /** Forwards |candidate|'s fields to the native AddIceCandidate; returns success. */
+  public boolean addIceCandidate(IceCandidate candidate) {
+    return nativeAddIceCandidate(
+        candidate.sdpMid, candidate.sdpMLineIndex, candidate.sdp);
+  }
+
+  /**
+   * Adds |stream| as a local stream; the Java-side list is updated only when
+   * the native add succeeds. Returns true on success.
+   */
+  public boolean addStream(
+      MediaStream stream, MediaConstraints constraints) {
+    boolean ret = nativeAddLocalStream(stream.nativeStream, constraints);
+    if (!ret) {
+      return false;
+    }
+    localStreams.add(stream);
+    return true;
+  }
+
+  /** Removes |stream| from both the native connection and the local list. */
+  public void removeStream(MediaStream stream) {
+    nativeRemoveLocalStream(stream.nativeStream);
+    localStreams.remove(stream);
+  }
+
+  /**
+   * Requests stats for |track|, or for the whole connection when |track| is
+   * null (0 is passed as the native track pointer). Results are delivered
+   * through |observer|; returns whether the request was accepted.
+   */
+  public boolean getStats(StatsObserver observer, MediaStreamTrack track) {
+    return nativeGetStats(observer, (track == null) ? 0 : track.nativeTrack);
+  }
+
+  // TODO(fischman): add support for DTMF-related methods once that API
+  // stabilizes.
+  public native SignalingState signalingState();
+
+  public native IceConnectionState iceConnectionState();
+
+  public native IceGatheringState iceGatheringState();
+
+  public native void close();
+
+  /**
+   * Closes the connection, disposes every locally-added stream, then frees
+   * the native peer connection and observer. The object must not be used
+   * after this call (the native pointers are freed).
+   */
+  public void dispose() {
+    close();
+    for (MediaStream stream : localStreams) {
+      stream.dispose();
+    }
+    localStreams.clear();
+    freePeerConnection(nativePeerConnection);
+    freeObserver(nativeObserver);
+  }
+
+  private static native void freePeerConnection(long nativePeerConnection);
+
+  private static native void freeObserver(long nativeObserver);
+
+  private native boolean nativeAddIceCandidate(
+      String sdpMid, int sdpMLineIndex, String iceCandidateSdp);
+
+  private native boolean nativeAddLocalStream(
+      long nativeStream, MediaConstraints constraints);
+
+  private native void nativeRemoveLocalStream(long nativeStream);
+
+  private native boolean nativeGetStats(
+      StatsObserver observer, long nativeTrack);
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java b/talk/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java
new file mode 100644
index 0000000..03ed03f
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java
@@ -0,0 +1,119 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+package org.webrtc;
+
+import java.util.List;
+
+/**
+ * Java wrapper for a C++ PeerConnectionFactoryInterface. Main entry point to
+ * the PeerConnection API for clients.
+ */
+public class PeerConnectionFactory {
+  // Load the JNI library implementing the native methods below before any
+  // instance can be created.
+  static {
+    System.loadLibrary("jingle_peerconnection_so");
+  }
+
+  // Raw pointer to the native factory; created in the constructor, freed in
+  // dispose().
+  private final long nativeFactory;
+
+  // |context| is an android.content.Context object, but we keep it untyped here
+  // to allow building on non-Android platforms.
+  public static native boolean initializeAndroidGlobals(Object context);
+
+  /**
+   * Creates the native factory.
+   *
+   * @throws RuntimeException if native factory creation fails (the native
+   *     call returns 0).
+   */
+  public PeerConnectionFactory() {
+    nativeFactory = nativeCreatePeerConnectionFactory();
+    if (nativeFactory == 0) {
+      throw new RuntimeException("Failed to initialize PeerConnectionFactory!");
+    }
+  }
+
+
+  /**
+   * Creates a PeerConnection, or returns null if either the native observer
+   * or the native peer connection could not be created.
+   */
+  public PeerConnection createPeerConnection(
+      List<PeerConnection.IceServer> iceServers,
+      MediaConstraints constraints,
+      PeerConnection.Observer observer) {
+    long nativeObserver = nativeCreateObserver(observer);
+    if (nativeObserver == 0) {
+      return null;
+    }
+    long nativePeerConnection = nativeCreatePeerConnection(
+        nativeFactory, iceServers, constraints, nativeObserver);
+    if (nativePeerConnection == 0) {
+      return null;
+    }
+    return new PeerConnection(nativePeerConnection, nativeObserver);
+  }
+
+  /** Wraps a newly created native local stream labeled |label|. */
+  public MediaStream createLocalMediaStream(String label) {
+    return new MediaStream(
+        nativeCreateLocalMediaStream(nativeFactory, label));
+  }
+
+  /** Creates a video source fed by |capturer|, subject to |constraints|. */
+  public VideoSource createVideoSource(
+      VideoCapturer capturer, MediaConstraints constraints) {
+    return new VideoSource(nativeCreateVideoSource(
+        nativeFactory, capturer.nativeVideoCapturer, constraints));
+  }
+
+  /** Creates a video track with id |id| fed by |source|. */
+  public VideoTrack createVideoTrack(String id, VideoSource source) {
+    return new VideoTrack(nativeCreateVideoTrack(
+        nativeFactory, id, source.nativeSource));
+  }
+
+  /** Creates an audio track with id |id|. */
+  public AudioTrack createAudioTrack(String id) {
+    return new AudioTrack(nativeCreateAudioTrack(nativeFactory, id));
+  }
+
+  /**
+   * Frees the native factory. NOTE(review): objects created by this factory
+   * are not freed here -- presumably each must be dispose()d separately;
+   * confirm ownership with callers.
+   */
+  public void dispose() {
+    freeFactory(nativeFactory);
+  }
+
+  private static native long nativeCreatePeerConnectionFactory();
+
+  private static native long nativeCreateObserver(
+      PeerConnection.Observer observer);
+
+  private static native long nativeCreatePeerConnection(
+      long nativeFactory, List<PeerConnection.IceServer> iceServers,
+      MediaConstraints constraints, long nativeObserver);
+
+  private static native long nativeCreateLocalMediaStream(
+      long nativeFactory, String label);
+
+  private static native long nativeCreateVideoSource(
+      long nativeFactory, long nativeVideoCapturer,
+      MediaConstraints constraints);
+
+  private static native long nativeCreateVideoTrack(
+      long nativeFactory, String id, long nativeVideoSource);
+
+  private static native long nativeCreateAudioTrack(
+      long nativeFactory, String id);
+
+  private static native void freeFactory(long nativeFactory);
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/SdpObserver.java b/talk/app/webrtc/java/src/org/webrtc/SdpObserver.java
new file mode 100644
index 0000000..c9eb14a
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/SdpObserver.java
@@ -0,0 +1,43 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+/** Interface for observing SDP-related events. */
+public interface SdpObserver {
+  /** Called on success of Create{Offer,Answer}() with the created |sdp|. */
+  public void onCreateSuccess(SessionDescription sdp);
+
+  /** Called on success of Set{Local,Remote}Description(). */
+  public void onSetSuccess();
+
+  /** Called on error of Create{Offer,Answer}(); |error| describes the failure. */
+  public void onCreateFailure(String error);
+
+  /** Called on error of Set{Local,Remote}Description(); |error| describes the failure. */
+  public void onSetFailure(String error);
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/SessionDescription.java b/talk/app/webrtc/java/src/org/webrtc/SessionDescription.java
new file mode 100644
index 0000000..982db8f
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/SessionDescription.java
@@ -0,0 +1,57 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+package org.webrtc;
+
+/**
+ * Description of an RFC 4566 Session.
+ * SDPs are passed as serialized Strings in Java-land and are materialized
+ * to SessionDescriptionInterface as appropriate in the JNI layer.
+ */
+public class SessionDescription {
+  /** Java-land enum version of SessionDescriptionInterface's type() string. */
+  public static enum Type {
+    OFFER, PRANSWER, ANSWER;
+
+    /** Returns the lower-case wire form of this type, e.g. "offer". */
+    public String canonicalForm() {
+      // Pin the locale: the default-locale overload mis-cases dotted/dotless
+      // "i" under e.g. the Turkish locale, which would break the
+      // OFFER <-> "offer" round-trip.
+      return name().toLowerCase(java.util.Locale.US);
+    }
+
+    /**
+     * Parses the lower-case wire form produced by canonicalForm().
+     *
+     * @throws IllegalArgumentException if |canonical| names no Type.
+     */
+    public static Type fromCanonicalForm(String canonical) {
+      return Type.valueOf(Type.class, canonical.toUpperCase(java.util.Locale.US));
+    }
+  }
+
+  public final Type type;           // Offer/pranswer/answer.
+  public final String description;  // The serialized SDP text.
+
+  public SessionDescription(Type type, String description) {
+    this.type = type;
+    this.description = description;
+  }
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/StatsObserver.java b/talk/app/webrtc/java/src/org/webrtc/StatsObserver.java
new file mode 100644
index 0000000..e61d8f7
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/StatsObserver.java
@@ -0,0 +1,34 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+/** Interface for observing Stats reports (see webrtc::StatsObservers). */
+public interface StatsObserver {
+  /** Called when the reports are ready. */
+  public void onComplete(StatsReport[] reports);
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/StatsReport.java b/talk/app/webrtc/java/src/org/webrtc/StatsReport.java
new file mode 100644
index 0000000..8285ba2
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/StatsReport.java
@@ -0,0 +1,72 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+/** Java version of webrtc::StatsReport. */
+public class StatsReport {
+
+ /** Java version of webrtc::StatsReport::Value. */
+ public static class Value {
+ public final String name;
+ public final String value;
+
+ public Value(String name, String value) {
+ this.name = name;
+ this.value = value;
+ }
+
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("[").append(name).append(": ").append(value).append("]");
+ return builder.toString();
+ }
+ }
+
+ public final String id;
+ public final String type;
+ // Time since 1970-01-01T00:00:00Z in milliseconds.
+ public final double timestamp;
+ public final Value[] values;
+
+ public StatsReport(String id, String type, double timestamp, Value[] values) {
+ this.id = id;
+ this.type = type;
+ this.timestamp = timestamp;
+ this.values = values;
+ }
+
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("id: ").append(id).append(", type: ").append(type)
+ .append(", timestamp: ").append(timestamp).append(", values: ");
+ for (int i = 0; i < values.length; ++i) {
+ builder.append(values[i].toString()).append(", ");
+ }
+ return builder.toString();
+ }
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/VideoCapturer.java b/talk/app/webrtc/java/src/org/webrtc/VideoCapturer.java
new file mode 100644
index 0000000..eab5797
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/VideoCapturer.java
@@ -0,0 +1,53 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+/** Java version of VideoCapturerInterface. */
+public class VideoCapturer {
+ final long nativeVideoCapturer;
+
+ private VideoCapturer(long nativeVideoCapturer) {
+ this.nativeVideoCapturer = nativeVideoCapturer;
+ }
+
+ public static VideoCapturer create(String deviceName) {
+ long nativeVideoCapturer = nativeCreateVideoCapturer(deviceName);
+ if (nativeVideoCapturer == 0) {
+ return null;
+ }
+ return new VideoCapturer(nativeVideoCapturer);
+ }
+
+ public void dispose() {
+ free(nativeVideoCapturer);
+ }
+
+ private static native long nativeCreateVideoCapturer(String deviceName);
+
+ private static native void free(long nativeVideoCapturer);
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/VideoRenderer.java b/talk/app/webrtc/java/src/org/webrtc/VideoRenderer.java
new file mode 100644
index 0000000..4cc341a
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/VideoRenderer.java
@@ -0,0 +1,136 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * Java version of VideoRendererInterface. In addition to allowing clients to
+ * define their own rendering behavior (by passing in a Callbacks object), this
+ * class also provides a createGui() method for creating a GUI-rendering window
+ * on various platforms.
+ */
+public class VideoRenderer {
+
+ /** Java version of cricket::VideoFrame. */
+ public static class I420Frame {
+ public final int width;
+ public final int height;
+ public final int[] yuvStrides;
+ public final ByteBuffer[] yuvPlanes;
+
+ /**
+ * Construct a frame of the given dimensions with the specified planar
+ * data. If |yuvPlanes| is null, new planes of the appropriate sizes are
+ * allocated.
+ */
+ public I420Frame(
+ int width, int height, int[] yuvStrides, ByteBuffer[] yuvPlanes) {
+ this.width = width;
+ this.height = height;
+ this.yuvStrides = yuvStrides;
+ if (yuvPlanes == null) {
+ yuvPlanes = new ByteBuffer[3];
+ yuvPlanes[0] = ByteBuffer.allocateDirect(yuvStrides[0] * height);
+ yuvPlanes[1] = ByteBuffer.allocateDirect(yuvStrides[1] * height);
+ yuvPlanes[2] = ByteBuffer.allocateDirect(yuvStrides[2] * height);
+ }
+ this.yuvPlanes = yuvPlanes;
+ }
+
+ /**
+ * Copy the planes out of |source| into |this| and return |this|. Calling
+ * this with mismatched frame dimensions is a programming error and will
+ * likely crash.
+ */
+ public I420Frame copyFrom(I420Frame source) {
+ if (!Arrays.equals(yuvStrides, source.yuvStrides) ||
+ width != source.width || height != source.height) {
+ throw new RuntimeException("Mismatched dimensions! Source: " +
+ source.toString() + ", destination: " + toString());
+ }
+ copyPlane(source.yuvPlanes[0], yuvPlanes[0]);
+ copyPlane(source.yuvPlanes[1], yuvPlanes[1]);
+ copyPlane(source.yuvPlanes[2], yuvPlanes[2]);
+ return this;
+ }
+
+ @Override
+ public String toString() {
+ return width + "x" + height + ":" + yuvStrides[0] + ":" + yuvStrides[1] +
+ ":" + yuvStrides[2];
+ }
+
+ // Copy the bytes out of |src| and into |dst|, ignoring and overwriting
+    // position & limit in both buffers.
+ private void copyPlane(ByteBuffer src, ByteBuffer dst) {
+ src.position(0).limit(src.capacity());
+ dst.put(src);
+ dst.position(0).limit(dst.capacity());
+ }
+}
+
+ /** The real meat of VideoRendererInterface. */
+ public static interface Callbacks {
+ public void setSize(int width, int height);
+ public void renderFrame(I420Frame frame);
+ }
+
+ // |this| either wraps a native (GUI) renderer or a client-supplied Callbacks
+ // (Java) implementation; so exactly one of these will be non-0/null.
+ final long nativeVideoRenderer;
+ private final Callbacks callbacks;
+
+ public static VideoRenderer createGui(int x, int y) {
+ long nativeVideoRenderer = nativeCreateGuiVideoRenderer(x, y);
+ if (nativeVideoRenderer == 0) {
+ return null;
+ }
+ return new VideoRenderer(nativeVideoRenderer);
+ }
+
+ public VideoRenderer(Callbacks callbacks) {
+ nativeVideoRenderer = nativeWrapVideoRenderer(callbacks);
+ this.callbacks = callbacks;
+ }
+
+ private VideoRenderer(long nativeVideoRenderer) {
+ this.nativeVideoRenderer = nativeVideoRenderer;
+ callbacks = null;
+ }
+
+ public void dispose() {
+ free(nativeVideoRenderer);
+ }
+
+ private static native long nativeCreateGuiVideoRenderer(int x, int y);
+ private static native long nativeWrapVideoRenderer(Callbacks callbacks);
+
+ private static native void free(long nativeVideoRenderer);
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/VideoSource.java b/talk/app/webrtc/java/src/org/webrtc/VideoSource.java
new file mode 100644
index 0000000..f29f312
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/VideoSource.java
@@ -0,0 +1,36 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+package org.webrtc;
+
+/** Java version of VideoSourceInterface. */
+public class VideoSource extends MediaSource {
+ public VideoSource(long nativeSource) {
+ super(nativeSource);
+ }
+}
diff --git a/talk/app/webrtc/java/src/org/webrtc/VideoTrack.java b/talk/app/webrtc/java/src/org/webrtc/VideoTrack.java
new file mode 100644
index 0000000..90e5c95
--- /dev/null
+++ b/talk/app/webrtc/java/src/org/webrtc/VideoTrack.java
@@ -0,0 +1,65 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+import java.util.LinkedList;
+
+/** Java version of VideoTrackInterface. */
+public class VideoTrack extends MediaStreamTrack {
+ private final LinkedList<VideoRenderer> renderers;
+
+ public VideoTrack(long nativeTrack) {
+ super(nativeTrack);
+ renderers = new LinkedList<VideoRenderer>();
+ }
+
+ public void addRenderer(VideoRenderer renderer) {
+ renderers.add(renderer);
+ nativeAddRenderer(nativeTrack, renderer.nativeVideoRenderer);
+ }
+
+ public void removeRenderer(VideoRenderer renderer) {
+ if (!renderers.remove(renderer)) {
+ return;
+ }
+ nativeRemoveRenderer(nativeTrack, renderer.nativeVideoRenderer);
+ renderer.dispose();
+ }
+
+ public void dispose() {
+ while (!renderers.isEmpty()) {
+ removeRenderer(renderers.getFirst());
+ }
+ }
+
+ private static native void nativeAddRenderer(
+ long nativeTrack, long nativeRenderer);
+
+ private static native void nativeRemoveRenderer(
+ long nativeTrack, long nativeRenderer);
+}
diff --git a/talk/app/webrtc/javatests/libjingle_peerconnection_java_unittest.sh b/talk/app/webrtc/javatests/libjingle_peerconnection_java_unittest.sh
new file mode 100644
index 0000000..0ecb730
--- /dev/null
+++ b/talk/app/webrtc/javatests/libjingle_peerconnection_java_unittest.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+#
+# libjingle
+# Copyright 2013, Google Inc.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Wrapper script for running the Java tests under this directory.
+
+# Exit with error immediately if any subcommand fails.
+set -e
+
+# Change directory to the PRODUCT_DIR (e.g. out/Debug).
+cd -P $(dirname $0)
+
+export CLASSPATH=`pwd`/junit-4.11.jar
+CLASSPATH=$CLASSPATH:`pwd`/libjingle_peerconnection_test.jar
+CLASSPATH=$CLASSPATH:`pwd`/libjingle_peerconnection.jar
+
+export LD_LIBRARY_PATH=`pwd`
+
+# The RHS value is replaced by the build action that copies this script to
+# <(PRODUCT_DIR).
+export JAVA_HOME=GYP_JAVA_HOME
+
+${JAVA_HOME}/bin/java -Xcheck:jni -classpath $CLASSPATH \
+ junit.textui.TestRunner org.webrtc.PeerConnectionTest
diff --git a/talk/app/webrtc/javatests/src/org/webrtc/PeerConnectionTest.java b/talk/app/webrtc/javatests/src/org/webrtc/PeerConnectionTest.java
new file mode 100644
index 0000000..cdd8c73
--- /dev/null
+++ b/talk/app/webrtc/javatests/src/org/webrtc/PeerConnectionTest.java
@@ -0,0 +1,532 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+import junit.framework.TestCase;
+
+import org.junit.Test;
+import org.webrtc.PeerConnection.IceConnectionState;
+import org.webrtc.PeerConnection.IceGatheringState;
+import org.webrtc.PeerConnection.SignalingState;
+
+import java.lang.ref.WeakReference;
+import java.util.IdentityHashMap;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+/** End-to-end tests for PeerConnection.java. */
+public class PeerConnectionTest extends TestCase {
+ // Set to true to render video.
+ private static final boolean RENDER_TO_GUI = false;
+
+ private static class ObserverExpectations implements PeerConnection.Observer,
+ VideoRenderer.Callbacks,
+ StatsObserver {
+ private int expectedIceCandidates = 0;
+ private int expectedErrors = 0;
+ private LinkedList<Integer> expectedSetSizeDimensions =
+ new LinkedList<Integer>(); // Alternating width/height.
+ private int expectedFramesDelivered = 0;
+ private LinkedList<SignalingState> expectedSignalingChanges =
+ new LinkedList<SignalingState>();
+ private LinkedList<IceConnectionState> expectedIceConnectionChanges =
+ new LinkedList<IceConnectionState>();
+ private LinkedList<IceGatheringState> expectedIceGatheringChanges =
+ new LinkedList<IceGatheringState>();
+ private LinkedList<String> expectedAddStreamLabels =
+ new LinkedList<String>();
+ private LinkedList<String> expectedRemoveStreamLabels =
+ new LinkedList<String>();
+ public LinkedList<IceCandidate> gotIceCandidates =
+ new LinkedList<IceCandidate>();
+ private Map<MediaStream, WeakReference<VideoRenderer>> renderers =
+ new IdentityHashMap<MediaStream, WeakReference<VideoRenderer>>();
+ private int expectedStatsCallbacks = 0;
+ private LinkedList<StatsReport[]> gotStatsReports =
+ new LinkedList<StatsReport[]>();
+
+ public synchronized void expectIceCandidates(int count) {
+ expectedIceCandidates += count;
+ }
+
+ public synchronized void onIceCandidate(IceCandidate candidate) {
+ --expectedIceCandidates;
+ // We don't assert expectedIceCandidates >= 0 because it's hard to know
+ // how many to expect, in general. We only use expectIceCandidates to
+ // assert a minimal count.
+ gotIceCandidates.add(candidate);
+ }
+
+ public synchronized void expectError() {
+ ++expectedErrors;
+ }
+
+ public synchronized void onError() {
+ assertTrue(--expectedErrors >= 0);
+ }
+
+ public synchronized void expectSetSize(int width, int height) {
+ expectedSetSizeDimensions.add(width);
+ expectedSetSizeDimensions.add(height);
+ }
+
+ @Override
+ public synchronized void setSize(int width, int height) {
+ assertEquals(width, expectedSetSizeDimensions.removeFirst().intValue());
+ assertEquals(height, expectedSetSizeDimensions.removeFirst().intValue());
+ }
+
+ public synchronized void expectFramesDelivered(int count) {
+ expectedFramesDelivered += count;
+ }
+
+ @Override
+ public synchronized void renderFrame(VideoRenderer.I420Frame frame) {
+ --expectedFramesDelivered;
+ }
+
+ public synchronized void expectSignalingChange(SignalingState newState) {
+ expectedSignalingChanges.add(newState);
+ }
+
+ @Override
+ public synchronized void onSignalingChange(SignalingState newState) {
+ assertEquals(expectedSignalingChanges.removeFirst(), newState);
+ }
+
+ public synchronized void expectIceConnectionChange(
+ IceConnectionState newState) {
+ expectedIceConnectionChanges.add(newState);
+ }
+
+ @Override
+ public void onIceConnectionChange(IceConnectionState newState) {
+ assertEquals(expectedIceConnectionChanges.removeFirst(), newState);
+ }
+
+ public synchronized void expectIceGatheringChange(
+ IceGatheringState newState) {
+ expectedIceGatheringChanges.add(newState);
+ }
+
+ @Override
+ public void onIceGatheringChange(IceGatheringState newState) {
+ // It's fine to get a variable number of GATHERING messages before
+ // COMPLETE fires (depending on how long the test runs) so we don't assert
+ // any particular count.
+ if (newState == IceGatheringState.GATHERING) {
+ return;
+ }
+ assertEquals(expectedIceGatheringChanges.removeFirst(), newState);
+ }
+
+ public synchronized void expectAddStream(String label) {
+ expectedAddStreamLabels.add(label);
+ }
+
+ public synchronized void onAddStream(MediaStream stream) {
+ assertEquals(expectedAddStreamLabels.removeFirst(), stream.label());
+ assertEquals(1, stream.videoTracks.size());
+ assertEquals(1, stream.audioTracks.size());
+ assertTrue(stream.videoTracks.get(0).id().endsWith("LMSv0"));
+ assertTrue(stream.audioTracks.get(0).id().endsWith("LMSa0"));
+ assertEquals("video", stream.videoTracks.get(0).kind());
+ assertEquals("audio", stream.audioTracks.get(0).kind());
+ VideoRenderer renderer = createVideoRenderer(this);
+ stream.videoTracks.get(0).addRenderer(renderer);
+ assertNull(renderers.put(
+ stream, new WeakReference<VideoRenderer>(renderer)));
+ }
+
+ public synchronized void expectRemoveStream(String label) {
+ expectedRemoveStreamLabels.add(label);
+ }
+
+ public synchronized void onRemoveStream(MediaStream stream) {
+ assertEquals(expectedRemoveStreamLabels.removeFirst(), stream.label());
+ WeakReference<VideoRenderer> renderer = renderers.remove(stream);
+ assertNotNull(renderer);
+ assertNotNull(renderer.get());
+ assertEquals(1, stream.videoTracks.size());
+ stream.videoTracks.get(0).removeRenderer(renderer.get());
+ }
+
+ @Override
+ public synchronized void onComplete(StatsReport[] reports) {
+ if (--expectedStatsCallbacks < 0) {
+ throw new RuntimeException("Unexpected stats report: " + reports);
+ }
+ gotStatsReports.add(reports);
+ }
+
+ public synchronized void expectStatsCallback() {
+ ++expectedStatsCallbacks;
+ }
+
+ public synchronized LinkedList<StatsReport[]> takeStatsReports() {
+ LinkedList<StatsReport[]> got = gotStatsReports;
+ gotStatsReports = new LinkedList<StatsReport[]>();
+ return got;
+ }
+
+ public synchronized boolean areAllExpectationsSatisfied() {
+ return expectedIceCandidates <= 0 && // See comment in onIceCandidate.
+ expectedErrors == 0 &&
+ expectedSignalingChanges.size() == 0 &&
+ expectedIceConnectionChanges.size() == 0 &&
+ expectedIceGatheringChanges.size() == 0 &&
+ expectedAddStreamLabels.size() == 0 &&
+ expectedRemoveStreamLabels.size() == 0 &&
+ expectedSetSizeDimensions.isEmpty() &&
+ expectedFramesDelivered <= 0 &&
+ expectedStatsCallbacks == 0;
+ }
+
+ public void waitForAllExpectationsToBeSatisfied() {
+ // TODO(fischman): problems with this approach:
+ // - come up with something better than a poll loop
+ // - avoid serializing expectations explicitly; the test is not as robust
+ // as it could be because it must place expectations between wait
+ // statements very precisely (e.g. frame must not arrive before its
+ // expectation, and expectation must not be registered so early as to
+ // stall a wait). Use callbacks to fire off dependent steps instead of
+ // explicitly waiting, so there can be just a single wait at the end of
+ // the test.
+ while (!areAllExpectationsSatisfied()) {
+ try {
+ Thread.sleep(10);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+ }
+
+ private static class SdpObserverLatch implements SdpObserver {
+ private boolean success = false;
+ private SessionDescription sdp = null;
+ private String error = null;
+ private CountDownLatch latch = new CountDownLatch(1);
+
+ public SdpObserverLatch() {}
+
+ public void onCreateSuccess(SessionDescription sdp) {
+ this.sdp = sdp;
+ onSetSuccess();
+ }
+
+ public void onSetSuccess() {
+ success = true;
+ latch.countDown();
+ }
+
+ public void onCreateFailure(String error) {
+ onSetFailure(error);
+ }
+
+ public void onSetFailure(String error) {
+ this.error = error;
+ latch.countDown();
+ }
+
+ public boolean await() {
+ try {
+ assertTrue(latch.await(1000, TimeUnit.MILLISECONDS));
+ return getSuccess();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public boolean getSuccess() {
+ return success;
+ }
+
+ public SessionDescription getSdp() {
+ return sdp;
+ }
+
+ public String getError() {
+ return error;
+ }
+ }
+
+ static int videoWindowsMapped = -1;
+
+ private static class TestRenderer implements VideoRenderer.Callbacks {
+ public int width = -1;
+ public int height = -1;
+ public int numFramesDelivered = 0;
+
+ public void setSize(int width, int height) {
+ assertEquals(this.width, -1);
+ assertEquals(this.height, -1);
+ this.width = width;
+ this.height = height;
+ }
+
+ public void renderFrame(VideoRenderer.I420Frame frame) {
+ ++numFramesDelivered;
+ }
+ }
+
+ private static VideoRenderer createVideoRenderer(
+ ObserverExpectations observer) {
+ if (!RENDER_TO_GUI) {
+ return new VideoRenderer(observer);
+ }
+ ++videoWindowsMapped;
+ assertTrue(videoWindowsMapped < 4);
+ int x = videoWindowsMapped % 2 != 0 ? 700 : 0;
+ int y = videoWindowsMapped >= 2 ? 0 : 500;
+ return VideoRenderer.createGui(x, y);
+ }
+
+ // Return a weak reference to test that ownership is correctly held by
+ // PeerConnection, not by test code.
+ private static WeakReference<MediaStream> addTracksToPC(
+ PeerConnectionFactory factory, PeerConnection pc,
+ VideoSource videoSource,
+ String streamLabel, String videoTrackId, String audioTrackId,
+ ObserverExpectations observer) {
+ MediaStream lMS = factory.createLocalMediaStream(streamLabel);
+ VideoTrack videoTrack =
+ factory.createVideoTrack(videoTrackId, videoSource);
+ assertNotNull(videoTrack);
+ VideoRenderer videoRenderer = createVideoRenderer(observer);
+ assertNotNull(videoRenderer);
+ videoTrack.addRenderer(videoRenderer);
+ lMS.addTrack(videoTrack);
+ // Just for fun, let's remove and re-add the track.
+ lMS.removeTrack(videoTrack);
+ lMS.addTrack(videoTrack);
+ lMS.addTrack(factory.createAudioTrack(audioTrackId));
+ pc.addStream(lMS, new MediaConstraints());
+ return new WeakReference<MediaStream>(lMS);
+ }
+
+ private static void assertEquals(
+ SessionDescription lhs, SessionDescription rhs) {
+ assertEquals(lhs.type, rhs.type);
+ assertEquals(lhs.description, rhs.description);
+ }
+
+ @Test
+ public void testCompleteSession() throws Exception {
+ CountDownLatch testDone = new CountDownLatch(1);
+
+ PeerConnectionFactory factory = new PeerConnectionFactory();
+ MediaConstraints constraints = new MediaConstraints();
+
+ LinkedList<PeerConnection.IceServer> iceServers =
+ new LinkedList<PeerConnection.IceServer>();
+ iceServers.add(new PeerConnection.IceServer(
+ "stun:stun.l.google.com:19302"));
+ iceServers.add(new PeerConnection.IceServer(
+ "turn:fake.example.com", "fakeUsername", "fakePassword"));
+ ObserverExpectations offeringExpectations = new ObserverExpectations();
+ PeerConnection offeringPC = factory.createPeerConnection(
+ iceServers, constraints, offeringExpectations);
+ assertNotNull(offeringPC);
+
+ ObserverExpectations answeringExpectations = new ObserverExpectations();
+ PeerConnection answeringPC = factory.createPeerConnection(
+ iceServers, constraints, answeringExpectations);
+ assertNotNull(answeringPC);
+
+ // We want to use the same camera for offerer & answerer, so create it here
+ // instead of in addTracksToPC.
+ VideoSource videoSource = factory.createVideoSource(
+ VideoCapturer.create(""), new MediaConstraints());
+
+ // TODO(fischman): the track ids here and in the addTracksToPC() call
+ // below hard-code the <mediaStreamLabel>[av]<index> scheme used in the
+ // serialized SDP, because the C++ API doesn't auto-translate.
+ // Drop |label| params from {Audio,Video}Track-related APIs once
+ // https://code.google.com/p/webrtc/issues/detail?id=1253 is fixed.
+ WeakReference<MediaStream> oLMS = addTracksToPC(
+ factory, offeringPC, videoSource, "oLMS", "oLMSv0", "oLMSa0",
+ offeringExpectations);
+
+ SdpObserverLatch sdpLatch = new SdpObserverLatch();
+ offeringPC.createOffer(sdpLatch, constraints);
+ assertTrue(sdpLatch.await());
+ SessionDescription offerSdp = sdpLatch.getSdp();
+ assertEquals(offerSdp.type, SessionDescription.Type.OFFER);
+ assertFalse(offerSdp.description.isEmpty());
+
+ sdpLatch = new SdpObserverLatch();
+ answeringExpectations.expectSignalingChange(
+ SignalingState.HAVE_REMOTE_OFFER);
+ answeringExpectations.expectAddStream("oLMS");
+ answeringPC.setRemoteDescription(sdpLatch, offerSdp);
+ answeringExpectations.waitForAllExpectationsToBeSatisfied();
+ assertEquals(
+ PeerConnection.SignalingState.STABLE, offeringPC.signalingState());
+ assertTrue(sdpLatch.await());
+ assertNull(sdpLatch.getSdp());
+
+ WeakReference<MediaStream> aLMS = addTracksToPC(
+ factory, answeringPC, videoSource, "aLMS", "aLMSv0", "aLMSa0",
+ answeringExpectations);
+
+ sdpLatch = new SdpObserverLatch();
+ answeringPC.createAnswer(sdpLatch, constraints);
+ assertTrue(sdpLatch.await());
+ SessionDescription answerSdp = sdpLatch.getSdp();
+ assertEquals(answerSdp.type, SessionDescription.Type.ANSWER);
+ assertFalse(answerSdp.description.isEmpty());
+
+ offeringExpectations.expectIceCandidates(2);
+ answeringExpectations.expectIceCandidates(2);
+
+ sdpLatch = new SdpObserverLatch();
+ answeringExpectations.expectSignalingChange(SignalingState.STABLE);
+ answeringPC.setLocalDescription(sdpLatch, answerSdp);
+ assertTrue(sdpLatch.await());
+ assertNull(sdpLatch.getSdp());
+
+ sdpLatch = new SdpObserverLatch();
+ offeringExpectations.expectSignalingChange(SignalingState.HAVE_LOCAL_OFFER);
+ offeringPC.setLocalDescription(sdpLatch, offerSdp);
+ assertTrue(sdpLatch.await());
+ assertNull(sdpLatch.getSdp());
+ sdpLatch = new SdpObserverLatch();
+ offeringExpectations.expectSignalingChange(SignalingState.STABLE);
+ offeringExpectations.expectAddStream("aLMS");
+ offeringPC.setRemoteDescription(sdpLatch, answerSdp);
+ assertTrue(sdpLatch.await());
+ assertNull(sdpLatch.getSdp());
+
+ offeringExpectations.waitForAllExpectationsToBeSatisfied();
+ answeringExpectations.waitForAllExpectationsToBeSatisfied();
+
+ assertEquals(offeringPC.getLocalDescription().type, offerSdp.type);
+ assertEquals(offeringPC.getRemoteDescription().type, answerSdp.type);
+ assertEquals(answeringPC.getLocalDescription().type, answerSdp.type);
+ assertEquals(answeringPC.getRemoteDescription().type, offerSdp.type);
+
+ if (!RENDER_TO_GUI) {
+ offeringExpectations.expectSetSize(640, 480);
+ offeringExpectations.expectSetSize(640, 480);
+ answeringExpectations.expectSetSize(640, 480);
+ answeringExpectations.expectSetSize(640, 480);
+ // Wait for at least some frames to be delivered at each end (number
+ // chosen arbitrarily).
+ offeringExpectations.expectFramesDelivered(10);
+ answeringExpectations.expectFramesDelivered(10);
+ }
+
+ offeringExpectations.expectIceConnectionChange(
+ IceConnectionState.CHECKING);
+ offeringExpectations.expectIceConnectionChange(
+ IceConnectionState.CONNECTED);
+ answeringExpectations.expectIceConnectionChange(
+ IceConnectionState.CHECKING);
+ answeringExpectations.expectIceConnectionChange(
+ IceConnectionState.CONNECTED);
+
+ offeringExpectations.expectIceGatheringChange(IceGatheringState.COMPLETE);
+ answeringExpectations.expectIceGatheringChange(IceGatheringState.COMPLETE);
+
+ for (IceCandidate candidate : offeringExpectations.gotIceCandidates) {
+ answeringPC.addIceCandidate(candidate);
+ }
+ offeringExpectations.gotIceCandidates.clear();
+ for (IceCandidate candidate : answeringExpectations.gotIceCandidates) {
+ offeringPC.addIceCandidate(candidate);
+ }
+ answeringExpectations.gotIceCandidates.clear();
+
+ offeringExpectations.waitForAllExpectationsToBeSatisfied();
+ answeringExpectations.waitForAllExpectationsToBeSatisfied();
+
+ assertEquals(
+ PeerConnection.SignalingState.STABLE, offeringPC.signalingState());
+ assertEquals(
+ PeerConnection.SignalingState.STABLE, answeringPC.signalingState());
+
+ if (RENDER_TO_GUI) {
+ try {
+ Thread.sleep(3000);
+ } catch (Throwable t) {
+ throw new RuntimeException(t);
+ }
+ }
+
+ // TODO(fischman) MOAR test ideas:
+ // - Test that PC.removeStream() works; requires a second
+ // createOffer/createAnswer dance.
+ // - audit each place that uses |constraints| for specifying non-trivial
+ // constraints (and ensure they're honored).
+ // - test error cases
+ // - ensure reasonable coverage of _jni.cc is achieved. Coverage is
+ // extra-important because of all the free-text (class/method names, etc)
+ // in JNI-style programming; make sure no typos!
+ // - Test that shutdown mid-interaction is crash-free.
+
+ // Free the Java-land objects, collect them, and sleep a bit to make sure we
+ // don't get late-arrival crashes after the Java-land objects have been
+ // freed.
+ shutdownPC(offeringPC, offeringExpectations);
+ offeringPC = null;
+ shutdownPC(answeringPC, answeringExpectations);
+ answeringPC = null;
+ System.gc();
+ Thread.sleep(100);
+ }
+
// Shuts down |pc|, verifying that getStats() works both before and after
// close() and that close() fires the expected CLOSED state transitions.
// The PC is dispose()d at the end, so |pc| must not be used afterwards.
private static void shutdownPC(
    PeerConnection pc, ObserverExpectations expectations) {
  // Stats must be retrievable while the PC is still open.
  expectations.expectStatsCallback();
  assertTrue(pc.getStats(expectations, null));
  expectations.waitForAllExpectationsToBeSatisfied();
  // close() should drive both ICE and signaling to their CLOSED states.
  expectations.expectIceConnectionChange(IceConnectionState.CLOSED);
  expectations.expectSignalingChange(SignalingState.CLOSED);
  pc.close();
  expectations.waitForAllExpectationsToBeSatisfied();
  // Stats must still be retrievable after close() (but before dispose()).
  expectations.expectStatsCallback();
  assertTrue(pc.getStats(expectations, null));
  expectations.waitForAllExpectationsToBeSatisfied();

  System.out.println("FYI stats: ");
  int reportIndex = -1;
  for (StatsReport[] reports : expectations.takeStatsReports()) {
    System.out.println(" Report #" + (++reportIndex));
    for (int i = 0; i < reports.length; ++i) {
      System.out.println(" " + reports[i].toString());
    }
  }
  // Exactly two getStats() calls above, so indices 0 and 1 were printed.
  assertEquals(1, reportIndex);
  System.out.println("End stats.");

  pc.dispose();
}
+}
diff --git a/talk/app/webrtc/jsep.h b/talk/app/webrtc/jsep.h
new file mode 100644
index 0000000..5f28fc8
--- /dev/null
+++ b/talk/app/webrtc/jsep.h
@@ -0,0 +1,164 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Interfaces matching the draft-ietf-rtcweb-jsep-01.
+
+#ifndef TALK_APP_WEBRTC_JSEP_H_
+#define TALK_APP_WEBRTC_JSEP_H_
+
+#include <string>
+#include <vector>
+
+#include "talk/base/basictypes.h"
+#include "talk/base/refcount.h"
+
+namespace cricket {
+class SessionDescription;
+class Candidate;
+} // namespace cricket
+
+namespace webrtc {
+
// Describes where and why SDP parsing failed. Filled in by the
// CreateIceCandidate()/CreateSessionDescription() overloads that take an
// SdpParseError out-parameter.
// Note: struct members are public by default, so no access specifier is
// needed (the original redundant "public:" has been removed).
struct SdpParseError {
  // The sdp line that causes the error.
  std::string line;
  // Explains the error.
  std::string description;
};
+
// Class representation of an ICE candidate.
// An instance of this interface is supposed to be owned by one class at
// a time and is therefore not expected to be thread safe.
class IceCandidateInterface {
 public:
  virtual ~IceCandidateInterface() {}
  // If present, this contains the identifier of the "media stream
  // identification" as defined in [RFC 3388] for the m-line this candidate
  // is associated with.
  virtual std::string sdp_mid() const = 0;
  // This indicates the index (starting at zero) of the m-line in the SDP
  // this candidate is associated with.
  virtual int sdp_mline_index() const = 0;
  // The parsed candidate attributes.
  virtual const cricket::Candidate& candidate() const = 0;
  // Creates a SDP-ized form of this candidate.
  virtual bool ToString(std::string* out) const = 0;
};
+
// Creates an IceCandidateInterface based on an SDP candidate string.
// Returns NULL if the sdp string can't be parsed.
// The caller takes ownership of the returned object.
// TODO(ronghuawu): Deprecated.
IceCandidateInterface* CreateIceCandidate(const std::string& sdp_mid,
                                          int sdp_mline_index,
                                          const std::string& sdp);

// Same as above; |error| can be NULL if the caller doesn't care about the
// failure reason.
IceCandidateInterface* CreateIceCandidate(const std::string& sdp_mid,
                                          int sdp_mline_index,
                                          const std::string& sdp,
                                          SdpParseError* error);
+
// This class represents a collection of candidates for a specific m-line.
// This class is used in SessionDescriptionInterface to represent all known
// candidates for a certain m-line.
class IceCandidateCollection {
 public:
  virtual ~IceCandidateCollection() {}
  // Number of candidates in the collection.
  virtual size_t count() const = 0;
  // Returns true if an equivalent |candidate| exists in the collection.
  virtual bool HasCandidate(const IceCandidateInterface* candidate) const = 0;
  // Returns the candidate at |index|; the collection retains ownership.
  virtual const IceCandidateInterface* at(size_t index) const = 0;
};
+
// Class representation of a Session description.
// An instance of this interface is supposed to be owned by one class at
// a time and is therefore not expected to be thread safe.
class SessionDescriptionInterface {
 public:
  // Supported types; the possible values returned by type().
  static const char kOffer[];
  static const char kPrAnswer[];
  static const char kAnswer[];

  virtual ~SessionDescriptionInterface() {}
  // The underlying libjingle session description; ownership is retained by
  // this object.
  virtual cricket::SessionDescription* description() = 0;
  virtual const cricket::SessionDescription* description() const = 0;
  // Get the session id and session version, which are defined based on
  // RFC 4566 for the SDP o= line.
  virtual std::string session_id() const = 0;
  virtual std::string session_version() const = 0;
  // One of kOffer/kPrAnswer/kAnswer.
  virtual std::string type() const = 0;
  // Adds the specified candidate to the description.
  // Ownership is not transferred.
  // Returns false if the session description does not have a media section that
  // corresponds to the |candidate| label.
  virtual bool AddCandidate(const IceCandidateInterface* candidate) = 0;
  // Returns the number of m= lines in the session description.
  virtual size_t number_of_mediasections() const = 0;
  // Returns a collection of all candidates that belong to a certain m-line.
  virtual const IceCandidateCollection* candidates(
      size_t mediasection_index) const = 0;
  // Serializes the description to SDP.
  virtual bool ToString(std::string* out) const = 0;
};
+
// Creates a SessionDescriptionInterface based on an SDP string and the type.
// Returns NULL if the sdp string can't be parsed or the type is unsupported.
// The caller takes ownership of the returned object.
// TODO(ronghuawu): Deprecated.
SessionDescriptionInterface* CreateSessionDescription(const std::string& type,
                                                      const std::string& sdp);

// Same as above; |error| can be NULL if the caller doesn't care about the
// failure reason.
SessionDescriptionInterface* CreateSessionDescription(const std::string& type,
                                                      const std::string& sdp,
                                                      SdpParseError* error);
+
// Jsep CreateOffer and CreateAnswer callback interface.
class CreateSessionDescriptionObserver : public talk_base::RefCountInterface {
 public:
  // The implementation of the CreateSessionDescriptionObserver takes
  // the ownership of the |desc|.
  virtual void OnSuccess(SessionDescriptionInterface* desc) = 0;
  virtual void OnFailure(const std::string& error) = 0;

 protected:
  // Protected destructor: instances are ref-counted and presumably destroyed
  // only through the RefCountInterface implementation -- TODO confirm.
  ~CreateSessionDescriptionObserver() {}
};
+
// Jsep SetLocalDescription and SetRemoteDescription callback interface.
class SetSessionDescriptionObserver : public talk_base::RefCountInterface {
 public:
  virtual void OnSuccess() = 0;
  virtual void OnFailure(const std::string& error) = 0;

 protected:
  // Protected destructor: instances are ref-counted and presumably destroyed
  // only through the RefCountInterface implementation -- TODO confirm.
  ~SetSessionDescriptionObserver() {}
};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_JSEP_H_
diff --git a/talk/app/webrtc/jsepicecandidate.cc b/talk/app/webrtc/jsepicecandidate.cc
new file mode 100644
index 0000000..13cc812
--- /dev/null
+++ b/talk/app/webrtc/jsepicecandidate.cc
@@ -0,0 +1,105 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/jsepicecandidate.h"
+
+#include <vector>
+
+#include "talk/app/webrtc/webrtcsdp.h"
+#include "talk/base/stringencode.h"
+
+namespace webrtc {
+
// Convenience overload: same as the four-argument version below, but
// discards the parse-error details.
IceCandidateInterface* CreateIceCandidate(const std::string& sdp_mid,
                                          int sdp_mline_index,
                                          const std::string& sdp) {
  return CreateIceCandidate(sdp_mid, sdp_mline_index, sdp, NULL);
}
+
// Creates a JsepIceCandidate by parsing the candidate line |sdp|.
// Returns NULL on parse failure; |error| (if non-NULL) is forwarded to the
// SDP parser to receive failure details. The caller takes ownership of the
// returned object.
IceCandidateInterface* CreateIceCandidate(const std::string& sdp_mid,
                                          int sdp_mline_index,
                                          const std::string& sdp,
                                          SdpParseError* error) {
  JsepIceCandidate* jsep_ice = new JsepIceCandidate(sdp_mid, sdp_mline_index);
  if (!jsep_ice->Initialize(sdp, error)) {
    delete jsep_ice;
    return NULL;
  }
  return jsep_ice;
}
+
// Constructs a candidate with only its m-line association; the candidate
// attributes are filled in later via Initialize() or SetCandidate().
JsepIceCandidate::JsepIceCandidate(const std::string& sdp_mid,
                                   int sdp_mline_index)
    : sdp_mid_(sdp_mid),
      sdp_mline_index_(sdp_mline_index) {
}

// Constructs a fully-populated candidate (copies |candidate|).
JsepIceCandidate::JsepIceCandidate(const std::string& sdp_mid,
                                   int sdp_mline_index,
                                   const cricket::Candidate& candidate)
    : sdp_mid_(sdp_mid),
      sdp_mline_index_(sdp_mline_index),
      candidate_(candidate) {
}
+
JsepIceCandidate::~JsepIceCandidate() {
}

// Parses the single SDP candidate line |sdp| into this object.
// |err| can be NULL if the caller doesn't care about the failure reason.
bool JsepIceCandidate::Initialize(const std::string& sdp, SdpParseError* err) {
  return SdpDeserializeCandidate(sdp, this, err);
}
+
+bool JsepIceCandidate::ToString(std::string* out) const {
+ if (!out)
+ return false;
+ *out = SdpSerializeCandidate(*this);
+ return !out->empty();
+}
+
+JsepCandidateCollection::~JsepCandidateCollection() {
+ for (std::vector<JsepIceCandidate*>::iterator it = candidates_.begin();
+ it != candidates_.end(); ++it) {
+ delete *it;
+ }
+}
+
+bool JsepCandidateCollection::HasCandidate(
+ const IceCandidateInterface* candidate) const {
+ bool ret = false;
+ for (std::vector<JsepIceCandidate*>::const_iterator it = candidates_.begin();
+ it != candidates_.end(); ++it) {
+ if ((*it)->sdp_mid() == candidate->sdp_mid() &&
+ (*it)->sdp_mline_index() == candidate->sdp_mline_index() &&
+ (*it)->candidate().IsEquivalent(candidate->candidate())) {
+ ret = true;
+ break;
+ }
+ }
+ return ret;
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/jsepicecandidate.h b/talk/app/webrtc/jsepicecandidate.h
new file mode 100644
index 0000000..54de950
--- /dev/null
+++ b/talk/app/webrtc/jsepicecandidate.h
@@ -0,0 +1,92 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Implements the IceCandidateInterface.
+
+#ifndef TALK_APP_WEBRTC_JSEPICECANDIDATE_H_
+#define TALK_APP_WEBRTC_JSEPICECANDIDATE_H_
+
+#include <string>
+
+#include "talk/app/webrtc/jsep.h"
+#include "talk/base/constructormagic.h"
+#include "talk/p2p/base/candidate.h"
+
+namespace webrtc {
+
// Concrete IceCandidateInterface that can be built either from a
// cricket::Candidate directly or by parsing an SDP candidate line.
class JsepIceCandidate : public IceCandidateInterface {
 public:
  JsepIceCandidate(const std::string& sdp_mid, int sdp_mline_index);
  JsepIceCandidate(const std::string& sdp_mid, int sdp_mline_index,
                   const cricket::Candidate& candidate);
  ~JsepIceCandidate();
  // Parses the single SDP candidate line |sdp| into this object.
  // |err| can be NULL if the caller doesn't care about the failure reason.
  bool Initialize(const std::string& sdp, SdpParseError* err);
  // Replaces the stored candidate attributes (copies |candidate|).
  void SetCandidate(const cricket::Candidate& candidate) {
    candidate_ = candidate;
  }

  virtual std::string sdp_mid() const { return sdp_mid_; }
  virtual int sdp_mline_index() const { return sdp_mline_index_; }
  virtual const cricket::Candidate& candidate() const {
    return candidate_;
  }

  virtual bool ToString(std::string* out) const;

 private:
  std::string sdp_mid_;
  int sdp_mline_index_;
  cricket::Candidate candidate_;

  DISALLOW_COPY_AND_ASSIGN(JsepIceCandidate);
};
+
// Implementation of IceCandidateCollection.
// This implementation stores JsepIceCandidates.
class JsepCandidateCollection : public IceCandidateCollection {
 public:
  // Deletes all owned candidates.
  ~JsepCandidateCollection();
  virtual size_t count() const {
    return candidates_.size();
  }
  virtual bool HasCandidate(const IceCandidateInterface* candidate) const;
  // Adds and takes ownership of the JsepIceCandidate.
  virtual void add(JsepIceCandidate* candidate) {
    candidates_.push_back(candidate);
  }
  virtual const IceCandidateInterface* at(size_t index) const {
    return candidates_[index];
  }

 private:
  // Owned; deleted in the destructor.
  // NOTE(review): the compiler-generated copy constructor/assignment would
  // duplicate these raw pointers, causing a double delete when both copies
  // are destroyed. This type is stored by value in a std::vector (see
  // JsepSessionDescription::candidate_collection_), where reallocation
  // copies elements -- confirm collections are never copied while non-empty.
  std::vector<JsepIceCandidate*> candidates_;
};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_JSEPICECANDIDATE_H_
diff --git a/talk/app/webrtc/jsepsessiondescription.cc b/talk/app/webrtc/jsepsessiondescription.cc
new file mode 100644
index 0000000..bc65ca5
--- /dev/null
+++ b/talk/app/webrtc/jsepsessiondescription.cc
@@ -0,0 +1,193 @@
+/* libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/jsepsessiondescription.h"
+
+#include "talk/app/webrtc/webrtcsdp.h"
+#include "talk/base/stringencode.h"
+#include "talk/session/media/mediasession.h"
+
+using talk_base::scoped_ptr;
+using cricket::SessionDescription;
+
+namespace webrtc {
+
+static const char* kSupportedTypes[] = {
+ JsepSessionDescription::kOffer,
+ JsepSessionDescription::kPrAnswer,
+ JsepSessionDescription::kAnswer
+};
+
+static bool IsTypeSupported(const std::string& type) {
+ bool type_supported = false;
+ for (size_t i = 0; i < ARRAY_SIZE(kSupportedTypes); ++i) {
+ if (kSupportedTypes[i] == type) {
+ type_supported = true;
+ break;
+ }
+ }
+ return type_supported;
+}
+
// Printable names of the supported session description types (the values
// returned by SessionDescriptionInterface::type()); declared in jsep.h.
const char SessionDescriptionInterface::kOffer[] = "offer";
const char SessionDescriptionInterface::kPrAnswer[] = "pranswer";
const char SessionDescriptionInterface::kAnswer[] = "answer";
+
// Default video encoder settings; declared in jsepsessiondescription.h,
// where the width/height are documented as maximums.
const int JsepSessionDescription::kDefaultVideoCodecId = 100;
const int JsepSessionDescription::kDefaultVideoCodecFramerate = 30;
const char JsepSessionDescription::kDefaultVideoCodecName[] = "VP8";
const int JsepSessionDescription::kMaxVideoCodecWidth = 1280;
const int JsepSessionDescription::kMaxVideoCodecHeight = 720;
const int JsepSessionDescription::kDefaultVideoCodecPreference = 1;
+
// Convenience overload: same as the three-argument version below, but
// discards the parse-error details.
SessionDescriptionInterface* CreateSessionDescription(const std::string& type,
                                                      const std::string& sdp) {
  return CreateSessionDescription(type, sdp, NULL);
}
+
// Creates a JsepSessionDescription of |type| by parsing |sdp|.
// Returns NULL when |type| is unsupported (|error| is not touched in that
// case) or when parsing fails (|error|, if non-NULL, is forwarded to the
// parser for failure details). The caller takes ownership of the result.
SessionDescriptionInterface* CreateSessionDescription(const std::string& type,
                                                      const std::string& sdp,
                                                      SdpParseError* error) {
  if (!IsTypeSupported(type)) {
    return NULL;
  }

  JsepSessionDescription* jsep_desc = new JsepSessionDescription(type);
  if (!jsep_desc->Initialize(sdp, error)) {
    delete jsep_desc;
    return NULL;
  }
  return jsep_desc;
}
+
// Constructs an empty description of the given |type|; one of the
// Initialize() overloads must be called before the object is usable.
JsepSessionDescription::JsepSessionDescription(const std::string& type)
    : type_(type) {
}

JsepSessionDescription::~JsepSessionDescription() {}
+
// Takes ownership of |description| and records the o= line identifiers.
// Returns false (and takes no ownership) only when |description| is NULL.
bool JsepSessionDescription::Initialize(
    cricket::SessionDescription* description,
    const std::string& session_id,
    const std::string& session_version) {
  if (!description)
    return false;

  session_id_ = session_id;
  session_version_ = session_version;
  description_.reset(description);
  // Create one (initially empty) candidate collection per m-line.
  candidate_collection_.resize(number_of_mediasections());
  return true;
}
+
// Populates this object by parsing the full SDP blob |sdp|.
// |error| can be NULL if the caller doesn't care about the failure reason.
bool JsepSessionDescription::Initialize(const std::string& sdp,
                                        SdpParseError* error) {
  return SdpDeserialize(sdp, this, error);
}
+
// Adds a copy of |candidate| to the collection of the m-line it belongs to.
// Ownership of |candidate| is NOT transferred (a new JsepIceCandidate is
// stored). Returns false when the candidate is NULL, carries a negative
// m-line index, cannot be matched to an m-line, or the m-line has no
// transport info; returns true (without storing a duplicate) when an
// equivalent candidate is already present.
bool JsepSessionDescription::AddCandidate(
    const IceCandidateInterface* candidate) {
  if (!candidate || candidate->sdp_mline_index() < 0)
    return false;
  size_t mediasection_index = 0;
  // Resolve the target m-line; a non-empty sdp_mid takes precedence over
  // the numeric index (see GetMediasectionIndex).
  if (!GetMediasectionIndex(candidate, &mediasection_index)) {
    return false;
  }
  if (mediasection_index >= number_of_mediasections())
    return false;
  if (candidate_collection_[mediasection_index].HasCandidate(candidate)) {
    return true;  // Silently ignore this candidate if we already have it.
  }
  const std::string content_name =
      description_->contents()[mediasection_index].name;
  const cricket::TransportInfo* transport_info =
      description_->GetTransportInfoByName(content_name);
  if (!transport_info) {
    return false;
  }

  // Fill in missing ICE credentials from the m-line's transport description
  // before storing the copy.
  cricket::Candidate updated_candidate = candidate->candidate();
  if (updated_candidate.username().empty()) {
    updated_candidate.set_username(transport_info->description.ice_ufrag);
  }
  if (updated_candidate.password().empty()) {
    updated_candidate.set_password(transport_info->description.ice_pwd);
  }

  // Note: mediasection_index (size_t) is implicitly narrowed to the int
  // sdp_mline_index parameter of the JsepIceCandidate constructor.
  candidate_collection_[mediasection_index].add(
      new JsepIceCandidate(candidate->sdp_mid(),
                           mediasection_index,
                           updated_candidate));
  return true;
}
+
+size_t JsepSessionDescription::number_of_mediasections() const {
+ if (!description_)
+ return 0;
+ return description_->contents().size();
+}
+
+const IceCandidateCollection* JsepSessionDescription::candidates(
+ size_t mediasection_index) const {
+ if (mediasection_index >= candidate_collection_.size())
+ return NULL;
+ return &candidate_collection_[mediasection_index];
+}
+
+bool JsepSessionDescription::ToString(std::string* out) const {
+ if (!description_ || !out)
+ return false;
+ *out = SdpSerialize(*this);
+ return !out->empty();
+}
+
// Resolves which m-line |candidate| belongs to and writes it to |index|.
// A non-empty sdp_mid takes precedence over sdp_mline_index: if a mid is
// present but matches no content name, that is treated as an error.
bool JsepSessionDescription::GetMediasectionIndex(
    const IceCandidateInterface* candidate,
    size_t* index) {
  if (!candidate || !index) {
    return false;
  }
  // Default to the numeric m-line index carried by the candidate.
  *index = static_cast<size_t>(candidate->sdp_mline_index());
  if (description_ && !candidate->sdp_mid().empty()) {
    bool found = false;
    // Try to match the sdp_mid with content name.
    for (size_t i = 0; i < description_->contents().size(); ++i) {
      if (candidate->sdp_mid() == description_->contents().at(i).name) {
        *index = i;
        found = true;
        break;
      }
    }
    if (!found) {
      // If the sdp_mid is presented but we can't find a match, we consider
      // this as an error.
      return false;
    }
  }
  return true;
}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/jsepsessiondescription.h b/talk/app/webrtc/jsepsessiondescription.h
new file mode 100644
index 0000000..7ca7a22
--- /dev/null
+++ b/talk/app/webrtc/jsepsessiondescription.h
@@ -0,0 +1,106 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Implements the SessionDescriptionInterface.
+
+#ifndef TALK_APP_WEBRTC_JSEPSESSIONDESCRIPTION_H_
+#define TALK_APP_WEBRTC_JSEPSESSIONDESCRIPTION_H_
+
+#include <string>
+#include <vector>
+
+#include "talk/app/webrtc/jsep.h"
+#include "talk/app/webrtc/jsepicecandidate.h"
+#include "talk/base/scoped_ptr.h"
+
+namespace cricket {
+class SessionDescription;
+}
+
+namespace webrtc {
+
// Concrete SessionDescriptionInterface backed by a
// cricket::SessionDescription plus one candidate collection per m-line.
class JsepSessionDescription : public SessionDescriptionInterface {
 public:
  explicit JsepSessionDescription(const std::string& type);
  virtual ~JsepSessionDescription();

  // Populates this object by parsing the full SDP blob |sdp|.
  // |error| can be NULL if the caller doesn't care about the failure reason.
  bool Initialize(const std::string& sdp, SdpParseError* error);

  // Takes ownership of |description|.
  bool Initialize(cricket::SessionDescription* description,
      const std::string& session_id,
      const std::string& session_version);

  // Returns the underlying description; ownership is retained. May be NULL
  // before Initialize() succeeds.
  virtual cricket::SessionDescription* description() {
    return description_.get();
  }
  virtual const cricket::SessionDescription* description() const {
    return description_.get();
  }
  virtual std::string session_id() const {
    return session_id_;
  }
  virtual std::string session_version() const {
    return session_version_;
  }
  virtual std::string type() const {
    return type_;
  }
  // Allow changing the type. Used for testing.
  void set_type(const std::string& type) { type_ = type; }
  virtual bool AddCandidate(const IceCandidateInterface* candidate);
  virtual size_t number_of_mediasections() const;
  virtual const IceCandidateCollection* candidates(
      size_t mediasection_index) const;
  virtual bool ToString(std::string* out) const;

  // Default video encoder settings. The resolution is the max resolution.
  // TODO(perkj): Implement proper negotiation of video resolution.
  static const int kDefaultVideoCodecId;
  static const int kDefaultVideoCodecFramerate;
  static const char kDefaultVideoCodecName[];
  static const int kMaxVideoCodecWidth;
  static const int kMaxVideoCodecHeight;
  static const int kDefaultVideoCodecPreference;

 private:
  talk_base::scoped_ptr<cricket::SessionDescription> description_;
  std::string session_id_;
  std::string session_version_;
  std::string type_;
  // Parallel to description_->contents(): candidates for each m-line.
  std::vector<JsepCandidateCollection> candidate_collection_;

  bool GetMediasectionIndex(const IceCandidateInterface* candidate,
                            size_t* index);

  DISALLOW_COPY_AND_ASSIGN(JsepSessionDescription);
};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_JSEPSESSIONDESCRIPTION_H_
diff --git a/talk/app/webrtc/jsepsessiondescription_unittest.cc b/talk/app/webrtc/jsepsessiondescription_unittest.cc
new file mode 100644
index 0000000..83f67cb
--- /dev/null
+++ b/talk/app/webrtc/jsepsessiondescription_unittest.cc
@@ -0,0 +1,223 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+
+#include "talk/app/webrtc/jsepicecandidate.h"
+#include "talk/app/webrtc/jsepsessiondescription.h"
+#include "talk/base/gunit.h"
+#include "talk/base/helpers.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/stringencode.h"
+#include "talk/p2p/base/candidate.h"
+#include "talk/p2p/base/constants.h"
+#include "talk/p2p/base/sessiondescription.h"
+#include "talk/session/media/mediasession.h"
+
+using webrtc::IceCandidateCollection;
+using webrtc::IceCandidateInterface;
+using webrtc::JsepIceCandidate;
+using webrtc::JsepSessionDescription;
+using webrtc::SessionDescriptionInterface;
+using talk_base::scoped_ptr;
+
// ICE credentials used by the tests below. The *Voice/*Video values match
// the transport descriptions built in CreateCricketSessionDescription().
static const char kCandidateUfrag[] = "ufrag";
static const char kCandidatePwd[] = "pwd";
static const char kCandidateUfragVoice[] = "ufrag_voice";
static const char kCandidatePwdVoice[] = "pwd_voice";
static const char kCandidateUfragVideo[] = "ufrag_video";
static const char kCandidatePwdVideo[] = "pwd_video";
+
+// This creates a session description with both audio and video media contents.
+// In SDP this is described by two m lines, one audio and one video.
+static cricket::SessionDescription* CreateCricketSessionDescription() {
+ cricket::SessionDescription* desc(new cricket::SessionDescription());
+ // AudioContentDescription
+ scoped_ptr<cricket::AudioContentDescription> audio(
+ new cricket::AudioContentDescription());
+
+ // VideoContentDescription
+ scoped_ptr<cricket::VideoContentDescription> video(
+ new cricket::VideoContentDescription());
+
+ audio->AddCodec(cricket::AudioCodec(103, "ISAC", 16000, 0, 0, 0));
+ desc->AddContent(cricket::CN_AUDIO, cricket::NS_JINGLE_RTP,
+ audio.release());
+
+ video->AddCodec(cricket::VideoCodec(120, "VP8", 640, 480, 30, 0));
+ desc->AddContent(cricket::CN_VIDEO, cricket::NS_JINGLE_RTP,
+ video.release());
+
+ EXPECT_TRUE(desc->AddTransportInfo(
+ cricket::TransportInfo(
+ cricket::CN_AUDIO,
+ cricket::TransportDescription(
+ cricket::NS_GINGLE_P2P,
+ std::vector<std::string>(),
+ kCandidateUfragVoice, kCandidatePwdVoice,
+ cricket::ICEMODE_FULL, NULL,
+ cricket::Candidates()))));
+ EXPECT_TRUE(desc->AddTransportInfo(
+ cricket::TransportInfo(cricket::CN_VIDEO,
+ cricket::TransportDescription(
+ cricket::NS_GINGLE_P2P,
+ std::vector<std::string>(),
+ kCandidateUfragVideo, kCandidatePwdVideo,
+ cricket::ICEMODE_FULL, NULL,
+ cricket::Candidates()))));
+ return desc;
+}
+
+class JsepSessionDescriptionTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ int port = 1234;
+ talk_base::SocketAddress address("127.0.0.1", port++);
+ cricket::Candidate candidate("rtp", cricket::ICE_CANDIDATE_COMPONENT_RTP,
+ "udp", address, 1, "",
+ "", "local", "eth0", 0, "1");
+ candidate_ = candidate;
+ const std::string session_id =
+ talk_base::ToString(talk_base::CreateRandomId64());
+ const std::string session_version =
+ talk_base::ToString(talk_base::CreateRandomId());
+ jsep_desc_.reset(new JsepSessionDescription("dummy"));
+ ASSERT_TRUE(jsep_desc_->Initialize(CreateCricketSessionDescription(),
+ session_id, session_version));
+ }
+
+ std::string Serialize(const SessionDescriptionInterface* desc) {
+ std::string sdp;
+ EXPECT_TRUE(desc->ToString(&sdp));
+ EXPECT_FALSE(sdp.empty());
+ return sdp;
+ }
+
+ SessionDescriptionInterface* DeSerialize(const std::string& sdp) {
+ JsepSessionDescription* desc(new JsepSessionDescription("dummy"));
+ EXPECT_TRUE(desc->Initialize(sdp, NULL));
+ return desc;
+ }
+
+ cricket::Candidate candidate_;
+ talk_base::scoped_ptr<JsepSessionDescription> jsep_desc_;
+};
+
+// Test that number_of_mediasections() returns the number of media contents in
+// a session description.
+TEST_F(JsepSessionDescriptionTest, CheckSessionDescription) {
+ EXPECT_EQ(2u, jsep_desc_->number_of_mediasections());
+}
+
+// Test that we can add a candidate to a session description.
+TEST_F(JsepSessionDescriptionTest, AddCandidateWithoutMid) {
+ JsepIceCandidate jsep_candidate("", 0, candidate_);
+ EXPECT_TRUE(jsep_desc_->AddCandidate(&jsep_candidate));
+ const IceCandidateCollection* ice_candidates = jsep_desc_->candidates(0);
+ ASSERT_TRUE(ice_candidates != NULL);
+ EXPECT_EQ(1u, ice_candidates->count());
+ const IceCandidateInterface* ice_candidate = ice_candidates->at(0);
+ ASSERT_TRUE(ice_candidate != NULL);
+ candidate_.set_username(kCandidateUfragVoice);
+ candidate_.set_password(kCandidatePwdVoice);
+ EXPECT_TRUE(ice_candidate->candidate().IsEquivalent(candidate_));
+ EXPECT_EQ(0, ice_candidate->sdp_mline_index());
+ EXPECT_EQ(0u, jsep_desc_->candidates(1)->count());
+}
+
+TEST_F(JsepSessionDescriptionTest, AddCandidateWithMid) {
+ // mid and m-line index don't match, in this case mid is preferred.
+ JsepIceCandidate jsep_candidate("video", 0, candidate_);
+ EXPECT_TRUE(jsep_desc_->AddCandidate(&jsep_candidate));
+ EXPECT_EQ(0u, jsep_desc_->candidates(0)->count());
+ const IceCandidateCollection* ice_candidates = jsep_desc_->candidates(1);
+ ASSERT_TRUE(ice_candidates != NULL);
+ EXPECT_EQ(1u, ice_candidates->count());
+ const IceCandidateInterface* ice_candidate = ice_candidates->at(0);
+ ASSERT_TRUE(ice_candidate != NULL);
+ candidate_.set_username(kCandidateUfragVideo);
+ candidate_.set_password(kCandidatePwdVideo);
+ EXPECT_TRUE(ice_candidate->candidate().IsEquivalent(candidate_));
+ // The mline index should have been updated according to mid.
+ EXPECT_EQ(1, ice_candidate->sdp_mline_index());
+}
+
+TEST_F(JsepSessionDescriptionTest, AddCandidateAlreadyHasUfrag) {
+ candidate_.set_username(kCandidateUfrag);
+ candidate_.set_password(kCandidatePwd);
+ JsepIceCandidate jsep_candidate("audio", 0, candidate_);
+ EXPECT_TRUE(jsep_desc_->AddCandidate(&jsep_candidate));
+ const IceCandidateCollection* ice_candidates = jsep_desc_->candidates(0);
+ ASSERT_TRUE(ice_candidates != NULL);
+ EXPECT_EQ(1u, ice_candidates->count());
+ const IceCandidateInterface* ice_candidate = ice_candidates->at(0);
+ ASSERT_TRUE(ice_candidate != NULL);
+ candidate_.set_username(kCandidateUfrag);
+ candidate_.set_password(kCandidatePwd);
+ EXPECT_TRUE(ice_candidate->candidate().IsEquivalent(candidate_));
+
+ EXPECT_EQ(0u, jsep_desc_->candidates(1)->count());
+}
+
+// Test that we can not add a candidate if there is no corresponding media
+// content in the session description.
+TEST_F(JsepSessionDescriptionTest, AddBadCandidate) {
+ JsepIceCandidate bad_candidate1("", 55, candidate_);
+ EXPECT_FALSE(jsep_desc_->AddCandidate(&bad_candidate1));
+
+ JsepIceCandidate bad_candidate2("some weird mid", 0, candidate_);
+ EXPECT_FALSE(jsep_desc_->AddCandidate(&bad_candidate2));
+}
+
+// Test that we can serialize a JsepSessionDescription and deserialize it again.
+TEST_F(JsepSessionDescriptionTest, SerializeDeserialize) {
+ std::string sdp = Serialize(jsep_desc_.get());
+
+ scoped_ptr<SessionDescriptionInterface> parsed_jsep_desc(DeSerialize(sdp));
+ EXPECT_EQ(2u, parsed_jsep_desc->number_of_mediasections());
+
+ std::string parsed_sdp = Serialize(parsed_jsep_desc.get());
+ EXPECT_EQ(sdp, parsed_sdp);
+}
+
+// Tests that we can serialize and deserialize a JsepSesssionDescription
+// with candidates.
+TEST_F(JsepSessionDescriptionTest, SerializeDeserializeWithCandidates) {
+ std::string sdp = Serialize(jsep_desc_.get());
+
+ // Add a candidate and check that the serialized result is different.
+ JsepIceCandidate jsep_candidate("audio", 0, candidate_);
+ EXPECT_TRUE(jsep_desc_->AddCandidate(&jsep_candidate));
+ std::string sdp_with_candidate = Serialize(jsep_desc_.get());
+ EXPECT_NE(sdp, sdp_with_candidate);
+
+ scoped_ptr<SessionDescriptionInterface> parsed_jsep_desc(
+ DeSerialize(sdp_with_candidate));
+ std::string parsed_sdp_with_candidate = Serialize(parsed_jsep_desc.get());
+
+ EXPECT_EQ(sdp_with_candidate, parsed_sdp_with_candidate);
+}
diff --git a/talk/app/webrtc/localaudiosource.cc b/talk/app/webrtc/localaudiosource.cc
new file mode 100644
index 0000000..9706c07
--- /dev/null
+++ b/talk/app/webrtc/localaudiosource.cc
@@ -0,0 +1,127 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/localaudiosource.h"
+
+#include <vector>
+
+#include "talk/media/base/mediaengine.h"
+#include "talk/app/webrtc/mediaconstraintsinterface.h"
+
+using webrtc::MediaConstraintsInterface;
+using webrtc::MediaSourceInterface;
+
+namespace webrtc {
+
// Constraint keys.
// They are declared as static members in mediaconstraintsinterface.h
// Each key names a boolean audio-processing switch; the string values are
// what incoming MediaConstraintsInterface entries are matched against in
// FromConstraints() below.
const char MediaConstraintsInterface::kEchoCancellation[] =
    "googEchoCancellation";
const char MediaConstraintsInterface::kExperimentalEchoCancellation[] =
    "googEchoCancellation2";
const char MediaConstraintsInterface::kAutoGainControl[] =
    "googAutoGainControl";
const char MediaConstraintsInterface::kExperimentalAutoGainControl[] =
    "googAutoGainControl2";
const char MediaConstraintsInterface::kNoiseSuppression[] =
    "googNoiseSuppression";
const char MediaConstraintsInterface::kHighpassFilter[] =
    "googHighpassFilter";
const char MediaConstraintsInterface::kInternalAecDump[] = "internalAecDump";
+
+namespace {
+
+// Convert constraints to audio options. Return false if constraints are
+// invalid.
+bool FromConstraints(const MediaConstraintsInterface::Constraints& constraints,
+ cricket::AudioOptions* options) {
+ bool success = true;
+ MediaConstraintsInterface::Constraints::const_iterator iter;
+
+ // This design relies on the fact that all the audio constraints are actually
+ // "options", i.e. boolean-valued and always satisfiable. If the constraints
+ // are extended to include non-boolean values or actual format constraints,
+ // a different algorithm will be required.
+ for (iter = constraints.begin(); iter != constraints.end(); ++iter) {
+ bool value = false;
+
+ if (!talk_base::FromString(iter->value, &value)) {
+ success = false;
+ continue;
+ }
+
+ if (iter->key == MediaConstraintsInterface::kEchoCancellation)
+ options->echo_cancellation.Set(value);
+ else if (iter->key ==
+ MediaConstraintsInterface::kExperimentalEchoCancellation)
+ options->experimental_aec.Set(value);
+ else if (iter->key == MediaConstraintsInterface::kAutoGainControl)
+ options->auto_gain_control.Set(value);
+ else if (iter->key ==
+ MediaConstraintsInterface::kExperimentalAutoGainControl)
+ options->experimental_agc.Set(value);
+ else if (iter->key == MediaConstraintsInterface::kNoiseSuppression)
+ options->noise_suppression.Set(value);
+ else if (iter->key == MediaConstraintsInterface::kHighpassFilter)
+ options->highpass_filter.Set(value);
+ else if (iter->key == MediaConstraintsInterface::kInternalAecDump)
+ options->aec_dump.Set(value);
+ else
+ success = false;
+ }
+ return success;
+}
+
+} // namespace
+
+talk_base::scoped_refptr<LocalAudioSource> LocalAudioSource::Create(
+ const MediaConstraintsInterface* constraints) {
+ talk_base::scoped_refptr<LocalAudioSource> source(
+ new talk_base::RefCountedObject<LocalAudioSource>());
+ source->Initialize(constraints);
+ return source;
+}
+
+void LocalAudioSource::Initialize(
+ const MediaConstraintsInterface* constraints) {
+ if (!constraints)
+ return;
+
+ // Apply optional constraints first, they will be overwritten by mandatory
+ // constraints.
+ FromConstraints(constraints->GetOptional(), &options_);
+
+ cricket::AudioOptions options;
+ if (!FromConstraints(constraints->GetMandatory(), &options)) {
+ source_state_ = kEnded;
+ return;
+ }
+ options_.SetAll(options);
+ source_state_ = kLive;
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/localaudiosource.h b/talk/app/webrtc/localaudiosource.h
new file mode 100644
index 0000000..e0fda03
--- /dev/null
+++ b/talk/app/webrtc/localaudiosource.h
@@ -0,0 +1,69 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_LOCALAUDIOSOURCE_H_
+#define TALK_APP_WEBRTC_LOCALAUDIOSOURCE_H_
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/notifier.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/media/base/mediachannel.h"
+
+// LocalAudioSource implements AudioSourceInterface.
+// This contains settings for switching audio processing on and off.
+
+namespace webrtc {
+
+class MediaConstraintsInterface;
+
class LocalAudioSource : public Notifier<AudioSourceInterface> {
 public:
  // Creates an instance of LocalAudioSource.
  // |constraints| may be NULL. If a mandatory constraint is invalid, the
  // returned source is in the kEnded state (see localaudiosource.cc).
  static talk_base::scoped_refptr<LocalAudioSource> Create(
      const MediaConstraintsInterface* constraints);

  // Current state: kInitializing until constraints have been applied.
  virtual SourceState state() const { return source_state_; }
  // Audio-processing options derived from the creation constraints.
  virtual const cricket::AudioOptions& options() const { return options_; }

 protected:
  LocalAudioSource()
      : source_state_(kInitializing) {
  }

  ~LocalAudioSource() {
  }

 private:
  // Parses |constraints| into options_ and updates source_state_.
  void Initialize(const MediaConstraintsInterface* constraints);

  cricket::AudioOptions options_;
  SourceState source_state_;
};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_LOCALAUDIOSOURCE_H_
diff --git a/talk/app/webrtc/localaudiosource_unittest.cc b/talk/app/webrtc/localaudiosource_unittest.cc
new file mode 100644
index 0000000..ae07787
--- /dev/null
+++ b/talk/app/webrtc/localaudiosource_unittest.cc
@@ -0,0 +1,118 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/localaudiosource.h"
+
+#include <string>
+#include <vector>
+
+#include "talk/app/webrtc/test/fakeconstraints.h"
+#include "talk/base/gunit.h"
+#include "talk/media/base/fakemediaengine.h"
+#include "talk/media/base/fakevideorenderer.h"
+#include "talk/media/devices/fakedevicemanager.h"
+
+using webrtc::LocalAudioSource;
+using webrtc::MediaConstraintsInterface;
+using webrtc::MediaSourceInterface;
+
+TEST(LocalAudioSourceTest, SetValidOptions) {
+ webrtc::FakeConstraints constraints;
+ constraints.AddMandatory(MediaConstraintsInterface::kEchoCancellation, false);
+ constraints.AddOptional(
+ MediaConstraintsInterface::kExperimentalEchoCancellation, true);
+ constraints.AddOptional(MediaConstraintsInterface::kAutoGainControl, true);
+ constraints.AddOptional(
+ MediaConstraintsInterface::kExperimentalAutoGainControl, true);
+ constraints.AddMandatory(MediaConstraintsInterface::kNoiseSuppression, false);
+ constraints.AddOptional(MediaConstraintsInterface::kHighpassFilter, true);
+
+ talk_base::scoped_refptr<LocalAudioSource> source =
+ LocalAudioSource::Create(&constraints);
+
+ bool value;
+ EXPECT_TRUE(source->options().echo_cancellation.Get(&value));
+ EXPECT_FALSE(value);
+ EXPECT_TRUE(source->options().experimental_aec.Get(&value));
+ EXPECT_TRUE(value);
+ EXPECT_TRUE(source->options().auto_gain_control.Get(&value));
+ EXPECT_TRUE(value);
+ EXPECT_TRUE(source->options().experimental_agc.Get(&value));
+ EXPECT_TRUE(value);
+ EXPECT_TRUE(source->options().noise_suppression.Get(&value));
+ EXPECT_FALSE(value);
+ EXPECT_TRUE(source->options().highpass_filter.Get(&value));
+ EXPECT_TRUE(value);
+}
+
+TEST(LocalAudioSourceTest, OptionNotSet) {
+ webrtc::FakeConstraints constraints;
+ talk_base::scoped_refptr<LocalAudioSource> source =
+ LocalAudioSource::Create(&constraints);
+ bool value;
+ EXPECT_FALSE(source->options().highpass_filter.Get(&value));
+}
+
+TEST(LocalAudioSourceTest, MandatoryOverridesOptional) {
+ webrtc::FakeConstraints constraints;
+ constraints.AddMandatory(MediaConstraintsInterface::kEchoCancellation, false);
+ constraints.AddOptional(MediaConstraintsInterface::kEchoCancellation, true);
+
+ talk_base::scoped_refptr<LocalAudioSource> source =
+ LocalAudioSource::Create(&constraints);
+
+ bool value;
+ EXPECT_TRUE(source->options().echo_cancellation.Get(&value));
+ EXPECT_FALSE(value);
+}
+
+TEST(LocalAudioSourceTest, InvalidOptional) {
+ webrtc::FakeConstraints constraints;
+ constraints.AddOptional(MediaConstraintsInterface::kHighpassFilter, false);
+ constraints.AddOptional("invalidKey", false);
+
+ talk_base::scoped_refptr<LocalAudioSource> source =
+ LocalAudioSource::Create(&constraints);
+
+ EXPECT_EQ(MediaSourceInterface::kLive, source->state());
+ bool value;
+ EXPECT_TRUE(source->options().highpass_filter.Get(&value));
+ EXPECT_FALSE(value);
+}
+
+TEST(LocalAudioSourceTest, InvalidMandatory) {
+ webrtc::FakeConstraints constraints;
+ constraints.AddMandatory(MediaConstraintsInterface::kHighpassFilter, false);
+ constraints.AddMandatory("invalidKey", false);
+
+ talk_base::scoped_refptr<LocalAudioSource> source =
+ LocalAudioSource::Create(&constraints);
+
+ EXPECT_EQ(MediaSourceInterface::kEnded, source->state());
+ bool value;
+ EXPECT_FALSE(source->options().highpass_filter.Get(&value));
+}
diff --git a/talk/app/webrtc/localvideosource.cc b/talk/app/webrtc/localvideosource.cc
new file mode 100644
index 0000000..2d43885
--- /dev/null
+++ b/talk/app/webrtc/localvideosource.cc
@@ -0,0 +1,442 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/localvideosource.h"
+
+#include <vector>
+
+#include "talk/app/webrtc/mediaconstraintsinterface.h"
+#include "talk/session/media/channelmanager.h"
+
+using cricket::CaptureState;
+using webrtc::MediaConstraintsInterface;
+using webrtc::MediaSourceInterface;
+
+namespace webrtc {
+
// Constraint keys. Specified by draft-alvestrand-constraints-resolution-00b
// They are declared as static members in mediastreaminterface.h
// The string values are what incoming MediaConstraintsInterface entries are
// matched against in the filtering functions below.
const char MediaConstraintsInterface::kMinAspectRatio[] = "minAspectRatio";
const char MediaConstraintsInterface::kMaxAspectRatio[] = "maxAspectRatio";
const char MediaConstraintsInterface::kMaxWidth[] = "maxWidth";
const char MediaConstraintsInterface::kMinWidth[] = "minWidth";
const char MediaConstraintsInterface::kMaxHeight[] = "maxHeight";
const char MediaConstraintsInterface::kMinHeight[] = "minHeight";
const char MediaConstraintsInterface::kMaxFrameRate[] = "maxFrameRate";
const char MediaConstraintsInterface::kMinFrameRate[] = "minFrameRate";

// Google-specific keys
// These behave as always-satisfiable options rather than hard format
// constraints (see NewFormatWithConstraints below).
const char MediaConstraintsInterface::kNoiseReduction[] = "googNoiseReduction";
const char MediaConstraintsInterface::kLeakyBucket[] = "googLeakyBucket";
const char MediaConstraintsInterface::kTemporalLayeredScreencast[] =
    "googTemporalLayeredScreencast";
+
+} // namespace webrtc
+
+namespace {
+
// Margin used when comparing aspect ratios: a ratio in a constraint has
// round-tripped through a decimal string, so exact equality cannot be used
// (see NewFormatWithConstraints).
const double kRoundingTruncation = 0.0005;

// Message ids for video-capture state signaling.
// NOTE(review): no Post/OnMessage use is visible in this part of the file —
// confirm these are consumed further down before relying on them.
enum {
  MSG_VIDEOCAPTURESTATECONNECT,
  MSG_VIDEOCAPTURESTATEDISCONNECT,
  MSG_VIDEOCAPTURESTATECHANGE,
};

// Default resolution. If no constraint is specified, this is the resolution we
// will use.
static const cricket::VideoFormatPod kDefaultResolution =
    {640, 480, FPS_TO_INTERVAL(30), cricket::FOURCC_ANY};

// List of formats used if the camera doesn't support capability enumeration.
static const cricket::VideoFormatPod kVideoFormats[] = {
    {1920, 1080, FPS_TO_INTERVAL(30), cricket::FOURCC_ANY},
    {1280, 720, FPS_TO_INTERVAL(30), cricket::FOURCC_ANY},
    {960, 720, FPS_TO_INTERVAL(30), cricket::FOURCC_ANY},
    {640, 360, FPS_TO_INTERVAL(30), cricket::FOURCC_ANY},
    {640, 480, FPS_TO_INTERVAL(30), cricket::FOURCC_ANY},
    {320, 240, FPS_TO_INTERVAL(30), cricket::FOURCC_ANY},
    {320, 180, FPS_TO_INTERVAL(30), cricket::FOURCC_ANY}
};
+
+MediaSourceInterface::SourceState
+GetReadyState(cricket::CaptureState state) {
+ switch (state) {
+ case cricket::CS_STARTING:
+ return MediaSourceInterface::kInitializing;
+ case cricket::CS_RUNNING:
+ return MediaSourceInterface::kLive;
+ case cricket::CS_FAILED:
+ case cricket::CS_NO_DEVICE:
+ case cricket::CS_STOPPED:
+ return MediaSourceInterface::kEnded;
+ case cricket::CS_PAUSED:
+ return MediaSourceInterface::kMuted;
+ default:
+ ASSERT(false && "GetReadyState unknown state");
+ }
+ return MediaSourceInterface::kEnded;
+}
+
// Lowers |*original_limit| to |new_limit| when the current limit is unset
// (negative) or larger than |new_limit|.
void SetUpperLimit(int new_limit, int* original_limit) {
  const bool limit_is_unset = *original_limit < 0;
  if (limit_is_unset || new_limit < *original_limit)
    *original_limit = new_limit;
}
+
+// Updates |format_upper_limit| from |constraint|.
+// If constraint.maxFoo is smaller than format_upper_limit.foo,
+// set format_upper_limit.foo to constraint.maxFoo.
+void SetUpperLimitFromConstraint(
+ const MediaConstraintsInterface::Constraint& constraint,
+ cricket::VideoFormat* format_upper_limit) {
+ if (constraint.key == MediaConstraintsInterface::kMaxWidth) {
+ int value = talk_base::FromString<int>(constraint.value);
+ SetUpperLimit(value, &(format_upper_limit->width));
+ } else if (constraint.key == MediaConstraintsInterface::kMaxHeight) {
+ int value = talk_base::FromString<int>(constraint.value);
+ SetUpperLimit(value, &(format_upper_limit->height));
+ }
+}
+
+// Fills |format_out| with the max width and height allowed by |constraints|.
+void FromConstraintsForScreencast(
+ const MediaConstraintsInterface::Constraints& constraints,
+ cricket::VideoFormat* format_out) {
+ typedef MediaConstraintsInterface::Constraints::const_iterator
+ ConstraintsIterator;
+
+ cricket::VideoFormat upper_limit(-1, -1, 0, 0);
+ for (ConstraintsIterator constraints_it = constraints.begin();
+ constraints_it != constraints.end(); ++constraints_it)
+ SetUpperLimitFromConstraint(*constraints_it, &upper_limit);
+
+ if (upper_limit.width >= 0)
+ format_out->width = upper_limit.width;
+ if (upper_limit.height >= 0)
+ format_out->height = upper_limit.height;
+}
+
+// Returns true if |constraint| is fulfilled. |format_out| can differ from
+// |format_in| if the format is changed by the constraint. Ie - the frame rate
+// can be changed by setting maxFrameRate.
+bool NewFormatWithConstraints(
+ const MediaConstraintsInterface::Constraint& constraint,
+ const cricket::VideoFormat& format_in,
+ bool mandatory,
+ cricket::VideoFormat* format_out) {
+ ASSERT(format_out != NULL);
+ *format_out = format_in;
+
+ if (constraint.key == MediaConstraintsInterface::kMinWidth) {
+ int value = talk_base::FromString<int>(constraint.value);
+ return (value <= format_in.width);
+ } else if (constraint.key == MediaConstraintsInterface::kMaxWidth) {
+ int value = talk_base::FromString<int>(constraint.value);
+ return (value >= format_in.width);
+ } else if (constraint.key == MediaConstraintsInterface::kMinHeight) {
+ int value = talk_base::FromString<int>(constraint.value);
+ return (value <= format_in.height);
+ } else if (constraint.key == MediaConstraintsInterface::kMaxHeight) {
+ int value = talk_base::FromString<int>(constraint.value);
+ return (value >= format_in.height);
+ } else if (constraint.key == MediaConstraintsInterface::kMinFrameRate) {
+ int value = talk_base::FromString<int>(constraint.value);
+ return (value <= cricket::VideoFormat::IntervalToFps(format_in.interval));
+ } else if (constraint.key == MediaConstraintsInterface::kMaxFrameRate) {
+ int value = talk_base::FromString<int>(constraint.value);
+ if (value == 0) {
+ if (mandatory) {
+ // TODO(ronghuawu): Convert the constraint value to float when sub-1fps
+ // is supported by the capturer.
+ return false;
+ } else {
+ value = 1;
+ }
+ }
+ if (value <= cricket::VideoFormat::IntervalToFps(format_in.interval)) {
+ format_out->interval = cricket::VideoFormat::FpsToInterval(value);
+ return true;
+ } else {
+ return false;
+ }
+ } else if (constraint.key == MediaConstraintsInterface::kMinAspectRatio) {
+ double value = talk_base::FromString<double>(constraint.value);
+ // The aspect ratio in |constraint.value| has been converted to a string and
+ // back to a double, so it may have a rounding error.
+ // E.g if the value 1/3 is converted to a string, the string will not have
+ // infinite length.
+ // We add a margin of 0.0005 which is high enough to detect the same aspect
+ // ratio but small enough to avoid matching wrong aspect ratios.
+ double ratio = static_cast<double>(format_in.width) / format_in.height;
+ return (value <= ratio + kRoundingTruncation);
+ } else if (constraint.key == MediaConstraintsInterface::kMaxAspectRatio) {
+ double value = talk_base::FromString<double>(constraint.value);
+ double ratio = static_cast<double>(format_in.width) / format_in.height;
+ // Subtract 0.0005 to avoid rounding problems. Same as above.
+ const double kRoundingTruncation = 0.0005;
+ return (value >= ratio - kRoundingTruncation);
+ } else if (constraint.key == MediaConstraintsInterface::kNoiseReduction ||
+ constraint.key == MediaConstraintsInterface::kLeakyBucket ||
+ constraint.key ==
+ MediaConstraintsInterface::kTemporalLayeredScreencast) {
+ // These are actually options, not constraints, so they can be satisfied
+ // regardless of the format.
+ return true;
+ }
+ LOG(LS_WARNING) << "Found unknown MediaStream constraint. Name:"
+ << constraint.key << " Value:" << constraint.value;
+ return false;
+}
+
+// Removes the entries in |formats| that cannot be adjusted to satisfy
+// |constraint|. Surviving entries may be modified in place so that they
+// fulfill the constraint.
+void FilterFormatsByConstraint(
+    const MediaConstraintsInterface::Constraint& constraint,
+    bool mandatory,
+    std::vector<cricket::VideoFormat>* formats) {
+  for (std::vector<cricket::VideoFormat>::iterator it = formats->begin();
+       it != formats->end();) {
+    // Try to adjust *it so that it fulfills |constraint|; drop the format
+    // when that is not possible.
+    if (NewFormatWithConstraints(constraint, (*it), mandatory, &(*it))) {
+      ++it;
+    } else {
+      it = formats->erase(it);
+    }
+  }
+}
+
+// Returns the subset of |supported_formats| that satisfies all |mandatory|
+// constraints and as many of the |optional| constraints as possible.
+// An empty vector is returned if the mandatory constraints cannot be met.
+std::vector<cricket::VideoFormat> FilterFormats(
+    const MediaConstraintsInterface::Constraints& mandatory,
+    const MediaConstraintsInterface::Constraints& optional,
+    const std::vector<cricket::VideoFormat>& supported_formats) {
+  typedef MediaConstraintsInterface::Constraints::const_iterator
+      ConstraintsIterator;
+  std::vector<cricket::VideoFormat> candidates = supported_formats;
+
+  // Every mandatory constraint must be honored; filtering to an empty set
+  // here means failure, which is returned as-is.
+  for (ConstraintsIterator constraints_it = mandatory.begin();
+       constraints_it != mandatory.end(); ++constraints_it)
+    FilterFormatsByConstraint(*constraints_it, true, &candidates);
+
+  if (candidates.empty())
+    return candidates;
+
+  // Ok - all mandatory checked and we still have a candidate.
+  // Apply each optional constraint, but keep its filtering effect only if at
+  // least one candidate survives it.
+  for (ConstraintsIterator constraints_it = optional.begin();
+       constraints_it != optional.end(); ++constraints_it) {
+    std::vector<cricket::VideoFormat> current_candidates = candidates;
+    // BUG FIX: "&current_candidates" had been garbled into "¤t_candidates"
+    // (HTML-entity corruption: "&curren" rendered as the '¤' character),
+    // which does not compile. Restored the address-of operator.
+    FilterFormatsByConstraint(*constraints_it, false, &current_candidates);
+    if (!current_candidates.empty()) {
+      candidates = current_candidates;
+    }
+  }
+
+  // We have done as good as we can to filter the supported resolutions.
+  return candidates;
+}
+
+// Finds the format that best matches the default video size.
+// Constraints are optional, and since the performance of a video call
+// might be limited by bitrate, CPU, and camera performance, it is better to
+// select a resolution as close as possible to our default that still meets
+// the constraints.
+const cricket::VideoFormat& GetBestCaptureFormat(
+    const std::vector<cricket::VideoFormat>& formats) {
+  ASSERT(formats.size() > 0);
+
+  const int default_area = kDefaultResolution.width * kDefaultResolution.height;
+
+  // Track the format whose pixel area is nearest to the default area.
+  std::vector<cricket::VideoFormat>::const_iterator best_it = formats.begin();
+  int best_diff = abs(default_area - best_it->width * best_it->height);
+  for (std::vector<cricket::VideoFormat>::const_iterator it = formats.begin();
+       it != formats.end(); ++it) {
+    const int diff = abs(default_area - it->width * it->height);
+    if (diff < best_diff) {
+      best_diff = diff;
+      best_it = it;
+    }
+  }
+  return *best_it;
+}
+
+// Sets |option| to the highest-priority value of |key| found in the
+// constraints. Returns false only when |key| occurred among the mandatory
+// constraints with an invalid value.
+bool ExtractOption(const MediaConstraintsInterface* all_constraints,
+                   const std::string& key, cricket::Settable<bool>* option) {
+  size_t mandatory = 0;
+  bool value;
+  if (!FindConstraint(all_constraints, key, &value, &mandatory)) {
+    // No usable value found; this is an error only if the key was mandatory.
+    return mandatory == 0;
+  }
+  option->Set(value);
+  return true;
+}
+
+// Searches |all_constraints| for the known video options. Every option found
+// with a valid value is applied to |options|. Returns false if any mandatory
+// video option carried an invalid value.
+bool ExtractVideoOptions(const MediaConstraintsInterface* all_constraints,
+                         cricket::VideoOptions* options) {
+  // Each option is extracted unconditionally so that every valid value is
+  // applied even after an earlier failure.
+  bool all_valid = ExtractOption(all_constraints,
+                                 MediaConstraintsInterface::kNoiseReduction,
+                                 &(options->video_noise_reduction));
+  all_valid = ExtractOption(all_constraints,
+                            MediaConstraintsInterface::kLeakyBucket,
+                            &(options->video_leaky_bucket)) && all_valid;
+  all_valid = ExtractOption(
+      all_constraints,
+      MediaConstraintsInterface::kTemporalLayeredScreencast,
+      &(options->video_temporal_layer_screencast)) && all_valid;
+
+  return all_valid;
+}
+
+} // anonymous namespace
+
+namespace webrtc {
+
+// Creates and initializes a LocalVideoSource. The returned source takes
+// ownership of |capturer|. |constraints| may be NULL, in which case the
+// camera is opened with a default resolution.
+talk_base::scoped_refptr<LocalVideoSource> LocalVideoSource::Create(
+    cricket::ChannelManager* channel_manager,
+    cricket::VideoCapturer* capturer,
+    const webrtc::MediaConstraintsInterface* constraints) {
+  ASSERT(channel_manager != NULL);
+  ASSERT(capturer != NULL);
+  talk_base::RefCountedObject<LocalVideoSource>* source_impl =
+      new talk_base::RefCountedObject<LocalVideoSource>(channel_manager,
+                                                        capturer);
+  talk_base::scoped_refptr<LocalVideoSource> source(source_impl);
+  source->Initialize(constraints);
+  return source;
+}
+
+// Stores the dependencies and subscribes to capture-state changes. The
+// capturer is not started here; that happens in Initialize().
+LocalVideoSource::LocalVideoSource(cricket::ChannelManager* channel_manager,
+                                   cricket::VideoCapturer* capturer)
+    : channel_manager_(channel_manager),
+      video_capturer_(capturer),
+      state_(kInitializing) {
+  channel_manager_->SignalVideoCaptureStateChange.connect(
+      this, &LocalVideoSource::OnStateChange);
+}
+
+LocalVideoSource::~LocalVideoSource() {
+  // Stop capturing and make sure no further state-change callbacks are
+  // delivered to this object after destruction.
+  channel_manager_->StopVideoCapture(video_capturer_.get(), format_);
+  channel_manager_->SignalVideoCaptureStateChange.disconnect(this);
+}
+
+// Determines the capture format from the capturer's capabilities and
+// |constraints|, then asks the channel manager to start capturing. On any
+// failure the state is set to kEnded; on success the state is updated
+// asynchronously via OnStateChange once the capturer reports it is running.
+void LocalVideoSource::Initialize(
+    const webrtc::MediaConstraintsInterface* constraints) {
+
+  std::vector<cricket::VideoFormat> formats;
+  if (video_capturer_->GetSupportedFormats() &&
+      video_capturer_->GetSupportedFormats()->size() > 0) {
+    formats = *video_capturer_->GetSupportedFormats();
+  } else if (video_capturer_->IsScreencast()) {
+    // The screen capturer can accept any resolution and we will derive the
+    // format from the constraints if any.
+    // Note that this only affects tab capturing, not desktop capturing,
+    // since desktop capturer does not respect the VideoFormat passed in.
+    formats.push_back(cricket::VideoFormat(kDefaultResolution));
+  } else {
+    // The VideoCapturer implementation doesn't support capability
+    // enumeration. We need to guess what the camera supports.
+    for (int i = 0; i < ARRAY_SIZE(kVideoFormats); ++i) {
+      formats.push_back(cricket::VideoFormat(kVideoFormats[i]));
+    }
+  }
+
+  if (constraints) {
+    MediaConstraintsInterface::Constraints mandatory_constraints =
+        constraints->GetMandatory();
+    MediaConstraintsInterface::Constraints optional_constraints;
+    optional_constraints = constraints->GetOptional();
+
+    if (video_capturer_->IsScreencast()) {
+      // Use the maxWidth and maxHeight allowed by constraints for screencast.
+      FromConstraintsForScreencast(mandatory_constraints, &(formats[0]));
+    }
+
+    formats = FilterFormats(mandatory_constraints, optional_constraints,
+                            formats);
+  }
+
+  if (formats.size() == 0) {
+    LOG(LS_WARNING) << "Failed to find a suitable video format.";
+    SetState(kEnded);
+    return;
+  }
+
+  // Extract option-style keys (noise reduction etc.); an invalid mandatory
+  // option value ends the source.
+  cricket::VideoOptions options;
+  if (!ExtractVideoOptions(constraints, &options)) {
+    LOG(LS_WARNING) << "Could not satisfy mandatory options.";
+    SetState(kEnded);
+    return;
+  }
+  options_.SetAll(options);
+
+  format_ = GetBestCaptureFormat(formats);
+  // Start the camera with our best guess.
+  // TODO(perkj): Should we try again with another format if it turns out that
+  // the camera doesn't produce frames with the correct format? Or will
+  // cricket::VideoCapturer be able to re-scale / crop to the requested
+  // resolution?
+  if (!channel_manager_->StartVideoCapture(video_capturer_.get(), format_)) {
+    SetState(kEnded);
+    return;
+  }
+  // Initialize hasn't succeeded until a successful state change has occurred.
+}
+
+// Connects |output| so it receives video frames for as long as the
+// underlying capturer is producing them.
+void LocalVideoSource::AddSink(cricket::VideoRenderer* output) {
+  channel_manager_->AddVideoRenderer(video_capturer_.get(), output);
+}
+
+// Disconnects |output|; it will no longer receive frames from this source.
+void LocalVideoSource::RemoveSink(cricket::VideoRenderer* output) {
+  channel_manager_->RemoveVideoRenderer(video_capturer_.get(), output);
+}
+
+// OnStateChange listens to the ChannelManager::SignalVideoCaptureStateChange.
+// The signal is triggered for every video capturer, so events that concern a
+// different capturer are ignored.
+void LocalVideoSource::OnStateChange(cricket::VideoCapturer* capturer,
+                                     cricket::CaptureState capture_state) {
+  if (capturer != video_capturer_.get())
+    return;
+  SetState(GetReadyState(capture_state));
+}
+
+// Updates |state_| and notifies registered observers. VERIFY logs an error
+// (and skips the update) if called with the state the source is already in.
+void LocalVideoSource::SetState(SourceState new_state) {
+  if (VERIFY(state_ != new_state)) {
+    state_ = new_state;
+    FireOnChanged();
+  }
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/localvideosource.h b/talk/app/webrtc/localvideosource.h
new file mode 100644
index 0000000..0a3bac0
--- /dev/null
+++ b/talk/app/webrtc/localvideosource.h
@@ -0,0 +1,100 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_LOCALVIDEOSOURCE_H_
+#define TALK_APP_WEBRTC_LOCALVIDEOSOURCE_H_
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/notifier.h"
+#include "talk/app/webrtc/videosourceinterface.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/sigslot.h"
+#include "talk/media/base/videocapturer.h"
+#include "talk/media/base/videocommon.h"
+
+// LocalVideoSource implements VideoSourceInterface. It owns a
+// cricket::VideoCapturer and makes sure the camera is started at a
+// resolution that honors the constraints.
+// The state is set depending on the result of starting the capturer.
+// If the constraints can't be met or the capturer fails to start, the state
+// transitions to kEnded; otherwise it transitions to kLive.
+
+namespace cricket {
+
+class ChannelManager;
+
+} // namespace cricket
+
+namespace webrtc {
+
+class MediaConstraintsInterface;
+
+class LocalVideoSource : public Notifier<VideoSourceInterface>,
+                         public sigslot::has_slots<> {
+ public:
+  // Creates an instance of LocalVideoSource.
+  // LocalVideoSource takes ownership of |capturer|.
+  // |constraints| can be NULL and in that case the camera is opened using a
+  // default resolution.
+  static talk_base::scoped_refptr<LocalVideoSource> Create(
+      cricket::ChannelManager* channel_manager,
+      cricket::VideoCapturer* capturer,
+      const webrtc::MediaConstraintsInterface* constraints);
+
+  // Current lifecycle state; starts as kInitializing.
+  virtual SourceState state() const { return state_; }
+  // Video options extracted from the constraints at creation time.
+  virtual const cricket::VideoOptions* options() const { return &options_; }
+
+  // Returns the capturer owned by this source.
+  virtual cricket::VideoCapturer* GetVideoCapturer() {
+    return video_capturer_.get();
+  }
+  // |output| will be served video frames as long as the underlying capturer
+  // is producing video frames.
+  virtual void AddSink(cricket::VideoRenderer* output);
+  virtual void RemoveSink(cricket::VideoRenderer* output);
+
+ protected:
+  LocalVideoSource(cricket::ChannelManager* channel_manager,
+                   cricket::VideoCapturer* capturer);
+  ~LocalVideoSource();
+
+ private:
+  // Picks a capture format that honors |constraints| and starts the capturer.
+  void Initialize(const webrtc::MediaConstraintsInterface* constraints);
+  // Slot for ChannelManager::SignalVideoCaptureStateChange.
+  void OnStateChange(cricket::VideoCapturer* capturer,
+                     cricket::CaptureState capture_state);
+  void SetState(SourceState new_state);
+
+  cricket::ChannelManager* channel_manager_;  // Not owned.
+  talk_base::scoped_ptr<cricket::VideoCapturer> video_capturer_;
+
+  cricket::VideoFormat format_;  // Format the capturer was started with.
+  cricket::VideoOptions options_;
+  SourceState state_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_LOCALVIDEOSOURCE_H_
diff --git a/talk/app/webrtc/localvideosource_unittest.cc b/talk/app/webrtc/localvideosource_unittest.cc
new file mode 100644
index 0000000..24a8588
--- /dev/null
+++ b/talk/app/webrtc/localvideosource_unittest.cc
@@ -0,0 +1,523 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/localvideosource.h"
+
+#include <string>
+#include <vector>
+
+#include "talk/app/webrtc/test/fakeconstraints.h"
+#include "talk/base/gunit.h"
+#include "talk/media/base/fakemediaengine.h"
+#include "talk/media/base/fakevideorenderer.h"
+#include "talk/media/devices/fakedevicemanager.h"
+#include "talk/session/media/channelmanager.h"
+
+using webrtc::FakeConstraints;
+using webrtc::LocalVideoSource;
+using webrtc::MediaConstraintsInterface;
+using webrtc::MediaSourceInterface;
+using webrtc::ObserverInterface;
+using webrtc::VideoSourceInterface;
+
+namespace {
+
+// Max wait time for a test.
+const int kMaxWaitMs = 100;
+
+} // anonymous namespace
+
+
+// TestVideoCapturer extends cricket::FakeVideoCapturer so it can be used for
+// testing without known camera formats.
+// It keeps its own list of cricket::VideoFormats for the unit tests in this
+// file.
+class TestVideoCapturer : public cricket::FakeVideoCapturer {
+ public:
+  // Populates a set of typical camera capture formats.
+  TestVideoCapturer() : test_without_formats_(false) {
+    std::vector<cricket::VideoFormat> formats;
+    formats.push_back(cricket::VideoFormat(1280, 720,
+        cricket::VideoFormat::FpsToInterval(30), cricket::FOURCC_I420));
+    formats.push_back(cricket::VideoFormat(640, 480,
+        cricket::VideoFormat::FpsToInterval(30), cricket::FOURCC_I420));
+    formats.push_back(cricket::VideoFormat(640, 400,
+        cricket::VideoFormat::FpsToInterval(30), cricket::FOURCC_I420));
+    formats.push_back(cricket::VideoFormat(320, 240,
+        cricket::VideoFormat::FpsToInterval(30), cricket::FOURCC_I420));
+    formats.push_back(cricket::VideoFormat(352, 288,
+        cricket::VideoFormat::FpsToInterval(30), cricket::FOURCC_I420));
+    ResetSupportedFormats(formats);
+  }
+
+  // This function is used for resetting the supported capture formats and
+  // simulating a cricket::VideoCapturer implementation that doesn't support
+  // capture format enumeration. This is used to simulate the current
+  // Chrome implementation.
+  void TestWithoutCameraFormats() {
+    test_without_formats_ = true;
+    std::vector<cricket::VideoFormat> formats;
+    ResetSupportedFormats(formats);
+  }
+
+  // When simulating a capturer without format enumeration, adopt whatever
+  // format capture is started with as the single supported format.
+  virtual cricket::CaptureState Start(
+      const cricket::VideoFormat& capture_format) {
+    if (test_without_formats_) {
+      std::vector<cricket::VideoFormat> formats;
+      formats.push_back(capture_format);
+      ResetSupportedFormats(formats);
+    }
+    return FakeVideoCapturer::Start(capture_format);
+  }
+
+  // Without known formats, any desired format is reported as the best match.
+  virtual bool GetBestCaptureFormat(const cricket::VideoFormat& desired,
+                                    cricket::VideoFormat* best_format) {
+    if (test_without_formats_) {
+      *best_format = desired;
+      return true;
+    }
+    return FakeVideoCapturer::GetBestCaptureFormat(desired,
+                                                   best_format);
+  }
+
+ private:
+  bool test_without_formats_;
+};
+
+// Records the most recent state reported by a video source so tests can
+// wait for specific state transitions with EXPECT_EQ_WAIT.
+class StateObserver : public ObserverInterface {
+ public:
+  explicit StateObserver(VideoSourceInterface* source)
+      : state_(source->state()),
+        source_(source) {
+  }
+  // Called by the source on every change; caches the new state.
+  virtual void OnChanged() {
+    state_ = source_->state();
+  }
+  MediaSourceInterface::SourceState state() const { return state_; }
+
+ private:
+  MediaSourceInterface::SourceState state_;
+  talk_base::scoped_refptr<VideoSourceInterface> source_;
+};
+
+// Fixture that creates a ChannelManager backed by fake media engine and
+// device manager, plus a TestVideoCapturer whose ownership is transferred
+// to the LocalVideoSource under test.
+class LocalVideoSourceTest : public testing::Test {
+ protected:
+  LocalVideoSourceTest()
+      : channel_manager_(new cricket::ChannelManager(
+          new cricket::FakeMediaEngine(),
+          new cricket::FakeDeviceManager(), talk_base::Thread::Current())) {
+  }
+
+  void SetUp() {
+    ASSERT_TRUE(channel_manager_->Init());
+    capturer_ = new TestVideoCapturer();
+  }
+
+  // Creates the source without constraints.
+  void CreateLocalVideoSource() {
+    CreateLocalVideoSource(NULL);
+  }
+
+  // Creates the source with |constraints| and hooks up the state observer
+  // and a fake renderer sink.
+  void CreateLocalVideoSource(
+      const webrtc::MediaConstraintsInterface* constraints) {
+    // VideoSource takes ownership of |capturer_|.
+    local_source_ = LocalVideoSource::Create(channel_manager_.get(),
+                                             capturer_,
+                                             constraints);
+
+    ASSERT_TRUE(local_source_.get() != NULL);
+    EXPECT_EQ(capturer_, local_source_->GetVideoCapturer());
+
+    state_observer_.reset(new StateObserver(local_source_));
+    local_source_->RegisterObserver(state_observer_.get());
+    local_source_->AddSink(&renderer_);
+  }
+
+  TestVideoCapturer* capturer_;  // Raw pointer. Owned by local_source_.
+  cricket::FakeVideoRenderer renderer_;
+  talk_base::scoped_ptr<cricket::ChannelManager> channel_manager_;
+  talk_base::scoped_ptr<StateObserver> state_observer_;
+  talk_base::scoped_refptr<LocalVideoSource> local_source_;
+};
+
+
+// Test that a LocalVideoSource transitions to the kLive state when the
+// capture device has started and to kEnded if it is stopped.
+// It also tests that an output can receive video frames.
+TEST_F(LocalVideoSourceTest, StartStop) {
+  // Initialize without constraints.
+  CreateLocalVideoSource();
+  EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
+                 kMaxWaitMs);
+
+  // A captured frame should reach the connected renderer.
+  ASSERT_TRUE(capturer_->CaptureFrame());
+  EXPECT_EQ(1, renderer_.num_rendered_frames());
+
+  capturer_->Stop();
+  EXPECT_EQ_WAIT(MediaSourceInterface::kEnded, state_observer_->state(),
+                 kMaxWaitMs);
+}
+
+// Test that a LocalVideoSource transitions to kEnded if the capture device
+// fails.
+TEST_F(LocalVideoSourceTest, CameraFailed) {
+  CreateLocalVideoSource();
+  EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
+                 kMaxWaitMs);
+
+  capturer_->SignalStateChange(capturer_, cricket::CS_FAILED);
+  EXPECT_EQ_WAIT(MediaSourceInterface::kEnded, state_observer_->state(),
+                 kMaxWaitMs);
+}
+
+// Test that the capture output is CIF if we set max constraints to CIF
+// and the capture device supports CIF.
+TEST_F(LocalVideoSourceTest, MandatoryConstraintCif5Fps) {
+  FakeConstraints constraints;
+  constraints.AddMandatory(MediaConstraintsInterface::kMaxWidth, 352);
+  constraints.AddMandatory(MediaConstraintsInterface::kMaxHeight, 288);
+  constraints.AddMandatory(MediaConstraintsInterface::kMaxFrameRate, 5);
+
+  CreateLocalVideoSource(&constraints);
+  EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
+                 kMaxWaitMs);
+  const cricket::VideoFormat* format = capturer_->GetCaptureFormat();
+  ASSERT_TRUE(format != NULL);
+  EXPECT_EQ(352, format->width);
+  EXPECT_EQ(288, format->height);
+  EXPECT_EQ(5, format->framerate());
+}
+
+// Test that the capture output is 720P if the camera supports it and the
+// optional constraint is set to 720P.
+TEST_F(LocalVideoSourceTest, MandatoryMinVgaOptional720P) {
+  FakeConstraints constraints;
+  constraints.AddMandatory(MediaConstraintsInterface::kMinWidth, 640);
+  constraints.AddMandatory(MediaConstraintsInterface::kMinHeight, 480);
+  constraints.AddOptional(MediaConstraintsInterface::kMinWidth, 1280);
+  constraints.AddOptional(MediaConstraintsInterface::kMinAspectRatio,
+                          1280.0 / 720);
+
+  CreateLocalVideoSource(&constraints);
+  EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
+                 kMaxWaitMs);
+  const cricket::VideoFormat* format = capturer_->GetCaptureFormat();
+  ASSERT_TRUE(format != NULL);
+  EXPECT_EQ(1280, format->width);
+  EXPECT_EQ(720, format->height);
+  EXPECT_EQ(30, format->framerate());
+}
+
+// Test that the capture output has aspect ratio 4:3 if a mandatory constraint
+// requires it, even if an optional constraint requests a higher resolution
+// that doesn't have this aspect ratio.
+TEST_F(LocalVideoSourceTest, MandatoryAspectRatio4To3) {
+  FakeConstraints constraints;
+  constraints.AddMandatory(MediaConstraintsInterface::kMinWidth, 640);
+  constraints.AddMandatory(MediaConstraintsInterface::kMinHeight, 480);
+  constraints.AddMandatory(MediaConstraintsInterface::kMaxAspectRatio,
+                           640.0 / 480);
+  constraints.AddOptional(MediaConstraintsInterface::kMinWidth, 1280);
+
+  CreateLocalVideoSource(&constraints);
+  EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
+                 kMaxWaitMs);
+  const cricket::VideoFormat* format = capturer_->GetCaptureFormat();
+  ASSERT_TRUE(format != NULL);
+  EXPECT_EQ(640, format->width);
+  EXPECT_EQ(480, format->height);
+  EXPECT_EQ(30, format->framerate());
+}
+
+
+// Test that the source state transitions to kEnded if the mandatory aspect
+// ratio is set higher than supported.
+TEST_F(LocalVideoSourceTest, MandatoryAspectRatioTooHigh) {
+  FakeConstraints constraints;
+  constraints.AddMandatory(MediaConstraintsInterface::kMinAspectRatio, 2);
+  CreateLocalVideoSource(&constraints);
+  EXPECT_EQ_WAIT(MediaSourceInterface::kEnded, state_observer_->state(),
+                 kMaxWaitMs);
+}
+
+// Test that the source ignores an optional aspect ratio that is higher than
+// supported.
+TEST_F(LocalVideoSourceTest, OptionalAspectRatioTooHigh) {
+  FakeConstraints constraints;
+  constraints.AddOptional(MediaConstraintsInterface::kMinAspectRatio, 2);
+  CreateLocalVideoSource(&constraints);
+  EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
+                 kMaxWaitMs);
+  const cricket::VideoFormat* format = capturer_->GetCaptureFormat();
+  ASSERT_TRUE(format != NULL);
+  // The chosen format must not actually have the unsupported aspect ratio.
+  double aspect_ratio = static_cast<double>(format->width) / format->height;
+  EXPECT_LT(aspect_ratio, 2);
+}
+
+// Test that the source starts video with the default resolution if the
+// camera doesn't support capability enumeration and there are no constraints.
+TEST_F(LocalVideoSourceTest, NoCameraCapability) {
+  capturer_->TestWithoutCameraFormats();
+
+  CreateLocalVideoSource();
+  EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
+                 kMaxWaitMs);
+  const cricket::VideoFormat* format = capturer_->GetCaptureFormat();
+  ASSERT_TRUE(format != NULL);
+  EXPECT_EQ(640, format->width);
+  EXPECT_EQ(480, format->height);
+  EXPECT_EQ(30, format->framerate());
+}
+
+// Test that the source can start the video and get the requested aspect ratio
+// if the camera doesn't support capability enumeration and the aspect ratio is
+// set.
+TEST_F(LocalVideoSourceTest, NoCameraCapability16To9Ratio) {
+  capturer_->TestWithoutCameraFormats();
+
+  FakeConstraints constraints;
+  double requested_aspect_ratio = 640.0 / 360;
+  constraints.AddMandatory(MediaConstraintsInterface::kMinWidth, 640);
+  constraints.AddMandatory(MediaConstraintsInterface::kMinAspectRatio,
+                           requested_aspect_ratio);
+
+  CreateLocalVideoSource(&constraints);
+  EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
+                 kMaxWaitMs);
+  const cricket::VideoFormat* format = capturer_->GetCaptureFormat();
+  double aspect_ratio = static_cast<double>(format->width) / format->height;
+  EXPECT_LE(requested_aspect_ratio, aspect_ratio);
+}
+
+// Test that the source state transitions to kEnded if an unknown mandatory
+// constraint is found.
+TEST_F(LocalVideoSourceTest, InvalidMandatoryConstraint) {
+  FakeConstraints constraints;
+  constraints.AddMandatory("weird key", 640);
+
+  CreateLocalVideoSource(&constraints);
+  EXPECT_EQ_WAIT(MediaSourceInterface::kEnded, state_observer_->state(),
+                 kMaxWaitMs);
+}
+
+// Test that the source ignores an unknown optional constraint.
+TEST_F(LocalVideoSourceTest, InvalidOptionalConstraint) {
+  FakeConstraints constraints;
+  constraints.AddOptional("weird key", 640);
+
+  CreateLocalVideoSource(&constraints);
+  EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
+                 kMaxWaitMs);
+}
+
+// Test that valid option constraint values end up in the source's options.
+TEST_F(LocalVideoSourceTest, SetValidOptionValues) {
+  FakeConstraints constraints;
+  constraints.AddMandatory(MediaConstraintsInterface::kNoiseReduction, "false");
+  constraints.AddMandatory(
+      MediaConstraintsInterface::kTemporalLayeredScreencast, "false");
+  constraints.AddOptional(
+      MediaConstraintsInterface::kLeakyBucket, "true");
+
+  CreateLocalVideoSource(&constraints);
+
+  bool value = true;
+  EXPECT_TRUE(local_source_->options()->video_noise_reduction.Get(&value));
+  EXPECT_FALSE(value);
+  EXPECT_TRUE(local_source_->options()->
+      video_temporal_layer_screencast.Get(&value));
+  EXPECT_FALSE(value);
+  EXPECT_TRUE(local_source_->options()->video_leaky_bucket.Get(&value));
+  EXPECT_TRUE(value);
+}
+
+// Test that an option not mentioned in the constraints remains unset.
+TEST_F(LocalVideoSourceTest, OptionNotSet) {
+  FakeConstraints constraints;
+  CreateLocalVideoSource(&constraints);
+  bool value;
+  EXPECT_FALSE(local_source_->options()->video_noise_reduction.Get(&value));
+}
+
+// Test that a mandatory option value wins over a conflicting optional one.
+TEST_F(LocalVideoSourceTest, MandatoryOptionOverridesOptional) {
+  FakeConstraints constraints;
+  constraints.AddMandatory(
+      MediaConstraintsInterface::kNoiseReduction, true);
+  constraints.AddOptional(
+      MediaConstraintsInterface::kNoiseReduction, false);
+
+  CreateLocalVideoSource(&constraints);
+
+  bool value = false;
+  EXPECT_TRUE(local_source_->options()->video_noise_reduction.Get(&value));
+  EXPECT_TRUE(value);
+  EXPECT_FALSE(local_source_->options()->video_leaky_bucket.Get(&value));
+}
+
+// Test that an unknown optional key does not prevent the source from going
+// live or other options from being applied.
+TEST_F(LocalVideoSourceTest, InvalidOptionKeyOptional) {
+  FakeConstraints constraints;
+  constraints.AddOptional(
+      MediaConstraintsInterface::kNoiseReduction, false);
+  constraints.AddOptional("invalidKey", false);
+
+  CreateLocalVideoSource(&constraints);
+
+  EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
+                 kMaxWaitMs);
+  bool value = true;
+  EXPECT_TRUE(local_source_->options()->video_noise_reduction.Get(&value));
+  EXPECT_FALSE(value);
+}
+
+// Test that an unknown mandatory key ends the source.
+TEST_F(LocalVideoSourceTest, InvalidOptionKeyMandatory) {
+  FakeConstraints constraints;
+  constraints.AddMandatory(
+      MediaConstraintsInterface::kNoiseReduction, false);
+  constraints.AddMandatory("invalidKey", false);
+
+  CreateLocalVideoSource(&constraints);
+
+  EXPECT_EQ_WAIT(MediaSourceInterface::kEnded, state_observer_->state(),
+                 kMaxWaitMs);
+  bool value;
+  EXPECT_FALSE(local_source_->options()->video_noise_reduction.Get(&value));
+}
+
+// Test that an invalid optional option value is ignored while valid ones are
+// still applied.
+TEST_F(LocalVideoSourceTest, InvalidOptionValueOptional) {
+  FakeConstraints constraints;
+  constraints.AddOptional(
+      MediaConstraintsInterface::kNoiseReduction, "true");
+  constraints.AddOptional(
+      MediaConstraintsInterface::kLeakyBucket, "not boolean");
+
+  CreateLocalVideoSource(&constraints);
+
+  EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
+                 kMaxWaitMs);
+  bool value = false;
+  EXPECT_TRUE(local_source_->options()->video_noise_reduction.Get(&value));
+  EXPECT_TRUE(value);
+  EXPECT_FALSE(local_source_->options()->video_leaky_bucket.Get(&value));
+}
+
+// Test that an invalid mandatory option value ends the source.
+TEST_F(LocalVideoSourceTest, InvalidOptionValueMandatory) {
+  FakeConstraints constraints;
+  // Optional constraints should be ignored if the mandatory constraints fail.
+  constraints.AddOptional(
+      MediaConstraintsInterface::kNoiseReduction, "false");
+  // Values are case-sensitive and must be all lower-case.
+  constraints.AddMandatory(
+      MediaConstraintsInterface::kLeakyBucket, "True");
+
+  CreateLocalVideoSource(&constraints);
+
+  EXPECT_EQ_WAIT(MediaSourceInterface::kEnded, state_observer_->state(),
+                 kMaxWaitMs);
+  bool value;
+  EXPECT_FALSE(local_source_->options()->video_noise_reduction.Get(&value));
+}
+
+// Test that format constraints and option constraints can be combined in one
+// constraint set.
+TEST_F(LocalVideoSourceTest, MixedOptionsAndConstraints) {
+  FakeConstraints constraints;
+  constraints.AddMandatory(MediaConstraintsInterface::kMaxWidth, 352);
+  constraints.AddMandatory(MediaConstraintsInterface::kMaxHeight, 288);
+  constraints.AddOptional(MediaConstraintsInterface::kMaxFrameRate, 5);
+
+  constraints.AddMandatory(
+      MediaConstraintsInterface::kNoiseReduction, false);
+  constraints.AddOptional(
+      MediaConstraintsInterface::kNoiseReduction, true);
+
+  CreateLocalVideoSource(&constraints);
+  EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
+                 kMaxWaitMs);
+  const cricket::VideoFormat* format = capturer_->GetCaptureFormat();
+  ASSERT_TRUE(format != NULL);
+  EXPECT_EQ(352, format->width);
+  EXPECT_EQ(288, format->height);
+  EXPECT_EQ(5, format->framerate());
+
+  // The mandatory option value wins over the conflicting optional one.
+  bool value = true;
+  EXPECT_TRUE(local_source_->options()->video_noise_reduction.Get(&value));
+  EXPECT_FALSE(value);
+  EXPECT_FALSE(local_source_->options()->video_leaky_bucket.Get(&value));
+}
+
+// Tests that the source starts video with the default resolution for
+// screencast if no constraint is set.
+TEST_F(LocalVideoSourceTest, ScreencastResolutionNoConstraint) {
+  capturer_->TestWithoutCameraFormats();
+  capturer_->SetScreencast(true);
+
+  CreateLocalVideoSource();
+  EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
+                 kMaxWaitMs);
+  const cricket::VideoFormat* format = capturer_->GetCaptureFormat();
+  ASSERT_TRUE(format != NULL);
+  EXPECT_EQ(640, format->width);
+  EXPECT_EQ(480, format->height);
+  EXPECT_EQ(30, format->framerate());
+}
+
+// Tests that the source starts video with the max width and height set by
+// constraints for screencast.
+TEST_F(LocalVideoSourceTest, ScreencastResolutionWithConstraint) {
+  FakeConstraints constraints;
+  constraints.AddMandatory(MediaConstraintsInterface::kMaxWidth, 480);
+  constraints.AddMandatory(MediaConstraintsInterface::kMaxHeight, 270);
+
+  capturer_->TestWithoutCameraFormats();
+  capturer_->SetScreencast(true);
+
+  CreateLocalVideoSource(&constraints);
+  EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
+                 kMaxWaitMs);
+  const cricket::VideoFormat* format = capturer_->GetCaptureFormat();
+  ASSERT_TRUE(format != NULL);
+  EXPECT_EQ(480, format->width);
+  EXPECT_EQ(270, format->height);
+  EXPECT_EQ(30, format->framerate());
+}
+
+// Tests that a mandatory frame rate below 1 fps cannot be satisfied and ends
+// the source without starting capture.
+TEST_F(LocalVideoSourceTest, MandatorySubOneFpsConstraints) {
+  FakeConstraints constraints;
+  constraints.AddMandatory(MediaConstraintsInterface::kMaxFrameRate, 0.5);
+
+  CreateLocalVideoSource(&constraints);
+  EXPECT_EQ_WAIT(MediaSourceInterface::kEnded, state_observer_->state(),
+                 kMaxWaitMs);
+  ASSERT_TRUE(capturer_->GetCaptureFormat() == NULL);
+}
+
+// Tests that an optional frame rate below 1 fps is ignored and capture starts
+// at the lowest supported rate.
+TEST_F(LocalVideoSourceTest, OptionalSubOneFpsConstraints) {
+  FakeConstraints constraints;
+  constraints.AddOptional(MediaConstraintsInterface::kMaxFrameRate, 0.5);
+
+  CreateLocalVideoSource(&constraints);
+  EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
+                 kMaxWaitMs);
+  const cricket::VideoFormat* format = capturer_->GetCaptureFormat();
+  ASSERT_TRUE(format != NULL);
+  EXPECT_EQ(1, format->framerate());
+}
+
diff --git a/talk/app/webrtc/mediaconstraintsinterface.cc b/talk/app/webrtc/mediaconstraintsinterface.cc
new file mode 100644
index 0000000..2e6af77
--- /dev/null
+++ b/talk/app/webrtc/mediaconstraintsinterface.cc
@@ -0,0 +1,78 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/mediaconstraintsinterface.h"
+
+#include "talk/base/stringencode.h"
+
+namespace webrtc {
+
+const char MediaConstraintsInterface::kValueTrue[] = "true";
+const char MediaConstraintsInterface::kValueFalse[] = "false";
+
+// Set |value| to the value associated with the first appearance of |key|, or
+// return false if |key| is not found.
+bool MediaConstraintsInterface::Constraints::FindFirst(
+ const std::string& key, std::string* value) const {
+ for (Constraints::const_iterator iter = begin(); iter != end(); ++iter) {
+ if (iter->key == key) {
+ *value = iter->value;
+ return true;
+ }
+ }
+ return false;
+}
+
// Find the highest-priority instance of the boolean-valued constraint named
// by |key| and return its value as |value|. |constraints| can be null.
// If |mandatory_constraints| is non-null, it is incremented if the key appears
// among the mandatory constraints.
// Returns true if the key was found and has a valid boolean value.
// If the key appears multiple times as an optional constraint, appearances
// after the first are ignored.
// Note: Because this uses FindFirst, repeated optional constraints whose
// first instance has an unrecognized value are not handled precisely in
// accordance with the specification.
bool FindConstraint(const MediaConstraintsInterface* constraints,
                    const std::string& key, bool* value,
                    size_t* mandatory_constraints) {
  std::string string_value;
  if (!constraints) {
    return false;
  }
  // Mandatory constraints take precedence over optional ones; the counter
  // is bumped even when the value fails to parse as a boolean.
  if (constraints->GetMandatory().FindFirst(key, &string_value)) {
    if (mandatory_constraints)
      ++*mandatory_constraints;
    return talk_base::FromString(string_value, value);
  }
  if (constraints->GetOptional().FindFirst(key, &string_value)) {
    return talk_base::FromString(string_value, value);
  }
  return false;
}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/mediaconstraintsinterface.h b/talk/app/webrtc/mediaconstraintsinterface.h
new file mode 100644
index 0000000..a6b23c6
--- /dev/null
+++ b/talk/app/webrtc/mediaconstraintsinterface.h
@@ -0,0 +1,129 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file contains the interface for MediaConstraints, corresponding to
+// the definition at
+// http://www.w3.org/TR/mediacapture-streams/#mediastreamconstraints and also
+// used in WebRTC: http://dev.w3.org/2011/webrtc/editor/webrtc.html#constraints.
+
+#ifndef TALK_APP_WEBRTC_MEDIACONSTRAINTSINTERFACE_H_
+#define TALK_APP_WEBRTC_MEDIACONSTRAINTSINTERFACE_H_
+
+#include <string>
+#include <vector>
+
+namespace webrtc {
+
// MediaConstraintsInterface
// Interface used for passing arguments about media constraints
// to the MediaStream and PeerConnection implementation.
class MediaConstraintsInterface {
 public:
  // A single key/value constraint pair, e.g. {"maxWidth", "640"}.
  struct Constraint {
    Constraint() {}
    // NOTE: |value| is now taken by const reference; the original signature
    // passed it by const value, which forced a needless std::string copy.
    // Callers are unaffected.
    Constraint(const std::string& key, const std::string& value)
        : key(key), value(value) {
    }
    std::string key;
    std::string value;
  };

  // An ordered list of constraints with first-match lookup.
  class Constraints : public std::vector<Constraint> {
   public:
    bool FindFirst(const std::string& key, std::string* value) const;
  };

  virtual const Constraints& GetMandatory() const = 0;
  virtual const Constraints& GetOptional() const = 0;

  // Constraint keys used by a local video source.
  // Specified by draft-alvestrand-constraints-resolution-00b
  static const char kMinAspectRatio[];  // minAspectRatio
  static const char kMaxAspectRatio[];  // maxAspectRatio
  static const char kMaxWidth[];  // maxWidth
  static const char kMinWidth[];  // minWidth
  static const char kMaxHeight[];  // maxHeight
  static const char kMinHeight[];  // minHeight
  static const char kMaxFrameRate[];  // maxFrameRate
  static const char kMinFrameRate[];  // minFrameRate

  // Constraint keys used by a local audio source.
  // These keys are google specific.
  static const char kEchoCancellation[];  // googEchoCancellation
  static const char kExperimentalEchoCancellation[];  // googEchoCancellation2
  static const char kAutoGainControl[];  // googAutoGainControl
  static const char kExperimentalAutoGainControl[];  // googAutoGainControl2
  static const char kNoiseSuppression[];  // googNoiseSuppression
  static const char kHighpassFilter[];  // googHighpassFilter

  // Google-specific constraint keys for a local video source
  static const char kNoiseReduction[];  // googNoiseReduction
  static const char kLeakyBucket[];  // googLeakyBucket
  // googTemporalLayeredScreencast
  static const char kTemporalLayeredScreencast[];

  // Constraint keys for CreateOffer / CreateAnswer
  // Specified by the W3C PeerConnection spec
  static const char kOfferToReceiveVideo[];  // OfferToReceiveVideo
  static const char kOfferToReceiveAudio[];  // OfferToReceiveAudio
  static const char kVoiceActivityDetection[];  // VoiceActivityDetection
  static const char kIceRestart[];  // IceRestart
  // These keys are google specific.
  static const char kUseRtpMux[];  // googUseRtpMUX

  // Constraints values.
  static const char kValueTrue[];  // true
  static const char kValueFalse[];  // false

  // Temporary pseudo-constraints used to enable DTLS-SRTP
  static const char kEnableDtlsSrtp[];  // Enable DTLS-SRTP
  // Temporary pseudo-constraints used to enable DataChannels
  static const char kEnableRtpDataChannels[];  // Enable RTP DataChannels
  static const char kEnableSctpDataChannels[];  // Enable SCTP DataChannels

  // The prefix of internal-only constraints whose JS set values should be
  // stripped by Chrome before passed down to Libjingle.
  static const char kInternalConstraintPrefix[];

  // This constraint is for internal use only, representing the Chrome command
  // line flag. So it is prefixed with "internal" so JS values will be removed.
  // Used by a local audio source.
  static const char kInternalAecDump[];  // internalAecDump

 protected:
  // Dtor protected as objects shouldn't be deleted via this interface
  virtual ~MediaConstraintsInterface() {}
};

// Convenience helper: finds the boolean constraint |key|, preferring the
// mandatory set over the optional set. See mediaconstraintsinterface.cc
// for the precise semantics.
bool FindConstraint(const MediaConstraintsInterface* constraints,
                    const std::string& key, bool* value,
                    size_t* mandatory_constraints);
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_MEDIACONSTRAINTSINTERFACE_H_
diff --git a/talk/app/webrtc/mediastream.cc b/talk/app/webrtc/mediastream.cc
new file mode 100644
index 0000000..aad8e85
--- /dev/null
+++ b/talk/app/webrtc/mediastream.cc
@@ -0,0 +1,112 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/mediastream.h"
+#include "talk/base/logging.h"
+
+namespace webrtc {
+
// Returns an iterator to the first track in |vector| whose id() equals
// |track_id|, or vector->end() when no such track exists.
// Fix: removed the stray ';' that followed the closing brace of the
// function template (a redundant empty declaration).
template <class V>
static typename V::iterator FindTrack(V* vector,
                                      const std::string& track_id) {
  typename V::iterator it = vector->begin();
  for (; it != vector->end(); ++it) {
    if ((*it)->id() == track_id) {
      break;
    }
  }
  return it;
}
+
+talk_base::scoped_refptr<MediaStream> MediaStream::Create(
+ const std::string& label) {
+ talk_base::RefCountedObject<MediaStream>* stream =
+ new talk_base::RefCountedObject<MediaStream>(label);
+ return stream;
+}
+
+MediaStream::MediaStream(const std::string& label)
+ : label_(label) {
+}
+
// Thin type-dispatching wrappers: each forwards to the private template
// helper with the matching track vector. Add returns false on duplicate id;
// Remove returns false when the track is absent or null.
bool MediaStream::AddTrack(AudioTrackInterface* track) {
  return AddTrack<AudioTrackVector, AudioTrackInterface>(&audio_tracks_, track);
}

bool MediaStream::AddTrack(VideoTrackInterface* track) {
  return AddTrack<VideoTrackVector, VideoTrackInterface>(&video_tracks_, track);
}

bool MediaStream::RemoveTrack(AudioTrackInterface* track) {
  return RemoveTrack<AudioTrackVector>(&audio_tracks_, track);
}

bool MediaStream::RemoveTrack(VideoTrackInterface* track) {
  return RemoveTrack<VideoTrackVector>(&video_tracks_, track);
}
+
// Returns the audio track whose id matches |track_id|, or NULL if there is
// no such track. The returned scoped_refptr shares ownership.
talk_base::scoped_refptr<AudioTrackInterface>
MediaStream::FindAudioTrack(const std::string& track_id) {
  AudioTrackVector::iterator it = FindTrack(&audio_tracks_, track_id);
  if (it == audio_tracks_.end())
    return NULL;
  return *it;
}

// Video-track counterpart of FindAudioTrack; same NULL-on-miss contract.
talk_base::scoped_refptr<VideoTrackInterface>
MediaStream::FindVideoTrack(const std::string& track_id) {
  VideoTrackVector::iterator it = FindTrack(&video_tracks_, track_id);
  if (it == video_tracks_.end())
    return NULL;
  return *it;
}
+
// Appends |track| to |tracks| unless a track with the same id is already
// present (returns false in that case). Notifies observers on success.
template <typename TrackVector, typename Track>
bool MediaStream::AddTrack(TrackVector* tracks, Track* track) {
  typename TrackVector::iterator it = FindTrack(tracks, track->id());
  if (it != tracks->end())
    return false;
  tracks->push_back(track);
  FireOnChanged();
  return true;
}

// Erases the entry matching |track|'s id from |tracks|. Returns false for a
// null |track| or when no matching entry exists; fires OnChanged on success.
template <typename TrackVector>
bool MediaStream::RemoveTrack(TrackVector* tracks,
                              MediaStreamTrackInterface* track) {
  ASSERT(tracks != NULL);
  if (!track)
    return false;
  typename TrackVector::iterator it = FindTrack(tracks, track->id());
  if (it == tracks->end())
    return false;
  tracks->erase(it);
  FireOnChanged();
  return true;
}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/mediastream.h b/talk/app/webrtc/mediastream.h
new file mode 100644
index 0000000..e5ac6eb
--- /dev/null
+++ b/talk/app/webrtc/mediastream.h
@@ -0,0 +1,75 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file contains the implementation of MediaStreamInterface interface.
+
+#ifndef TALK_APP_WEBRTC_MEDIASTREAM_H_
+#define TALK_APP_WEBRTC_MEDIASTREAM_H_
+
+#include <string>
+#include <vector>
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/notifier.h"
+
+namespace webrtc {
+
// Concrete MediaStreamInterface implementation holding the audio and video
// tracks of one stream. Observers registered via the Notifier base are
// fired whenever the track lists change.
class MediaStream : public Notifier<MediaStreamInterface> {
 public:
  // Sole way to construct a MediaStream; returns a ref-counted instance.
  static talk_base::scoped_refptr<MediaStream> Create(const std::string& label);

  virtual std::string label() const OVERRIDE { return label_; }

  // Add/Remove return false on duplicate add or missing/null remove.
  virtual bool AddTrack(AudioTrackInterface* track) OVERRIDE;
  virtual bool AddTrack(VideoTrackInterface* track) OVERRIDE;
  virtual bool RemoveTrack(AudioTrackInterface* track) OVERRIDE;
  virtual bool RemoveTrack(VideoTrackInterface* track) OVERRIDE;
  // Lookup by track id; returns NULL when not found.
  // NOTE(review): unlike the methods above these lack OVERRIDE — confirm
  // whether they override base-class virtuals.
  virtual talk_base::scoped_refptr<AudioTrackInterface>
      FindAudioTrack(const std::string& track_id);
  virtual talk_base::scoped_refptr<VideoTrackInterface>
      FindVideoTrack(const std::string& track_id);

  // Return copies of the track vectors (snapshot semantics).
  virtual AudioTrackVector GetAudioTracks() OVERRIDE { return audio_tracks_; }
  virtual VideoTrackVector GetVideoTracks() OVERRIDE { return video_tracks_; }

 protected:
  // Protected: instantiate via Create().
  explicit MediaStream(const std::string& label);

 private:
  // Shared implementations behind the typed Add/Remove overloads.
  template <typename TrackVector, typename Track>
  bool AddTrack(TrackVector* Tracks, Track* track);
  template <typename TrackVector>
  bool RemoveTrack(TrackVector* Tracks, MediaStreamTrackInterface* track);

  std::string label_;
  AudioTrackVector audio_tracks_;
  VideoTrackVector video_tracks_;
};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_MEDIASTREAM_H_
diff --git a/talk/app/webrtc/mediastream_unittest.cc b/talk/app/webrtc/mediastream_unittest.cc
new file mode 100644
index 0000000..bb2d50e
--- /dev/null
+++ b/talk/app/webrtc/mediastream_unittest.cc
@@ -0,0 +1,162 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+
+#include "talk/app/webrtc/audiotrack.h"
+#include "talk/app/webrtc/mediastream.h"
+#include "talk/app/webrtc/videotrack.h"
+#include "talk/base/refcount.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/gunit.h"
+#include "testing/base/public/gmock.h"
+
+static const char kStreamLabel1[] = "local_stream_1";
+static const char kVideoTrackId[] = "dummy_video_cam_1";
+static const char kAudioTrackId[] = "dummy_microphone_1";
+
+using talk_base::scoped_refptr;
+using ::testing::Exactly;
+
+namespace webrtc {
+
// Helper class to test Observer: a gmock ObserverInterface whose OnChanged
// invocations are asserted with EXPECT_CALL in the tests below.
class MockObserver : public ObserverInterface {
 public:
  MockObserver() {}

  MOCK_METHOD0(OnChanged, void());
};
+
// Fixture: builds one stream containing one audio and one video track, and
// verifies in SetUp that duplicate AddTrack calls are rejected.
class MediaStreamTest: public testing::Test {
 protected:
  virtual void SetUp() {
    stream_ = MediaStream::Create(kStreamLabel1);
    ASSERT_TRUE(stream_.get() != NULL);

    // Tracks are created with a NULL source and start in kInitializing.
    video_track_ = VideoTrack::Create(kVideoTrackId, NULL);
    ASSERT_TRUE(video_track_.get() != NULL);
    EXPECT_EQ(MediaStreamTrackInterface::kInitializing, video_track_->state());

    audio_track_ = AudioTrack::Create(kAudioTrackId, NULL);

    ASSERT_TRUE(audio_track_.get() != NULL);
    EXPECT_EQ(MediaStreamTrackInterface::kInitializing, audio_track_->state());

    // Second add of the same track must fail (duplicate id).
    EXPECT_TRUE(stream_->AddTrack(video_track_));
    EXPECT_FALSE(stream_->AddTrack(video_track_));
    EXPECT_TRUE(stream_->AddTrack(audio_track_));
    EXPECT_FALSE(stream_->AddTrack(audio_track_));
  }

  // Toggles |track|'s enabled flag and state, asserting that each mutation
  // fires exactly one OnChanged notification on a registered observer.
  void ChangeTrack(MediaStreamTrackInterface* track) {
    MockObserver observer;
    track->RegisterObserver(&observer);

    EXPECT_CALL(observer, OnChanged())
        .Times(Exactly(1));
    track->set_enabled(false);
    EXPECT_FALSE(track->enabled());

    EXPECT_CALL(observer, OnChanged())
        .Times(Exactly(1));
    track->set_state(MediaStreamTrackInterface::kLive);
    EXPECT_EQ(MediaStreamTrackInterface::kLive, track->state());
  }

  scoped_refptr<MediaStreamInterface> stream_;
  scoped_refptr<AudioTrackInterface> audio_track_;
  scoped_refptr<VideoTrackInterface> video_track_;
};
+
// Verifies GetAudioTracks/GetVideoTracks and Find*Track return the tracks
// added in SetUp, with the expected ids and enabled-by-default state.
TEST_F(MediaStreamTest, GetTrackInfo) {
  ASSERT_EQ(1u, stream_->GetVideoTracks().size());
  ASSERT_EQ(1u, stream_->GetAudioTracks().size());

  // Verify the video track.
  scoped_refptr<webrtc::MediaStreamTrackInterface> video_track(
      stream_->GetVideoTracks()[0]);
  EXPECT_EQ(0, video_track->id().compare(kVideoTrackId));
  EXPECT_TRUE(video_track->enabled());

  // The accessor and FindVideoTrack must hand back the same object.
  ASSERT_EQ(1u, stream_->GetVideoTracks().size());
  EXPECT_TRUE(stream_->GetVideoTracks()[0].get() == video_track.get());
  EXPECT_TRUE(stream_->FindVideoTrack(video_track->id()).get()
              == video_track.get());
  video_track = stream_->GetVideoTracks()[0];
  EXPECT_EQ(0, video_track->id().compare(kVideoTrackId));
  EXPECT_TRUE(video_track->enabled());

  // Verify the audio track.
  scoped_refptr<webrtc::MediaStreamTrackInterface> audio_track(
      stream_->GetAudioTracks()[0]);
  EXPECT_EQ(0, audio_track->id().compare(kAudioTrackId));
  EXPECT_TRUE(audio_track->enabled());
  ASSERT_EQ(1u, stream_->GetAudioTracks().size());
  EXPECT_TRUE(stream_->GetAudioTracks()[0].get() == audio_track.get());
  EXPECT_TRUE(stream_->FindAudioTrack(audio_track->id()).get()
              == audio_track.get());
  audio_track = stream_->GetAudioTracks()[0];
  EXPECT_EQ(0, audio_track->id().compare(kAudioTrackId));
  EXPECT_TRUE(audio_track->enabled());
}
+
// Verifies RemoveTrack: exactly one stream OnChanged per successful removal
// (two total), repeated/null removals fail, and the vectors end up empty.
TEST_F(MediaStreamTest, RemoveTrack) {
  MockObserver observer;
  stream_->RegisterObserver(&observer);

  // One notification for the audio removal, one for the video removal.
  EXPECT_CALL(observer, OnChanged())
      .Times(Exactly(2));

  EXPECT_TRUE(stream_->RemoveTrack(audio_track_));
  EXPECT_FALSE(stream_->RemoveTrack(audio_track_));
  EXPECT_EQ(0u, stream_->GetAudioTracks().size());
  // NOTE(review): duplicated assertion — likely a copy/paste artifact.
  EXPECT_EQ(0u, stream_->GetAudioTracks().size());

  EXPECT_TRUE(stream_->RemoveTrack(video_track_));
  EXPECT_FALSE(stream_->RemoveTrack(video_track_));

  EXPECT_EQ(0u, stream_->GetVideoTracks().size());
  EXPECT_EQ(0u, stream_->GetVideoTracks().size());

  // Null tracks must be rejected without crashing.
  EXPECT_FALSE(stream_->RemoveTrack(static_cast<AudioTrackInterface*>(NULL)));
  EXPECT_FALSE(stream_->RemoveTrack(static_cast<VideoTrackInterface*>(NULL)));
}
+
// Runs the shared ChangeTrack observer checks against the video track.
TEST_F(MediaStreamTest, ChangeVideoTrack) {
  scoped_refptr<webrtc::VideoTrackInterface> video_track(
      stream_->GetVideoTracks()[0]);
  ChangeTrack(video_track.get());
}

// Runs the shared ChangeTrack observer checks against the audio track.
TEST_F(MediaStreamTest, ChangeAudioTrack) {
  scoped_refptr<webrtc::AudioTrackInterface> audio_track(
      stream_->GetAudioTracks()[0]);
  ChangeTrack(audio_track.get());
}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/mediastreamhandler.cc b/talk/app/webrtc/mediastreamhandler.cc
new file mode 100644
index 0000000..a6a45b2
--- /dev/null
+++ b/talk/app/webrtc/mediastreamhandler.cc
@@ -0,0 +1,440 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/mediastreamhandler.h"
+
+#include "talk/app/webrtc/localaudiosource.h"
+#include "talk/app/webrtc/localvideosource.h"
+#include "talk/app/webrtc/videosourceinterface.h"
+
+namespace webrtc {
+
// Base handler: caches the track's state/enabled flags and registers itself
// as the track's observer for the lifetime of the handler.
TrackHandler::TrackHandler(MediaStreamTrackInterface* track, uint32 ssrc)
    : track_(track),
      ssrc_(ssrc),
      state_(track->state()),
      enabled_(track->enabled()) {
  track_->RegisterObserver(this);
}

TrackHandler::~TrackHandler() {
  track_->UnregisterObserver(this);
}

// Diffs the cached flags against the track and dispatches to the matching
// subclass hook, so subclasses only see real transitions.
void TrackHandler::OnChanged() {
  if (state_ != track_->state()) {
    state_ = track_->state();
    OnStateChanged();
  }
  if (enabled_ != track_->enabled()) {
    enabled_ = track_->enabled();
    OnEnabledChanged();
  }
}
+
// Pushes a local audio track's enabled state (and its source's AudioOptions)
// to the provider. The ctor syncs the initial state immediately.
LocalAudioTrackHandler::LocalAudioTrackHandler(
    AudioTrackInterface* track,
    uint32 ssrc,
    AudioProviderInterface* provider)
    : TrackHandler(track, ssrc),
      audio_track_(track),
      provider_(provider) {
  OnEnabledChanged();
}

LocalAudioTrackHandler::~LocalAudioTrackHandler() {
}

void LocalAudioTrackHandler::OnStateChanged() {
  // TODO(perkj): What should happen when the state change?
}

// Disables sending for this ssrc with default (empty) options.
void LocalAudioTrackHandler::Stop() {
  cricket::AudioOptions options;
  provider_->SetAudioSend(ssrc(), false, options);
}

void LocalAudioTrackHandler::OnEnabledChanged() {
  cricket::AudioOptions options;
  // Only a LocalAudioSource carries options; the static_cast assumes local
  // tracks always wrap a LocalAudioSource.
  if (audio_track_->enabled() && audio_track_->GetSource()) {
    options = static_cast<LocalAudioSource*>(
        audio_track_->GetSource())->options();
  }
  provider_->SetAudioSend(ssrc(), audio_track_->enabled(), options);
}
+
// Mirrors a remote audio track's enabled flag onto audio playout, and wires
// the track's frame input as the renderer for |ssrc|.
RemoteAudioTrackHandler::RemoteAudioTrackHandler(
    AudioTrackInterface* track,
    uint32 ssrc,
    AudioProviderInterface* provider)
    : TrackHandler(track, ssrc),
      audio_track_(track),
      provider_(provider) {
  OnEnabledChanged();
  provider_->SetAudioRenderer(ssrc, audio_track_->FrameInput());
}

RemoteAudioTrackHandler::~RemoteAudioTrackHandler() {
}

void RemoteAudioTrackHandler::Stop() {
  provider_->SetAudioPlayout(ssrc(), false);
}

void RemoteAudioTrackHandler::OnStateChanged() {
}

void RemoteAudioTrackHandler::OnEnabledChanged() {
  provider_->SetAudioPlayout(ssrc(), audio_track_->enabled());
}
+
// Connects a local video track's capturer to the provider and keeps the
// send-enabled flag (plus source VideoOptions) in sync.
LocalVideoTrackHandler::LocalVideoTrackHandler(
    VideoTrackInterface* track,
    uint32 ssrc,
    VideoProviderInterface* provider)
    : TrackHandler(track, ssrc),
      local_video_track_(track),
      provider_(provider) {
  VideoSourceInterface* source = local_video_track_->GetSource();
  if (source)
    provider_->SetCaptureDevice(ssrc, source->GetVideoCapturer());
  OnEnabledChanged();
}

LocalVideoTrackHandler::~LocalVideoTrackHandler() {
}

void LocalVideoTrackHandler::OnStateChanged() {
}

// Detaches the capture device and disables sending for this ssrc.
void LocalVideoTrackHandler::Stop() {
  provider_->SetCaptureDevice(ssrc(), NULL);
  provider_->SetVideoSend(ssrc(), false, NULL);
}

void LocalVideoTrackHandler::OnEnabledChanged() {
  // Options are only forwarded while the track is enabled and has a source.
  const cricket::VideoOptions* options = NULL;
  VideoSourceInterface* source = local_video_track_->GetSource();
  if (local_video_track_->enabled() && source) {
    options = source->options();
  }
  provider_->SetVideoSend(ssrc(), local_video_track_->enabled(), options);
}
+
// Mirrors a remote video track's enabled flag onto video playout, routing
// decoded frames to the track's frame input.
RemoteVideoTrackHandler::RemoteVideoTrackHandler(
    VideoTrackInterface* track,
    uint32 ssrc,
    VideoProviderInterface* provider)
    : TrackHandler(track, ssrc),
      remote_video_track_(track),
      provider_(provider) {
  OnEnabledChanged();
}

RemoteVideoTrackHandler::~RemoteVideoTrackHandler() {
}

void RemoteVideoTrackHandler::Stop() {
  // Since cricket::VideoRenderer is not reference counted
  // we need to remove the renderer before we are deleted.
  provider_->SetVideoPlayout(ssrc(), false, NULL);
}

void RemoteVideoTrackHandler::OnStateChanged() {
}

void RemoteVideoTrackHandler::OnEnabledChanged() {
  provider_->SetVideoPlayout(ssrc(),
                             remote_video_track_->enabled(),
                             remote_video_track_->FrameInput());
}
+
// Owns the per-track handlers for one stream; providers are borrowed and
// must outlive this handler.
MediaStreamHandler::MediaStreamHandler(MediaStreamInterface* stream,
                                       AudioProviderInterface* audio_provider,
                                       VideoProviderInterface* video_provider)
    : stream_(stream),
      audio_provider_(audio_provider),
      video_provider_(video_provider) {
}

// Deletes all owned track handlers (raw pointers in track_handlers_).
MediaStreamHandler::~MediaStreamHandler() {
  for (TrackHandlers::iterator it = track_handlers_.begin();
       it != track_handlers_.end(); ++it) {
    delete *it;
  }
}
+
+void MediaStreamHandler::RemoveTrack(MediaStreamTrackInterface* track) {
+ for (TrackHandlers::iterator it = track_handlers_.begin();
+ it != track_handlers_.end(); ++it) {
+ if ((*it)->track() == track) {
+ TrackHandler* track = *it;
+ track->Stop();
+ delete track;
+ track_handlers_.erase(it);
+ break;
+ }
+ }
+}
+
+TrackHandler* MediaStreamHandler::FindTrackHandler(
+ MediaStreamTrackInterface* track) {
+ TrackHandlers::iterator it = track_handlers_.begin();
+ for (; it != track_handlers_.end(); ++it) {
+ if ((*it)->track() == track) {
+ return *it;
+ break;
+ }
+ }
+ return NULL;
+}
+
// Accessor for the stream this handler serves.
MediaStreamInterface* MediaStreamHandler::stream() {
  return stream_.get();
}

// Stream-level change notifications are intentionally ignored; per-track
// changes are handled by the individual TrackHandlers.
void MediaStreamHandler::OnChanged() {
}

// Stops every owned track handler without deleting it.
void MediaStreamHandler::Stop() {
  for (TrackHandlers::const_iterator it = track_handlers_.begin();
       it != track_handlers_.end(); ++it) {
    (*it)->Stop();
  }
}
+
// Handler for streams that originate locally; creates Local*TrackHandlers.
LocalMediaStreamHandler::LocalMediaStreamHandler(
    MediaStreamInterface* stream,
    AudioProviderInterface* audio_provider,
    VideoProviderInterface* video_provider)
    : MediaStreamHandler(stream, audio_provider, video_provider) {
}

LocalMediaStreamHandler::~LocalMediaStreamHandler() {
}

// Registers a LocalAudioTrackHandler for |audio_track|/|ssrc|. Asserts the
// track is not already handled; ownership stays with track_handlers_.
void LocalMediaStreamHandler::AddAudioTrack(AudioTrackInterface* audio_track,
                                            uint32 ssrc) {
  ASSERT(!FindTrackHandler(audio_track));

  TrackHandler* handler(new LocalAudioTrackHandler(audio_track, ssrc,
                                                   audio_provider_));
  track_handlers_.push_back(handler);
}

// Video counterpart of AddAudioTrack.
void LocalMediaStreamHandler::AddVideoTrack(VideoTrackInterface* video_track,
                                            uint32 ssrc) {
  ASSERT(!FindTrackHandler(video_track));

  TrackHandler* handler(new LocalVideoTrackHandler(video_track, ssrc,
                                                   video_provider_));
  track_handlers_.push_back(handler);
}
+
// Handler for streams received from the remote peer; creates
// Remote*TrackHandlers.
RemoteMediaStreamHandler::RemoteMediaStreamHandler(
    MediaStreamInterface* stream,
    AudioProviderInterface* audio_provider,
    VideoProviderInterface* video_provider)
    : MediaStreamHandler(stream, audio_provider, video_provider) {
}

RemoteMediaStreamHandler::~RemoteMediaStreamHandler() {
}

// Registers a RemoteAudioTrackHandler for |audio_track|/|ssrc|.
void RemoteMediaStreamHandler::AddAudioTrack(AudioTrackInterface* audio_track,
                                             uint32 ssrc) {
  ASSERT(!FindTrackHandler(audio_track));
  TrackHandler* handler(
      new RemoteAudioTrackHandler(audio_track, ssrc, audio_provider_));
  track_handlers_.push_back(handler);
}

// Video counterpart of AddAudioTrack.
void RemoteMediaStreamHandler::AddVideoTrack(VideoTrackInterface* video_track,
                                             uint32 ssrc) {
  ASSERT(!FindTrackHandler(video_track));
  TrackHandler* handler(
      new RemoteVideoTrackHandler(video_track, ssrc, video_provider_));
  track_handlers_.push_back(handler);
}
+
// Owns the per-stream handlers for all local and remote streams of one
// PeerConnection; providers are borrowed.
MediaStreamHandlerContainer::MediaStreamHandlerContainer(
    AudioProviderInterface* audio_provider,
    VideoProviderInterface* video_provider)
    : audio_provider_(audio_provider),
      video_provider_(video_provider) {
}

// TearDown() must have been called before destruction; both lists are
// expected to be empty here.
MediaStreamHandlerContainer::~MediaStreamHandlerContainer() {
  ASSERT(remote_streams_handlers_.empty());
  ASSERT(local_streams_handlers_.empty());
}

// Stops and deletes every remote and local stream handler.
void MediaStreamHandlerContainer::TearDown() {
  for (StreamHandlerList::iterator it = remote_streams_handlers_.begin();
       it != remote_streams_handlers_.end(); ++it) {
    (*it)->Stop();
    delete *it;
  }
  remote_streams_handlers_.clear();
  for (StreamHandlerList::iterator it = local_streams_handlers_.begin();
       it != local_streams_handlers_.end(); ++it) {
    (*it)->Stop();
    delete *it;
  }
  local_streams_handlers_.clear();
}
+
// Stops and removes the handler for a remote |stream|, if present.
void MediaStreamHandlerContainer::RemoveRemoteStream(
    MediaStreamInterface* stream) {
  DeleteStreamHandler(&remote_streams_handlers_, stream);
}

// Adds |audio_track| to |stream|'s remote handler, creating the stream
// handler on first use.
void MediaStreamHandlerContainer::AddRemoteAudioTrack(
    MediaStreamInterface* stream,
    AudioTrackInterface* audio_track,
    uint32 ssrc) {
  MediaStreamHandler* handler = FindStreamHandler(remote_streams_handlers_,
                                                  stream);
  if (handler == NULL) {
    handler = CreateRemoteStreamHandler(stream);
  }
  handler->AddAudioTrack(audio_track, ssrc);
}

// Video counterpart of AddRemoteAudioTrack.
void MediaStreamHandlerContainer::AddRemoteVideoTrack(
    MediaStreamInterface* stream,
    VideoTrackInterface* video_track,
    uint32 ssrc) {
  MediaStreamHandler* handler = FindStreamHandler(remote_streams_handlers_,
                                                  stream);
  if (handler == NULL) {
    handler = CreateRemoteStreamHandler(stream);
  }
  handler->AddVideoTrack(video_track, ssrc);
}
+
+void MediaStreamHandlerContainer::RemoveRemoteTrack(
+ MediaStreamInterface* stream,
+ MediaStreamTrackInterface* track) {
+ MediaStreamHandler* handler = FindStreamHandler(remote_streams_handlers_,
+ stream);
+ if (!VERIFY(handler != NULL)) {
+ LOG(LS_WARNING) << "Local MediaStreamHandler for stream with id "
+ << stream->label() << "doesnt't exist.";
+ return;
+ }
+ handler->RemoveTrack(track);
+}
+
// Stops and removes the handler for a local |stream|, if present.
void MediaStreamHandlerContainer::RemoveLocalStream(
    MediaStreamInterface* stream) {
  DeleteStreamHandler(&local_streams_handlers_, stream);
}

// Adds |audio_track| to |stream|'s local handler, creating the stream
// handler on first use.
void MediaStreamHandlerContainer::AddLocalAudioTrack(
    MediaStreamInterface* stream,
    AudioTrackInterface* audio_track,
    uint32 ssrc) {
  MediaStreamHandler* handler = FindStreamHandler(local_streams_handlers_,
                                                  stream);
  if (handler == NULL) {
    handler = CreateLocalStreamHandler(stream);
  }
  handler->AddAudioTrack(audio_track, ssrc);
}

// Video counterpart of AddLocalAudioTrack.
void MediaStreamHandlerContainer::AddLocalVideoTrack(
    MediaStreamInterface* stream,
    VideoTrackInterface* video_track,
    uint32 ssrc) {
  MediaStreamHandler* handler = FindStreamHandler(local_streams_handlers_,
                                                  stream);
  if (handler == NULL) {
    handler = CreateLocalStreamHandler(stream);
  }
  handler->AddVideoTrack(video_track, ssrc);
}
+
+void MediaStreamHandlerContainer::RemoveLocalTrack(
+ MediaStreamInterface* stream,
+ MediaStreamTrackInterface* track) {
+ MediaStreamHandler* handler = FindStreamHandler(local_streams_handlers_,
+ stream);
+ if (!VERIFY(handler != NULL)) {
+ LOG(LS_WARNING) << "Remote MediaStreamHandler for stream with id "
+ << stream->label() << "doesnt't exist.";
+ return;
+ }
+ handler->RemoveTrack(track);
+}
+
// Creates, registers, and returns a handler for the remote |stream|.
// Asserts one does not already exist; ownership stays with the list.
MediaStreamHandler* MediaStreamHandlerContainer::CreateRemoteStreamHandler(
    MediaStreamInterface* stream) {
  ASSERT(!FindStreamHandler(remote_streams_handlers_, stream));

  RemoteMediaStreamHandler* handler =
      new RemoteMediaStreamHandler(stream, audio_provider_, video_provider_);
  remote_streams_handlers_.push_back(handler);
  return handler;
}

// Local counterpart of CreateRemoteStreamHandler.
MediaStreamHandler* MediaStreamHandlerContainer::CreateLocalStreamHandler(
    MediaStreamInterface* stream) {
  ASSERT(!FindStreamHandler(local_streams_handlers_, stream));

  LocalMediaStreamHandler* handler =
      new LocalMediaStreamHandler(stream, audio_provider_, video_provider_);
  local_streams_handlers_.push_back(handler);
  return handler;
}
+
+MediaStreamHandler* MediaStreamHandlerContainer::FindStreamHandler(
+ const StreamHandlerList& handlers,
+ MediaStreamInterface* stream) {
+ StreamHandlerList::const_iterator it = handlers.begin();
+ for (; it != handlers.end(); ++it) {
+ if ((*it)->stream() == stream) {
+ return *it;
+ }
+ }
+ return NULL;
+}
+
// Stops, deletes, and unlinks the handler for |stream| from |streamhandlers|.
// Silently does nothing when no matching handler is found.
void MediaStreamHandlerContainer::DeleteStreamHandler(
    StreamHandlerList* streamhandlers, MediaStreamInterface* stream) {
  StreamHandlerList::iterator it = streamhandlers->begin();
  for (; it != streamhandlers->end(); ++it) {
    if ((*it)->stream() == stream) {
      (*it)->Stop();
      delete *it;
      streamhandlers->erase(it);
      break;
    }
  }
}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/mediastreamhandler.h b/talk/app/webrtc/mediastreamhandler.h
new file mode 100644
index 0000000..0cd34d6
--- /dev/null
+++ b/talk/app/webrtc/mediastreamhandler.h
@@ -0,0 +1,264 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file contains classes for listening for changes on MediaStreams and
+// MediaTracks that are connected to a certain PeerConnection.
+// Example: If a user sets a renderer on a remote video track, the renderer is
+// connected to the appropriate remote video stream.
+
+#ifndef TALK_APP_WEBRTC_MEDIASTREAMHANDLER_H_
+#define TALK_APP_WEBRTC_MEDIASTREAMHANDLER_H_
+
+#include <list>
+#include <vector>
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/mediastreamprovider.h"
+#include "talk/app/webrtc/peerconnectioninterface.h"
+#include "talk/base/thread.h"
+
+namespace webrtc {
+
+// TrackHandler listens to events on a MediaStreamTrackInterface that is
+// connected to a certain PeerConnection.
+class TrackHandler : public ObserverInterface {
+ public:
+ TrackHandler(MediaStreamTrackInterface* track, uint32 ssrc);
+ virtual ~TrackHandler();
+ // ObserverInterface implementation; invoked when |track_| changes.
+ // Presumably compares the track's current state/enabled flag against the
+ // cached |state_|/|enabled_| and dispatches to OnStateChanged() or
+ // OnEnabledChanged() — confirm in mediastreamhandler.cc.
+ virtual void OnChanged();
+ // Stop using |track_| on this PeerConnection.
+ virtual void Stop() = 0;
+
+ MediaStreamTrackInterface* track() { return track_; }
+ uint32 ssrc() const { return ssrc_; }
+
+ protected:
+ // Hooks for subclasses, called when the corresponding track property
+ // changes.
+ virtual void OnStateChanged() = 0;
+ virtual void OnEnabledChanged() = 0;
+
+ private:
+ // Keeps the observed track alive for the lifetime of this handler.
+ talk_base::scoped_refptr<MediaStreamTrackInterface> track_;
+ // SSRC the track is associated with on this PeerConnection.
+ uint32 ssrc_;
+ // Last observed values of the track's state and enabled flag.
+ MediaStreamTrackInterface::TrackState state_;
+ bool enabled_;
+};
+
+// LocalAudioTrackHandler listens to events on a local AudioTrack instance
+// connected to a PeerConnection and orders the |provider| to execute the
+// requested change.
+class LocalAudioTrackHandler : public TrackHandler {
+ public:
+ LocalAudioTrackHandler(AudioTrackInterface* track,
+ uint32 ssrc,
+ AudioProviderInterface* provider);
+ virtual ~LocalAudioTrackHandler();
+
+ virtual void Stop() OVERRIDE;
+
+ protected:
+ virtual void OnStateChanged() OVERRIDE;
+ virtual void OnEnabledChanged() OVERRIDE;
+
+ private:
+ // Typed view of the track; the base class keeps the owning reference.
+ AudioTrackInterface* audio_track_;
+ // Not owned; must outlive this handler.
+ AudioProviderInterface* provider_;
+};
+
+// RemoteAudioTrackHandler listens to events on a remote AudioTrack instance
+// connected to a PeerConnection and orders the |provider| to execute the
+// requested change.
+class RemoteAudioTrackHandler : public TrackHandler {
+ public:
+ RemoteAudioTrackHandler(AudioTrackInterface* track,
+ uint32 ssrc,
+ AudioProviderInterface* provider);
+ virtual ~RemoteAudioTrackHandler();
+ virtual void Stop() OVERRIDE;
+
+ protected:
+ virtual void OnStateChanged() OVERRIDE;
+ virtual void OnEnabledChanged() OVERRIDE;
+
+ private:
+ // Typed view of the track; the base class keeps the owning reference.
+ AudioTrackInterface* audio_track_;
+ // Not owned; must outlive this handler.
+ AudioProviderInterface* provider_;
+};
+
+// LocalVideoTrackHandler listens to events on a local VideoTrack instance
+// connected to a PeerConnection and orders the |provider| to execute the
+// requested change.
+class LocalVideoTrackHandler : public TrackHandler {
+ public:
+ LocalVideoTrackHandler(VideoTrackInterface* track,
+ uint32 ssrc,
+ VideoProviderInterface* provider);
+ virtual ~LocalVideoTrackHandler();
+ virtual void Stop() OVERRIDE;
+
+ protected:
+ virtual void OnStateChanged() OVERRIDE;
+ virtual void OnEnabledChanged() OVERRIDE;
+
+ private:
+ // Typed view of the track; the base class keeps the owning reference.
+ VideoTrackInterface* local_video_track_;
+ // Not owned; must outlive this handler.
+ VideoProviderInterface* provider_;
+};
+
+// RemoteVideoTrackHandler listens to events on a remote VideoTrack instance
+// connected to a PeerConnection and orders the |provider| to execute
+// requested changes.
+class RemoteVideoTrackHandler : public TrackHandler {
+ public:
+ RemoteVideoTrackHandler(VideoTrackInterface* track,
+ uint32 ssrc,
+ VideoProviderInterface* provider);
+ virtual ~RemoteVideoTrackHandler();
+ virtual void Stop() OVERRIDE;
+
+ protected:
+ virtual void OnStateChanged() OVERRIDE;
+ virtual void OnEnabledChanged() OVERRIDE;
+
+ private:
+ // Typed view of the track; the base class keeps the owning reference.
+ VideoTrackInterface* remote_video_track_;
+ // Not owned; must outlive this handler.
+ VideoProviderInterface* provider_;
+};
+
+// Base class for handling the tracks of one MediaStream connected to a
+// certain PeerConnection. Keeps one TrackHandler per added track in
+// |track_handlers_| and observes the stream via ObserverInterface.
+class MediaStreamHandler : public ObserverInterface {
+ public:
+  MediaStreamHandler(MediaStreamInterface* stream,
+                     AudioProviderInterface* audio_provider,
+                     VideoProviderInterface* video_provider);
+  // Virtual because subclasses (LocalMediaStreamHandler,
+  // RemoteMediaStreamHandler) are deleted through MediaStreamHandler
+  // pointers (see MediaStreamHandlerContainer::DeleteStreamHandler);
+  // deleting through a base pointer without a virtual destructor is
+  // undefined behavior.
+  virtual ~MediaStreamHandler();
+  MediaStreamInterface* stream();
+  // Stop using |stream_| on this PeerConnection.
+  void Stop();
+
+  // Create a TrackHandler associating the given track with |ssrc|.
+  virtual void AddAudioTrack(AudioTrackInterface* audio_track, uint32 ssrc) = 0;
+  virtual void AddVideoTrack(VideoTrackInterface* video_track, uint32 ssrc) = 0;
+
+  // Remove the TrackHandler for |track|.
+  virtual void RemoveTrack(MediaStreamTrackInterface* track);
+  virtual void OnChanged() OVERRIDE;
+
+ protected:
+  // Returns the TrackHandler for |track|, or NULL if none exists.
+  TrackHandler* FindTrackHandler(MediaStreamTrackInterface* track);
+  talk_base::scoped_refptr<MediaStreamInterface> stream_;
+  // Not owned; must outlive this handler.
+  AudioProviderInterface* audio_provider_;
+  VideoProviderInterface* video_provider_;
+  typedef std::vector<TrackHandler*> TrackHandlers;
+  TrackHandlers track_handlers_;
+};
+
+// MediaStreamHandler for a local (sending) stream; creates
+// LocalAudioTrackHandler/LocalVideoTrackHandler instances for added tracks
+// (see MediaStreamHandlerContainer::AddLocal*Track).
+class LocalMediaStreamHandler : public MediaStreamHandler {
+ public:
+ LocalMediaStreamHandler(MediaStreamInterface* stream,
+ AudioProviderInterface* audio_provider,
+ VideoProviderInterface* video_provider);
+ ~LocalMediaStreamHandler();
+
+ virtual void AddAudioTrack(AudioTrackInterface* audio_track,
+ uint32 ssrc) OVERRIDE;
+ virtual void AddVideoTrack(VideoTrackInterface* video_track,
+ uint32 ssrc) OVERRIDE;
+};
+
+// MediaStreamHandler for a remote (receiving) stream; creates
+// RemoteAudioTrackHandler/RemoteVideoTrackHandler instances for added tracks
+// (see MediaStreamHandlerContainer::AddRemote*Track).
+class RemoteMediaStreamHandler : public MediaStreamHandler {
+ public:
+ RemoteMediaStreamHandler(MediaStreamInterface* stream,
+ AudioProviderInterface* audio_provider,
+ VideoProviderInterface* video_provider);
+ ~RemoteMediaStreamHandler();
+ virtual void AddAudioTrack(AudioTrackInterface* audio_track,
+ uint32 ssrc) OVERRIDE;
+ virtual void AddVideoTrack(VideoTrackInterface* video_track,
+ uint32 ssrc) OVERRIDE;
+};
+
+// Container for MediaStreamHandlers of currently known local and remote
+// MediaStreams.
+class MediaStreamHandlerContainer {
+ public:
+ MediaStreamHandlerContainer(AudioProviderInterface* audio_provider,
+ VideoProviderInterface* video_provider);
+ ~MediaStreamHandlerContainer();
+
+ // Notify all referenced objects that MediaStreamHandlerContainer will be
+ // destroyed. This method must be called prior to the dtor and before
+ // |audio_provider| and |video_provider| are destroyed.
+ void TearDown();
+
+ // Remove all TrackHandlers for tracks in |stream| and make sure
+ // the audio_provider and video_provider are notified that the tracks have
+ // been removed.
+ void RemoveRemoteStream(MediaStreamInterface* stream);
+
+ // Create a RemoteAudioTrackHandler and associate |audio_track| with |ssrc|.
+ void AddRemoteAudioTrack(MediaStreamInterface* stream,
+ AudioTrackInterface* audio_track,
+ uint32 ssrc);
+ // Create a RemoteVideoTrackHandler and associate |video_track| with |ssrc|.
+ void AddRemoteVideoTrack(MediaStreamInterface* stream,
+ VideoTrackInterface* video_track,
+ uint32 ssrc);
+ // Remove the TrackHandler for |track|.
+ void RemoveRemoteTrack(MediaStreamInterface* stream,
+ MediaStreamTrackInterface* track);
+
+ // Remove all TrackHandlers for tracks in |stream| and make sure
+ // the audio_provider and video_provider are notified that the tracks have
+ // been removed.
+ void RemoveLocalStream(MediaStreamInterface* stream);
+
+ // Create a LocalAudioTrackHandler and associate |audio_track| with |ssrc|.
+ void AddLocalAudioTrack(MediaStreamInterface* stream,
+ AudioTrackInterface* audio_track,
+ uint32 ssrc);
+ // Create a LocalVideoTrackHandler and associate |video_track| with |ssrc|.
+ void AddLocalVideoTrack(MediaStreamInterface* stream,
+ VideoTrackInterface* video_track,
+ uint32 ssrc);
+ // Remove the TrackHandler for |track|.
+ void RemoveLocalTrack(MediaStreamInterface* stream,
+ MediaStreamTrackInterface* track);
+
+ private:
+ typedef std::list<MediaStreamHandler*> StreamHandlerList;
+ // Returns the handler for |stream| in |handlers|, or NULL if none exists.
+ MediaStreamHandler* FindStreamHandler(const StreamHandlerList& handlers,
+ MediaStreamInterface* stream);
+ // Create and register a new handler; |stream| must not already have one.
+ MediaStreamHandler* CreateRemoteStreamHandler(MediaStreamInterface* stream);
+ MediaStreamHandler* CreateLocalStreamHandler(MediaStreamInterface* stream);
+ // Stops, deletes, and removes the handler for |stream| from
+ // |streamhandlers|; no-op if not found.
+ void DeleteStreamHandler(StreamHandlerList* streamhandlers,
+ MediaStreamInterface* stream);
+
+ StreamHandlerList local_streams_handlers_;
+ StreamHandlerList remote_streams_handlers_;
+ // Not owned; must outlive this container (see TearDown()).
+ AudioProviderInterface* audio_provider_;
+ VideoProviderInterface* video_provider_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_MEDIASTREAMHANDLER_H_
diff --git a/talk/app/webrtc/mediastreamhandler_unittest.cc b/talk/app/webrtc/mediastreamhandler_unittest.cc
new file mode 100644
index 0000000..bc4189bf
--- /dev/null
+++ b/talk/app/webrtc/mediastreamhandler_unittest.cc
@@ -0,0 +1,297 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/mediastreamhandler.h"
+
+#include <string>
+
+#include "talk/app/webrtc/audiotrack.h"
+#include "talk/app/webrtc/localvideosource.h"
+#include "talk/app/webrtc/mediastream.h"
+#include "talk/app/webrtc/streamcollection.h"
+#include "talk/app/webrtc/videotrack.h"
+#include "talk/base/gunit.h"
+#include "talk/media/base/fakevideocapturer.h"
+#include "talk/media/base/mediachannel.h"
+#include "testing/base/public/gmock.h"
+
+using ::testing::_;
+using ::testing::Exactly;
+
+static const char kStreamLabel1[] = "local_stream_1";
+static const char kVideoTrackId[] = "video_1";
+static const char kAudioTrackId[] = "audio_1";
+static const uint32 kVideoSsrc = 98;
+static const uint32 kAudioSsrc = 99;
+
+namespace webrtc {
+
+// Helper class to test MediaStreamHandler.
+// gMock implementation of AudioProviderInterface; tests set expectations on
+// the Set* methods to verify how handlers drive the audio provider.
+class MockAudioProvider : public AudioProviderInterface {
+ public:
+ virtual ~MockAudioProvider() {}
+ MOCK_METHOD2(SetAudioPlayout, void(uint32 ssrc, bool enable));
+ MOCK_METHOD3(SetAudioSend, void(uint32 ssrc, bool enable,
+ const cricket::AudioOptions& options));
+ MOCK_METHOD2(SetAudioRenderer, bool(uint32, cricket::AudioRenderer*));
+};
+
+// Helper class to test MediaStreamHandler.
+// gMock implementation of VideoProviderInterface; tests set expectations on
+// the methods to verify how handlers drive the video provider.
+class MockVideoProvider : public VideoProviderInterface {
+ public:
+ virtual ~MockVideoProvider() {}
+ MOCK_METHOD2(SetCaptureDevice, bool(uint32 ssrc,
+ cricket::VideoCapturer* camera));
+ MOCK_METHOD3(SetVideoPlayout, void(uint32 ssrc,
+ bool enable,
+ cricket::VideoRenderer* renderer));
+ MOCK_METHOD3(SetVideoSend, void(uint32 ssrc, bool enable,
+ const cricket::VideoOptions* options));
+};
+
+// Minimal VideoSourceInterface backed by a cricket::FakeVideoCapturer.
+// Always reports state kLive and ignores sink registration.
+class FakeVideoSource : public Notifier<VideoSourceInterface> {
+ public:
+ static talk_base::scoped_refptr<FakeVideoSource> Create() {
+ return new talk_base::RefCountedObject<FakeVideoSource>();
+ }
+ virtual cricket::VideoCapturer* GetVideoCapturer() {
+ return &fake_capturer_;
+ }
+ // Sinks are intentionally ignored; this fake produces no frames of its own.
+ virtual void AddSink(cricket::VideoRenderer* output) {}
+ virtual void RemoveSink(cricket::VideoRenderer* output) {}
+ virtual SourceState state() const { return state_; }
+ virtual const cricket::VideoOptions* options() const { return &options_; }
+
+ protected:
+ FakeVideoSource() : state_(kLive) {}
+ ~FakeVideoSource() {}
+
+ private:
+ cricket::FakeVideoCapturer fake_capturer_;
+ SourceState state_;
+ cricket::VideoOptions options_;
+};
+
+// Test fixture that wires a MediaStreamHandlerContainer to mock audio and
+// video providers. The helper methods below each register the gMock
+// expectations for the provider calls the operation should trigger, then
+// perform the operation on |handlers_|.
+class MediaStreamHandlerTest : public testing::Test {
+ public:
+ MediaStreamHandlerTest()
+ : handlers_(&audio_provider_, &video_provider_) {
+ }
+
+ // Creates a stream containing one fake-sourced video track and one audio
+ // track (with a NULL source).
+ virtual void SetUp() {
+ stream_ = MediaStream::Create(kStreamLabel1);
+ talk_base::scoped_refptr<VideoSourceInterface> source(
+ FakeVideoSource::Create());
+ video_track_ = VideoTrack::Create(kVideoTrackId, source);
+ EXPECT_TRUE(stream_->AddTrack(video_track_));
+ audio_track_ = AudioTrack::Create(kAudioTrackId,
+ NULL);
+ EXPECT_TRUE(stream_->AddTrack(audio_track_));
+ }
+
+ // Adding a local audio track should start sending audio on its SSRC.
+ void AddLocalAudioTrack() {
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _));
+ handlers_.AddLocalAudioTrack(stream_, stream_->GetAudioTracks()[0],
+ kAudioSsrc);
+ }
+
+ // Adding a local video track should install its capturer and start sending.
+ void AddLocalVideoTrack() {
+ EXPECT_CALL(video_provider_, SetCaptureDevice(
+ kVideoSsrc, video_track_->GetSource()->GetVideoCapturer()));
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, true, _));
+ handlers_.AddLocalVideoTrack(stream_, stream_->GetVideoTracks()[0],
+ kVideoSsrc);
+ }
+
+ void RemoveLocalAudioTrack() {
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _))
+ .Times(1);
+ handlers_.RemoveLocalTrack(stream_, audio_track_);
+ }
+
+ // Removal should both clear the capture device and stop sending.
+ void RemoveLocalVideoTrack() {
+ EXPECT_CALL(video_provider_, SetCaptureDevice(kVideoSsrc, NULL))
+ .Times(1);
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, false, _))
+ .Times(1);
+ handlers_.RemoveLocalTrack(stream_, video_track_);
+ }
+
+ // Adding a remote audio track should install a renderer and start playout.
+ void AddRemoteAudioTrack() {
+ EXPECT_CALL(audio_provider_, SetAudioRenderer(kAudioSsrc, _));
+ EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, true));
+ handlers_.AddRemoteAudioTrack(stream_, stream_->GetAudioTracks()[0],
+ kAudioSsrc);
+ }
+
+ // Adding a remote video track should route playout to the track's frame
+ // input.
+ void AddRemoteVideoTrack() {
+ EXPECT_CALL(video_provider_, SetVideoPlayout(kVideoSsrc, true,
+ video_track_->FrameInput()));
+ handlers_.AddRemoteVideoTrack(stream_, stream_->GetVideoTracks()[0],
+ kVideoSsrc);
+ }
+
+ void RemoveRemoteAudioTrack() {
+ EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, false));
+ handlers_.RemoveRemoteTrack(stream_, stream_->GetAudioTracks()[0]);
+ }
+
+ void RemoveRemoteVideoTrack() {
+ EXPECT_CALL(video_provider_, SetVideoPlayout(kVideoSsrc, false, NULL));
+ handlers_.RemoveRemoteTrack(stream_, stream_->GetVideoTracks()[0]);
+ }
+
+ protected:
+ MockAudioProvider audio_provider_;
+ MockVideoProvider video_provider_;
+ MediaStreamHandlerContainer handlers_;
+ talk_base::scoped_refptr<MediaStreamInterface> stream_;
+ talk_base::scoped_refptr<VideoTrackInterface> video_track_;
+ talk_base::scoped_refptr<AudioTrackInterface> audio_track_;
+};
+
+// Test that |audio_provider_| is notified when a local audio track is
+// associated and disassociated with a MediaStreamHandler.
+TEST_F(MediaStreamHandlerTest, AddAndRemoveLocalAudioTrack) {
+ AddLocalAudioTrack();
+ RemoveLocalAudioTrack();
+
+ handlers_.RemoveLocalStream(stream_);
+}
+
+// Test that |video_provider_| is notified when a local video track is
+// associated and disassociated with a MediaStreamHandler.
+TEST_F(MediaStreamHandlerTest, AddAndRemoveLocalVideoTrack) {
+ AddLocalVideoTrack();
+ RemoveLocalVideoTrack();
+
+ handlers_.RemoveLocalStream(stream_);
+}
+
+// Test that |video_provider_| and |audio_provider_| are notified when an
+// audio and a video track are disassociated with a MediaStreamHandler by
+// calling RemoveLocalStream.
+TEST_F(MediaStreamHandlerTest, RemoveLocalStream) {
+ AddLocalAudioTrack();
+ AddLocalVideoTrack();
+
+ EXPECT_CALL(video_provider_, SetCaptureDevice(kVideoSsrc, NULL))
+ .Times(1);
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, false, _))
+ .Times(1);
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _))
+ .Times(1);
+ handlers_.RemoveLocalStream(stream_);
+}
+
+
+// Test that |audio_provider_| is notified when a remote audio track is
+// associated and disassociated with a MediaStreamHandler.
+TEST_F(MediaStreamHandlerTest, AddAndRemoveRemoteAudioTrack) {
+ AddRemoteAudioTrack();
+ RemoveRemoteAudioTrack();
+
+ handlers_.RemoveRemoteStream(stream_);
+}
+
+// Test that |video_provider_| is notified when a remote
+// video track is associated and disassociated with a MediaStreamHandler.
+TEST_F(MediaStreamHandlerTest, AddAndRemoveRemoteVideoTrack) {
+ AddRemoteVideoTrack();
+ RemoveRemoteVideoTrack();
+
+ handlers_.RemoveRemoteStream(stream_);
+}
+
+// Test that |audio_provider_| and |video_provider_| are notified when an
+// audio and a video track are disassociated with a MediaStreamHandler by
+// calling RemoveRemoteStream.
+TEST_F(MediaStreamHandlerTest, RemoveRemoteStream) {
+ AddRemoteAudioTrack();
+ AddRemoteVideoTrack();
+
+ EXPECT_CALL(video_provider_, SetVideoPlayout(kVideoSsrc, false, NULL))
+ .Times(1);
+ EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, false))
+ .Times(1);
+ handlers_.RemoveRemoteStream(stream_);
+}
+
+// Test that disabling/enabling a local audio track notifies
+// |audio_provider_| via SetAudioSend.
+TEST_F(MediaStreamHandlerTest, LocalAudioTrackDisable) {
+ AddLocalAudioTrack();
+
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _));
+ audio_track_->set_enabled(false);
+
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _));
+ audio_track_->set_enabled(true);
+
+ RemoveLocalAudioTrack();
+ handlers_.TearDown();
+}
+
+// Test that disabling/enabling a remote audio track notifies
+// |audio_provider_| via SetAudioPlayout.
+TEST_F(MediaStreamHandlerTest, RemoteAudioTrackDisable) {
+ AddRemoteAudioTrack();
+
+ EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, false));
+ audio_track_->set_enabled(false);
+
+ EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, true));
+ audio_track_->set_enabled(true);
+
+ RemoveRemoteAudioTrack();
+ handlers_.TearDown();
+}
+
+// Test that disabling/enabling a local video track notifies
+// |video_provider_| via SetVideoSend.
+TEST_F(MediaStreamHandlerTest, LocalVideoTrackDisable) {
+ AddLocalVideoTrack();
+
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, false, _));
+ video_track_->set_enabled(false);
+
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, true, _));
+ video_track_->set_enabled(true);
+
+ RemoveLocalVideoTrack();
+ handlers_.TearDown();
+}
+
+// Test that disabling/enabling a remote video track notifies
+// |video_provider_| via SetVideoPlayout; re-enabling restores the track's
+// frame input as the renderer.
+TEST_F(MediaStreamHandlerTest, RemoteVideoTrackDisable) {
+ AddRemoteVideoTrack();
+
+ EXPECT_CALL(video_provider_, SetVideoPlayout(kVideoSsrc, false, _));
+ video_track_->set_enabled(false);
+
+ EXPECT_CALL(video_provider_, SetVideoPlayout(kVideoSsrc, true,
+ video_track_->FrameInput()));
+ video_track_->set_enabled(true);
+
+ RemoveRemoteVideoTrack();
+ handlers_.TearDown();
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/mediastreaminterface.h b/talk/app/webrtc/mediastreaminterface.h
new file mode 100644
index 0000000..6f834d2
--- /dev/null
+++ b/talk/app/webrtc/mediastreaminterface.h
@@ -0,0 +1,196 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file contains interfaces for MediaStream, MediaTrack and MediaSource.
+// These interfaces are used for implementing MediaStream and MediaTrack as
+// defined in http://dev.w3.org/2011/webrtc/editor/webrtc.html#stream-api. These
+// interfaces must be used only with PeerConnection. PeerConnectionManager
+// interface provides the factory methods to create MediaStream and MediaTracks.
+
+#ifndef TALK_APP_WEBRTC_MEDIASTREAMINTERFACE_H_
+#define TALK_APP_WEBRTC_MEDIASTREAMINTERFACE_H_
+
+#include <string>
+#include <vector>
+
+#include "talk/base/basictypes.h"
+#include "talk/base/refcount.h"
+#include "talk/base/scoped_ref_ptr.h"
+
+namespace cricket {
+
+class AudioRenderer;
+class VideoCapturer;
+class VideoRenderer;
+class VideoFrame;
+
+} // namespace cricket
+
+namespace webrtc {
+
+// Generic observer interface.
+class ObserverInterface {
+ public:
+ // Called by the observed object whenever its state changes.
+ virtual void OnChanged() = 0;
+
+ protected:
+ // Protected: observers are never deleted through this interface.
+ virtual ~ObserverInterface() {}
+};
+
+// Implemented by objects that can be observed; registered observers receive
+// OnChanged() callbacks until they are unregistered.
+class NotifierInterface {
+ public:
+ virtual void RegisterObserver(ObserverInterface* observer) = 0;
+ virtual void UnregisterObserver(ObserverInterface* observer) = 0;
+
+ virtual ~NotifierInterface() {}
+};
+
+// Base class for sources. A MediaStreamTrack has an underlying source that
+// provides media. A source can be shared by multiple tracks.
+// TODO(perkj): Implement sources for local and remote audio tracks and
+// remote video tracks.
+class MediaSourceInterface : public talk_base::RefCountInterface,
+ public NotifierInterface {
+ public:
+ // Lifecycle of a source; observers are notified of state changes via
+ // NotifierInterface.
+ enum SourceState {
+ kInitializing,
+ kLive,
+ kEnded,
+ kMuted
+ };
+
+ virtual SourceState state() const = 0;
+
+ protected:
+ virtual ~MediaSourceInterface() {}
+};
+
+// Information about a track. Common base interface for audio and video
+// tracks; a track is reference counted and notifies registered observers
+// when its state or enabled flag changes.
+class MediaStreamTrackInterface : public talk_base::RefCountInterface,
+ public NotifierInterface {
+ public:
+ enum TrackState {
+ kInitializing, // Track is being negotiated.
+ kLive = 1, // Track alive
+ kEnded = 2, // Track has ended
+ kFailed = 3, // Track negotiation failed.
+ };
+
+ // "audio" or "video", depending on the concrete track type.
+ virtual std::string kind() const = 0;
+ virtual std::string id() const = 0;
+ virtual bool enabled() const = 0;
+ virtual TrackState state() const = 0;
+ virtual bool set_enabled(bool enable) = 0;
+ // These methods should be called by implementation only.
+ virtual bool set_state(TrackState new_state) = 0;
+};
+
+// Interface for rendering VideoFrames from a VideoTrack.
+class VideoRendererInterface {
+ public:
+ // Informs the renderer of the frame dimensions; presumably called before
+ // frames of a new size are delivered — confirm with callers.
+ virtual void SetSize(int width, int height) = 0;
+ virtual void RenderFrame(const cricket::VideoFrame* frame) = 0;
+
+ protected:
+ // The destructor is protected to prevent deletion via the interface.
+ // This is so that we allow reference counted classes, where the destructor
+ // should never be public, to implement the interface.
+ virtual ~VideoRendererInterface() {}
+};
+
+class VideoSourceInterface;
+
+// A video track. Frames delivered to FrameInput() are fanned out to all
+// registered renderers.
+class VideoTrackInterface : public MediaStreamTrackInterface {
+ public:
+ // Register a renderer that will render all frames received on this track.
+ virtual void AddRenderer(VideoRendererInterface* renderer) = 0;
+ // Deregister a renderer.
+ virtual void RemoveRenderer(VideoRendererInterface* renderer) = 0;
+
+ // Gets a pointer to the frame input of this VideoTrack.
+ // The pointer is valid for the lifetime of this VideoTrack.
+ // VideoFrames rendered to the cricket::VideoRenderer will be rendered on all
+ // registered renderers.
+ virtual cricket::VideoRenderer* FrameInput() = 0;
+
+ // Returns the source this track gets its frames from.
+ virtual VideoSourceInterface* GetSource() const = 0;
+
+ protected:
+ virtual ~VideoTrackInterface() {}
+};
+
+// AudioSourceInterface is a reference counted source used for AudioTracks.
+// The same source can be used in multiple AudioTracks.
+// TODO(perkj): Extend this class with necessary methods to allow separate
+// sources for each audio track.
+// Currently an empty marker interface over MediaSourceInterface.
+class AudioSourceInterface : public MediaSourceInterface {
+};
+
+// An audio track. The source may be NULL (see AudioTrack::Create usage in
+// tests).
+class AudioTrackInterface : public MediaStreamTrackInterface {
+ public:
+ // TODO(xians): Figure out if the following interface should be const or not.
+ virtual AudioSourceInterface* GetSource() const = 0;
+
+ // Gets a pointer to the frame input of this AudioTrack.
+ // The pointer is valid for the lifetime of this AudioTrack.
+ // TODO(xians): Make the following interface pure virtual once Chrome has its
+ // implementation.
+ virtual cricket::AudioRenderer* FrameInput() { return NULL; }
+
+ protected:
+ virtual ~AudioTrackInterface() {}
+};
+
+typedef std::vector<talk_base::scoped_refptr<AudioTrackInterface> >
+ AudioTrackVector;
+typedef std::vector<talk_base::scoped_refptr<VideoTrackInterface> >
+ VideoTrackVector;
+
+// A collection of audio and video tracks, identified by a label. Reference
+// counted; registered observers are notified of changes.
+class MediaStreamInterface : public talk_base::RefCountInterface,
+ public NotifierInterface {
+ public:
+ // The label identifying this stream.
+ virtual std::string label() const = 0;
+
+ virtual AudioTrackVector GetAudioTracks() = 0;
+ virtual VideoTrackVector GetVideoTracks() = 0;
+ // Return the track with |track_id|, or NULL if it is not in this stream —
+ // TODO confirm NULL-on-miss against implementations.
+ virtual talk_base::scoped_refptr<AudioTrackInterface>
+ FindAudioTrack(const std::string& track_id) = 0;
+ virtual talk_base::scoped_refptr<VideoTrackInterface>
+ FindVideoTrack(const std::string& track_id) = 0;
+
+ // Return false on failure (the unit tests EXPECT_TRUE on AddTrack).
+ virtual bool AddTrack(AudioTrackInterface* track) = 0;
+ virtual bool AddTrack(VideoTrackInterface* track) = 0;
+ virtual bool RemoveTrack(AudioTrackInterface* track) = 0;
+ virtual bool RemoveTrack(VideoTrackInterface* track) = 0;
+
+ protected:
+ virtual ~MediaStreamInterface() {}
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_MEDIASTREAMINTERFACE_H_
diff --git a/talk/app/webrtc/mediastreamprovider.h b/talk/app/webrtc/mediastreamprovider.h
new file mode 100644
index 0000000..4c77fd0
--- /dev/null
+++ b/talk/app/webrtc/mediastreamprovider.h
@@ -0,0 +1,81 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_MEDIASTREAMPROVIDER_H_
+#define TALK_APP_WEBRTC_MEDIASTREAMPROVIDER_H_
+
+namespace cricket {
+
+class AudioRenderer;
+class VideoCapturer;
+class VideoRenderer;
+struct AudioOptions;
+struct VideoOptions;
+
+} // namespace cricket
+
+namespace webrtc {
+
+// This interface is called by AudioTrackHandler classes in mediastreamhandler.h
+// to change the settings of an audio track connected to certain PeerConnection.
+// Implementations are not owned by the handlers and must outlive them.
+class AudioProviderInterface {
+ public:
+ // Enable/disable the audio playout of a remote audio track with |ssrc|.
+ virtual void SetAudioPlayout(uint32 ssrc, bool enable) = 0;
+ // Enable/disable sending audio on the local audio track with |ssrc|.
+ // When |enable| is true |options| should be applied to the audio track.
+ virtual void SetAudioSend(uint32 ssrc, bool enable,
+ const cricket::AudioOptions& options) = 0;
+ // Sets the renderer to be used for the specified |ssrc|.
+ virtual bool SetAudioRenderer(uint32 ssrc,
+ cricket::AudioRenderer* renderer) = 0;
+
+ protected:
+ virtual ~AudioProviderInterface() {}
+};
+
+// This interface is called by VideoTrackHandler classes in mediastreamhandler.h
+// to change the settings of a video track connected to a certain
+// PeerConnection. Implementations are not owned by the handlers and must
+// outlive them.
+class VideoProviderInterface {
+ public:
+ // Set the capture device for the local video track with |ssrc|. A NULL
+ // |camera| clears the capture device (the unit tests expect
+ // SetCaptureDevice(ssrc, NULL) on track removal).
+ virtual bool SetCaptureDevice(uint32 ssrc,
+ cricket::VideoCapturer* camera) = 0;
+ // Enable/disable the video playout of a remote video track with |ssrc|.
+ virtual void SetVideoPlayout(uint32 ssrc, bool enable,
+ cricket::VideoRenderer* renderer) = 0;
+ // Enable sending video on the local video track with |ssrc|.
+ virtual void SetVideoSend(uint32 ssrc, bool enable,
+ const cricket::VideoOptions* options) = 0;
+
+ protected:
+ virtual ~VideoProviderInterface() {}
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_MEDIASTREAMPROVIDER_H_
diff --git a/talk/app/webrtc/mediastreamproxy.h b/talk/app/webrtc/mediastreamproxy.h
new file mode 100644
index 0000000..7d018d5
--- /dev/null
+++ b/talk/app/webrtc/mediastreamproxy.h
@@ -0,0 +1,54 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_MEDIASTREAMPROXY_H_
+#define TALK_APP_WEBRTC_MEDIASTREAMPROXY_H_
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/proxy.h"
+
+namespace webrtc {
+
+// Generates MediaStreamProxy, a proxy that forwards every
+// MediaStreamInterface call to the wrapped MediaStream implementation.
+// NOTE(review): the PROXY_* macros in proxy.h presumably marshal the calls
+// onto the thread the proxy was created with — confirm against proxy.h.
+BEGIN_PROXY_MAP(MediaStream)
+  PROXY_CONSTMETHOD0(std::string, label)
+  PROXY_METHOD0(AudioTrackVector, GetAudioTracks)
+  PROXY_METHOD0(VideoTrackVector, GetVideoTracks)
+  PROXY_METHOD1(talk_base::scoped_refptr<AudioTrackInterface>,
+                FindAudioTrack, const std::string&)
+  PROXY_METHOD1(talk_base::scoped_refptr<VideoTrackInterface>,
+                FindVideoTrack, const std::string&)
+  PROXY_METHOD1(bool, AddTrack, AudioTrackInterface*)
+  PROXY_METHOD1(bool, AddTrack, VideoTrackInterface*)
+  PROXY_METHOD1(bool, RemoveTrack, AudioTrackInterface*)
+  PROXY_METHOD1(bool, RemoveTrack, VideoTrackInterface*)
+  PROXY_METHOD1(void, RegisterObserver, ObserverInterface*)
+  PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*)
+END_PROXY()
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_MEDIASTREAMPROXY_H_
diff --git a/talk/app/webrtc/mediastreamsignaling.cc b/talk/app/webrtc/mediastreamsignaling.cc
new file mode 100644
index 0000000..1397a7f
--- /dev/null
+++ b/talk/app/webrtc/mediastreamsignaling.cc
@@ -0,0 +1,883 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/mediastreamsignaling.h"
+
+#include <vector>
+
+#include "talk/app/webrtc/audiotrack.h"
+#include "talk/app/webrtc/mediastreamproxy.h"
+#include "talk/app/webrtc/mediaconstraintsinterface.h"
+#include "talk/app/webrtc/mediastreamtrackproxy.h"
+#include "talk/app/webrtc/videotrack.h"
+
+// Labels used for the MediaStream and tracks that are synthesized for remote
+// endpoints that send media without signaling MSID (see
+// MaybeCreateDefaultStream below).
+static const char kDefaultStreamLabel[] = "default";
+static const char kDefaultAudioTrackLabel[] = "defaulta0";
+static const char kDefaultVideoTrackLabel[] = "defaultv0";
+
+namespace webrtc {
+
+using talk_base::scoped_ptr;
+using talk_base::scoped_refptr;
+
+// Supported MediaConstraints.
+// These define the constraint key strings declared in
+// mediaconstraintsinterface.h and consumed by ParseConstraints below.
+const char MediaConstraintsInterface::kOfferToReceiveAudio[] =
+    "OfferToReceiveAudio";
+const char MediaConstraintsInterface::kOfferToReceiveVideo[] =
+    "OfferToReceiveVideo";
+const char MediaConstraintsInterface::kIceRestart[] =
+    "IceRestart";
+const char MediaConstraintsInterface::kUseRtpMux[] =
+    "googUseRtpMUX";
+const char MediaConstraintsInterface::kVoiceActivityDetection[] =
+    "VoiceActivityDetection";
+
+// Parses |constraints| into |options|, applying spec defaults for any
+// constraint that is not present. |is_answer| should be true when parsing
+// constraints for CreateAnswer rather than CreateOffer.
+// Returns false if not all mandatory constraints were recognized.
+static bool ParseConstraints(
+    const MediaConstraintsInterface* constraints,
+    cricket::MediaSessionOptions* options, bool is_answer) {
+  bool value;
+  size_t mandatory_constraints_satisfied = 0;
+
+  if (FindConstraint(constraints,
+                     MediaConstraintsInterface::kOfferToReceiveAudio,
+                     &value, &mandatory_constraints_satisfied)) {
+    // |options->has_audio| can only change from false to
+    // true, but never change from true to false. This is to make sure
+    // CreateOffer / CreateAnswer doesn't remove a media content
+    // description that has been created.
+    options->has_audio |= value;
+  } else {
+    // kOfferToReceiveAudio defaults to true according to spec.
+    options->has_audio = true;
+  }
+
+  if (FindConstraint(constraints,
+                     MediaConstraintsInterface::kOfferToReceiveVideo,
+                     &value, &mandatory_constraints_satisfied)) {
+    // |options->has_video| can only change from false to
+    // true, but never change from true to false. This is to make sure
+    // CreateOffer / CreateAnswer doesn't remove a media content
+    // description that has been created.
+    options->has_video |= value;
+  } else {
+    // kOfferToReceiveVideo defaults to false according to spec. But
+    // if it is an answer and video is offered, we should still accept video
+    // per default.
+    options->has_video |= is_answer;
+  }
+
+  if (FindConstraint(constraints,
+                     MediaConstraintsInterface::kVoiceActivityDetection,
+                     &value, &mandatory_constraints_satisfied)) {
+    options->vad_enabled = value;
+  }
+
+  if (FindConstraint(constraints,
+                     MediaConstraintsInterface::kUseRtpMux,
+                     &value, &mandatory_constraints_satisfied)) {
+    options->bundle_enabled = value;
+  } else {
+    // kUseRtpMux defaults to true according to spec.
+    options->bundle_enabled = true;
+  }
+  if (FindConstraint(constraints,
+                     MediaConstraintsInterface::kIceRestart,
+                     &value, &mandatory_constraints_satisfied)) {
+    options->transport_options.ice_restart = value;
+  } else {
+    // kIceRestart defaults to false according to spec.
+    options->transport_options.ice_restart = false;
+  }
+
+  // A NULL |constraints| has no mandatory constraints to satisfy.
+  if (!constraints) {
+    return true;
+  }
+  return mandatory_constraints_satisfied == constraints->GetMandatory().size();
+}
+
+// Returns true if at least one media content is present and
+// |options.bundle_enabled| is true.
+// Bundle will be enabled by default if at least one media content is present
+// and the constraint kUseRtpMux has not disabled bundle.
+static bool EvaluateNeedForBundle(const cricket::MediaSessionOptions& options) {
+  return options.bundle_enabled &&
+      (options.has_audio || options.has_video || options.has_data());
+}
+
+// Factory class for creating remote MediaStreams and MediaStreamTracks.
+// All created objects are wrapped in proxies bound to |signaling_thread|.
+class RemoteMediaStreamFactory {
+ public:
+  explicit RemoteMediaStreamFactory(talk_base::Thread* signaling_thread)
+      : signaling_thread_(signaling_thread) {
+  }
+
+  // Creates a proxied MediaStream with the given |stream_label|.
+  talk_base::scoped_refptr<MediaStreamInterface> CreateMediaStream(
+      const std::string& stream_label) {
+    return MediaStreamProxy::Create(
+        signaling_thread_, MediaStream::Create(stream_label));
+  }
+
+  // Creates a live audio track with |track_id| and adds it to |stream|.
+  // Returns NULL if the track could not be added.
+  AudioTrackInterface* AddAudioTrack(webrtc::MediaStreamInterface* stream,
+                                     const std::string& track_id) {
+    return AddTrack<AudioTrackInterface, AudioTrack, AudioTrackProxy>(stream,
+                                                                      track_id);
+  }
+
+  // Creates a live video track with |track_id| and adds it to |stream|.
+  // Returns NULL if the track could not be added.
+  VideoTrackInterface* AddVideoTrack(webrtc::MediaStreamInterface* stream,
+                                     const std::string& track_id) {
+    return AddTrack<VideoTrackInterface, VideoTrack, VideoTrackProxy>(stream,
+                                                                      track_id);
+  }
+
+ private:
+  // TI = track interface, T = concrete track type, TP = proxy type.
+  // Creates the track with no source (NULL), marks it live and adds it to
+  // |stream|. Returns the raw interface pointer; |stream| holds the
+  // reference after a successful AddTrack.
+  template <typename TI, typename T, typename TP>
+  TI* AddTrack(MediaStreamInterface* stream, const std::string& track_id) {
+    talk_base::scoped_refptr<TI> track(
+        TP::Create(signaling_thread_, T::Create(track_id, NULL)));
+    track->set_state(webrtc::MediaStreamTrackInterface::kLive);
+    if (stream->AddTrack(track)) {
+      return track;
+    }
+    return NULL;
+  }
+
+  talk_base::Thread* signaling_thread_;
+};
+
+// |signaling_thread| is the thread remote streams/tracks are proxied onto;
+// |stream_observer| receives all add/remove notifications and must outlive
+// this object.
+MediaStreamSignaling::MediaStreamSignaling(
+    talk_base::Thread* signaling_thread,
+    MediaStreamSignalingObserver* stream_observer)
+    : signaling_thread_(signaling_thread),
+      data_channel_factory_(NULL),
+      stream_observer_(stream_observer),
+      local_streams_(StreamCollection::Create()),
+      remote_streams_(StreamCollection::Create()),
+      remote_stream_factory_(new RemoteMediaStreamFactory(signaling_thread)),
+      last_allocated_sctp_id_(0) {
+  // Media descriptions are only offered once a track or constraint asks for
+  // them; start with both disabled.
+  options_.has_video = false;
+  options_.has_audio = false;
+}
+
+// Members are all scoped/ref-counted; no explicit cleanup needed.
+MediaStreamSignaling::~MediaStreamSignaling() {
+}
+
+// Ends all remote tracks and closes all data channels, as if every media
+// channel had been closed.
+void MediaStreamSignaling::TearDown() {
+  OnAudioChannelClose();
+  OnVideoChannelClose();
+  OnDataChannelClose();
+}
+
+// Returns true if |id| is in the valid SCTP sid range and is not already
+// used by any registered data channel.
+bool MediaStreamSignaling::IsSctpIdAvailable(int id) const {
+  const int max_id = static_cast<int>(cricket::kMaxSctpSid);
+  if (id < 0 || id > max_id) {
+    return false;
+  }
+  DataChannels::const_iterator channel_it;
+  for (channel_it = data_channels_.begin();
+       channel_it != data_channels_.end(); ++channel_it) {
+    if (channel_it->second->id() == id) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Gets the first id that has not been taken by existing data
+// channels. Starting from 1.
+// Returns false if no id can be allocated.
+// TODO(jiayl): Update to some kind of even/odd random number selection when
+// the rules are fully standardized.
+bool MediaStreamSignaling::AllocateSctpId(int* id) {
+  const int max_id = static_cast<int>(cricket::kMaxSctpSid);
+  for (;;) {
+    ++last_allocated_sctp_id_;
+    if (last_allocated_sctp_id_ > max_id) {
+      // Exhausted the id space; clamp so future calls keep failing fast.
+      last_allocated_sctp_id_ = cricket::kMaxSctpSid;
+      return false;
+    }
+    if (IsSctpIdAvailable(last_allocated_sctp_id_)) {
+      break;
+    }
+  }
+  *id = last_allocated_sctp_id_;
+  return true;
+}
+
+// Registers |data_channel| under its label. Returns false (and logs) if a
+// channel with the same label is already registered.
+bool MediaStreamSignaling::AddDataChannel(DataChannel* data_channel) {
+  ASSERT(data_channel != NULL);
+  const std::string& label = data_channel->label();
+  if (data_channels_.find(label) != data_channels_.end()) {
+    LOG(LS_ERROR) << "DataChannel with label " << label
+                  << " already exists.";
+    return false;
+  }
+  data_channels_[label] = data_channel;
+  return true;
+}
+
+// Adds |local_stream| to the set of streams offered in the next local
+// SessionDescription. Returns false if a stream with the same label was
+// already added. If tracks of the stream are already present in the current
+// local description, the observer is notified about them immediately.
+bool MediaStreamSignaling::AddLocalStream(MediaStreamInterface* local_stream) {
+  if (local_streams_->find(local_stream->label()) != NULL) {
+    // Fixed log message: was "label <x>already exist." (missing space,
+    // wrong number).
+    LOG(LS_WARNING) << "MediaStream with label " << local_stream->label()
+                    << " already exists.";
+    return false;
+  }
+  local_streams_->AddStream(local_stream);
+
+  // Find tracks that has already been configured in SDP. This can occur if a
+  // local session description that contains the MSID of these tracks is set
+  // before AddLocalStream is called. It can also occur if the local session
+  // description is not changed and RemoveLocalStream
+  // is called and later AddLocalStream is called again with the same stream.
+  AudioTrackVector audio_tracks = local_stream->GetAudioTracks();
+  for (AudioTrackVector::const_iterator it = audio_tracks.begin();
+       it != audio_tracks.end(); ++it) {
+    TrackInfos::const_iterator track_info_it =
+        local_audio_tracks_.find((*it)->id());
+    if (track_info_it != local_audio_tracks_.end()) {
+      const TrackInfo& info = track_info_it->second;
+      OnLocalTrackSeen(info.stream_label, info.track_id, info.ssrc,
+                       cricket::MEDIA_TYPE_AUDIO);
+    }
+  }
+
+  VideoTrackVector video_tracks = local_stream->GetVideoTracks();
+  for (VideoTrackVector::const_iterator it = video_tracks.begin();
+       it != video_tracks.end(); ++it) {
+    TrackInfos::const_iterator track_info_it =
+        local_video_tracks_.find((*it)->id());
+    if (track_info_it != local_video_tracks_.end()) {
+      const TrackInfo& info = track_info_it->second;
+      OnLocalTrackSeen(info.stream_label, info.track_id, info.ssrc,
+                       cricket::MEDIA_TYPE_VIDEO);
+    }
+  }
+  return true;
+}
+
+// Removes |local_stream| from the offered streams and notifies the observer.
+// Safe to call even if the stream was never added (RemoveStream is a no-op
+// for unknown streams).
+void MediaStreamSignaling::RemoveLocalStream(
+    MediaStreamInterface* local_stream) {
+  local_streams_->RemoveStream(local_stream);
+  stream_observer_->OnRemoveLocalStream(local_stream);
+}
+
+// Fills |options| for a CreateOffer call from the current local streams,
+// data channels and |constraints|. Note that constraint results are stored
+// in the member |options_| so they persist across subsequent offers.
+// Returns false if a mandatory constraint is unsatisfied.
+bool MediaStreamSignaling::GetOptionsForOffer(
+    const MediaConstraintsInterface* constraints,
+    cricket::MediaSessionOptions* options) {
+  UpdateSessionOptions();
+  if (!ParseConstraints(constraints, &options_, false)) {
+    return false;
+  }
+  options_.bundle_enabled = EvaluateNeedForBundle(options_);
+  *options = options_;
+  return true;
+}
+
+// Fills |options| for a CreateAnswer call. Unlike GetOptionsForOffer, the
+// parsed constraints are applied to a local copy so answer-only settings do
+// not leak into subsequent offers. Returns false if a mandatory constraint
+// is unsatisfied.
+bool MediaStreamSignaling::GetOptionsForAnswer(
+    const MediaConstraintsInterface* constraints,
+    cricket::MediaSessionOptions* options) {
+  UpdateSessionOptions();
+
+  // Copy the |options_| to not let the flag MediaSessionOptions::has_audio
+  // and MediaSessionOptions::has_video affect subsequent offers.
+  cricket::MediaSessionOptions current_options = options_;
+  // Fixed mojibake: "&curr" had been mangled into the U+00A4 currency sign,
+  // leaving "¤t_options" instead of "&current_options".
+  if (!ParseConstraints(constraints, &current_options, true)) {
+    return false;
+  }
+  current_options.bundle_enabled = EvaluateNeedForBundle(current_options);
+  *options = current_options;
+  return true;
+}
+
+// Updates or creates remote MediaStream objects given a
+// remote SessionDescription.
+// If the remote SessionDescription contains new remote MediaStreams
+// the observer OnAddStream method is called. If a remote MediaStream is
+// missing from the remote SessionDescription OnRemoveStream is called.
+void MediaStreamSignaling::OnRemoteDescriptionChanged(
+    const SessionDescriptionInterface* desc) {
+  const cricket::SessionDescription* remote_desc = desc->description();
+  talk_base::scoped_refptr<StreamCollection> new_streams(
+      StreamCollection::Create());
+
+  // Find all audio rtp streams and create corresponding remote AudioTracks
+  // and MediaStreams.
+  const cricket::ContentInfo* audio_content = GetFirstAudioContent(remote_desc);
+  if (audio_content) {
+    const cricket::AudioContentDescription* desc =
+        static_cast<const cricket::AudioContentDescription*>(
+            audio_content->description);
+    UpdateRemoteStreamsList(desc->streams(), desc->type(), new_streams);
+    // A sendrecv direction without any signaled streams means the remote
+    // side sends media without MSID; a default track must be synthesized.
+    remote_info_.default_audio_track_needed =
+        desc->direction() == cricket::MD_SENDRECV && desc->streams().empty();
+  }
+
+  // Find all video rtp streams and create corresponding remote VideoTracks
+  // and MediaStreams.
+  const cricket::ContentInfo* video_content = GetFirstVideoContent(remote_desc);
+  if (video_content) {
+    const cricket::VideoContentDescription* desc =
+        static_cast<const cricket::VideoContentDescription*>(
+            video_content->description);
+    UpdateRemoteStreamsList(desc->streams(), desc->type(), new_streams);
+    remote_info_.default_video_track_needed =
+        desc->direction() == cricket::MD_SENDRECV && desc->streams().empty();
+  }
+
+  // Update the DataChannels with the information from the remote peer.
+  const cricket::ContentInfo* data_content = GetFirstDataContent(remote_desc);
+  if (data_content) {
+    const cricket::DataContentDescription* data_desc =
+        static_cast<const cricket::DataContentDescription*>(
+            data_content->description);
+    if (data_desc->protocol() == cricket::kMediaProtocolDtlsSctp) {
+      UpdateRemoteSctpDataChannels();
+    } else {
+      UpdateRemoteRtpDataChannels(data_desc->streams());
+    }
+  }
+
+  // Iterate new_streams and notify the observer about new MediaStreams.
+  for (size_t i = 0; i < new_streams->count(); ++i) {
+    MediaStreamInterface* new_stream = new_streams->at(i);
+    stream_observer_->OnAddRemoteStream(new_stream);
+  }
+
+  // Find removed MediaStreams. Skipped when the default stream is (still)
+  // needed and already exists, since it must not be torn down.
+  if (remote_info_.IsDefaultMediaStreamNeeded() &&
+      remote_streams_->find(kDefaultStreamLabel) != NULL) {
+    // The default media stream already exists. No need to do anything.
+  } else {
+    UpdateEndedRemoteMediaStreams();
+    // Remember that this remote endpoint signals MSID, so later
+    // descriptions without streams still mean "no media", not "no MSID".
+    remote_info_.msid_supported |= remote_streams_->count() > 0;
+  }
+  MaybeCreateDefaultStream();
+}
+
+// Processes a new local SessionDescription: updates local track bookkeeping,
+// ends remote tracks whose content was rejected, and updates data channels.
+void MediaStreamSignaling::OnLocalDescriptionChanged(
+    const SessionDescriptionInterface* desc) {
+  const cricket::ContentInfo* audio_content =
+      GetFirstAudioContent(desc->description());
+  if (audio_content) {
+    if (audio_content->rejected) {
+      RejectRemoteTracks(cricket::MEDIA_TYPE_AUDIO);
+    }
+    const cricket::AudioContentDescription* audio_desc =
+        static_cast<const cricket::AudioContentDescription*>(
+            audio_content->description);
+    UpdateLocalTracks(audio_desc->streams(), audio_desc->type());
+  }
+
+  const cricket::ContentInfo* video_content =
+      GetFirstVideoContent(desc->description());
+  if (video_content) {
+    if (video_content->rejected) {
+      RejectRemoteTracks(cricket::MEDIA_TYPE_VIDEO);
+    }
+    const cricket::VideoContentDescription* video_desc =
+        static_cast<const cricket::VideoContentDescription*>(
+            video_content->description);
+    UpdateLocalTracks(video_desc->streams(), video_desc->type());
+  }
+
+  const cricket::ContentInfo* data_content =
+      GetFirstDataContent(desc->description());
+  if (data_content) {
+    const cricket::DataContentDescription* data_desc =
+        static_cast<const cricket::DataContentDescription*>(
+            data_content->description);
+    if (data_desc->protocol() == cricket::kMediaProtocolDtlsSctp) {
+      UpdateLocalSctpDataChannels();
+    } else {
+      UpdateLocalRtpDataChannels(data_desc->streams());
+    }
+  }
+}
+
+// Called when the audio channel closes; ends all remote audio tracks.
+void MediaStreamSignaling::OnAudioChannelClose() {
+  RejectRemoteTracks(cricket::MEDIA_TYPE_AUDIO);
+}
+
+// Called when the video channel closes; ends all remote video tracks.
+void MediaStreamSignaling::OnVideoChannelClose() {
+  RejectRemoteTracks(cricket::MEDIA_TYPE_VIDEO);
+}
+
+// Called when the data transport closes; notifies every registered
+// DataChannel that the data engine is gone.
+void MediaStreamSignaling::OnDataChannelClose() {
+  for (DataChannels::iterator it = data_channels_.begin();
+       it != data_channels_.end(); ++it) {
+    it->second->OnDataEngineClose();
+  }
+}
+
+// Looks up the SSRC of the remote audio track with |track_id|.
+// Returns false if no such track is known; |*ssrc| is untouched then.
+bool MediaStreamSignaling::GetRemoteAudioTrackSsrc(
+    const std::string& track_id, uint32* ssrc) const {
+  TrackInfos::const_iterator found = remote_audio_tracks_.find(track_id);
+  if (found != remote_audio_tracks_.end()) {
+    *ssrc = found->second.ssrc;
+    return true;
+  }
+  return false;
+}
+
+// Looks up the SSRC of the remote video track with |track_id|.
+// Returns false if no such track is known; |*ssrc| is untouched then.
+bool MediaStreamSignaling::GetRemoteVideoTrackSsrc(
+    const std::string& track_id, uint32* ssrc) const {
+  TrackInfos::const_iterator found = remote_video_tracks_.find(track_id);
+  if (found != remote_video_tracks_.end()) {
+    *ssrc = found->second.ssrc;
+    return true;
+  }
+  return false;
+}
+
+// Rebuilds |options_.streams| from the current local streams and open data
+// channels. Also raises has_audio/has_video when tracks of that kind exist
+// (the flags are never lowered here; see ParseConstraints).
+void MediaStreamSignaling::UpdateSessionOptions() {
+  options_.streams.clear();
+  if (local_streams_ != NULL) {
+    for (size_t i = 0; i < local_streams_->count(); ++i) {
+      MediaStreamInterface* stream = local_streams_->at(i);
+
+      AudioTrackVector audio_tracks(stream->GetAudioTracks());
+      if (!audio_tracks.empty()) {
+        options_.has_audio = true;
+      }
+
+      // For each audio track in the stream, add it to the
+      // MediaSessionOptions.
+      for (size_t j = 0; j < audio_tracks.size(); ++j) {
+        scoped_refptr<MediaStreamTrackInterface> track(audio_tracks[j]);
+        options_.AddStream(cricket::MEDIA_TYPE_AUDIO, track->id(),
+                           stream->label());
+      }
+
+      VideoTrackVector video_tracks(stream->GetVideoTracks());
+      if (!video_tracks.empty()) {
+        options_.has_video = true;
+      }
+      // For each video track in the stream, add it to the
+      // MediaSessionOptions.
+      for (size_t j = 0; j < video_tracks.size(); ++j) {
+        scoped_refptr<MediaStreamTrackInterface> track(video_tracks[j]);
+        options_.AddStream(cricket::MEDIA_TYPE_VIDEO, track->id(),
+                           stream->label());
+      }
+    }
+  }
+
+  // Check for data channels.
+  DataChannels::const_iterator data_channel_it = data_channels_.begin();
+  for (; data_channel_it != data_channels_.end(); ++data_channel_it) {
+    const DataChannel* channel = data_channel_it->second;
+    if (channel->state() == DataChannel::kConnecting ||
+        channel->state() == DataChannel::kOpen) {
+      // |streamid| and |sync_label| are both set to the DataChannel label
+      // here so they can be signaled the same way as MediaStreams and Tracks.
+      // For MediaStreams, the sync_label is the MediaStream label and the
+      // track label is the same as |streamid|.
+      const std::string& streamid = channel->label();
+      const std::string& sync_label = channel->label();
+      options_.AddStream(cricket::MEDIA_TYPE_DATA, streamid, sync_label);
+    }
+  }
+}
+
+// Diffs |streams| (the StreamParams from a remote description) against the
+// tracked remote tracks of |media_type|: removed/changed tracks trigger
+// OnRemoteTrackRemoved, new tracks create streams/tracks as needed. Newly
+// created MediaStreams are appended to |new_streams| so the caller can
+// notify the observer once, after all tracks are in place.
+void MediaStreamSignaling::UpdateRemoteStreamsList(
+    const cricket::StreamParamsVec& streams,
+    cricket::MediaType media_type,
+    StreamCollection* new_streams) {
+  TrackInfos* current_tracks = GetRemoteTracks(media_type);
+
+  // Find removed tracks. Ie tracks where the track id or ssrc don't match
+  // the new StreamParam.
+  TrackInfos::iterator track_it = current_tracks->begin();
+  while (track_it != current_tracks->end()) {
+    TrackInfo info = track_it->second;
+    cricket::StreamParams params;
+    // Fixed mojibake: "&para" had been mangled into the pilcrow sign,
+    // leaving "¶ms" instead of "&params".
+    if (!cricket::GetStreamBySsrc(streams, info.ssrc, &params) ||
+        params.id != info.track_id) {
+      OnRemoteTrackRemoved(info.stream_label, info.track_id, media_type);
+      // Post-increment keeps the iterator valid across the erase.
+      current_tracks->erase(track_it++);
+    } else {
+      ++track_it;
+    }
+  }
+
+  // Find new and active tracks.
+  for (cricket::StreamParamsVec::const_iterator it = streams.begin();
+       it != streams.end(); ++it) {
+    // The sync_label is the MediaStream label and the |stream.id| is the
+    // track id.
+    const std::string& stream_label = it->sync_label;
+    const std::string& track_id = it->id;
+    uint32 ssrc = it->first_ssrc();
+
+    talk_base::scoped_refptr<MediaStreamInterface> stream =
+        remote_streams_->find(stream_label);
+    if (!stream) {
+      // This is a new MediaStream. Create a new remote MediaStream.
+      stream = remote_stream_factory_->CreateMediaStream(stream_label);
+      remote_streams_->AddStream(stream);
+      new_streams->AddStream(stream);
+    }
+
+    // Renamed from |track_it| to avoid shadowing the iterator declared in
+    // the removal loop above.
+    TrackInfos::iterator existing_track_it = current_tracks->find(track_id);
+    if (existing_track_it == current_tracks->end()) {
+      (*current_tracks)[track_id] =
+          TrackInfo(stream_label, track_id, ssrc);
+      OnRemoteTrackSeen(stream_label, track_id, it->first_ssrc(), media_type);
+    }
+  }
+}
+
+// Creates a remote track of |media_type| in the existing remote stream
+// |stream_label| and notifies the observer with the track's |ssrc|.
+// The caller guarantees the stream already exists in |remote_streams_|.
+void MediaStreamSignaling::OnRemoteTrackSeen(const std::string& stream_label,
+                                             const std::string& track_id,
+                                             uint32 ssrc,
+                                             cricket::MediaType media_type) {
+  MediaStreamInterface* stream = remote_streams_->find(stream_label);
+
+  if (media_type == cricket::MEDIA_TYPE_AUDIO) {
+    AudioTrackInterface* audio_track =
+        remote_stream_factory_->AddAudioTrack(stream, track_id);
+    stream_observer_->OnAddRemoteAudioTrack(stream, audio_track, ssrc);
+  } else if (media_type == cricket::MEDIA_TYPE_VIDEO) {
+    VideoTrackInterface* video_track =
+        remote_stream_factory_->AddVideoTrack(stream, track_id);
+    stream_observer_->OnAddRemoteVideoTrack(stream, video_track, ssrc);
+  } else {
+    ASSERT(false && "Invalid media type");
+  }
+}
+
+// Ends and removes the remote track |track_id| from the remote stream
+// |stream_label|, then notifies the observer. Now guards against a missing
+// stream or track (the original dereferenced the FindXxxTrack result
+// unconditionally, crashing if SDP bookkeeping and the stream ever
+// disagreed; OnLocalTrackRemoved already guards the same way).
+void MediaStreamSignaling::OnRemoteTrackRemoved(
+    const std::string& stream_label,
+    const std::string& track_id,
+    cricket::MediaType media_type) {
+  MediaStreamInterface* stream = remote_streams_->find(stream_label);
+  if (!stream) {
+    // The stream has already been removed; nothing to end.
+    return;
+  }
+
+  if (media_type == cricket::MEDIA_TYPE_AUDIO) {
+    talk_base::scoped_refptr<AudioTrackInterface> audio_track =
+        stream->FindAudioTrack(track_id);
+    if (!audio_track) {
+      return;
+    }
+    audio_track->set_state(webrtc::MediaStreamTrackInterface::kEnded);
+    stream->RemoveTrack(audio_track);
+    stream_observer_->OnRemoveRemoteAudioTrack(stream, audio_track);
+  } else if (media_type == cricket::MEDIA_TYPE_VIDEO) {
+    talk_base::scoped_refptr<VideoTrackInterface> video_track =
+        stream->FindVideoTrack(track_id);
+    if (!video_track) {
+      return;
+    }
+    video_track->set_state(webrtc::MediaStreamTrackInterface::kEnded);
+    stream->RemoveTrack(video_track);
+    stream_observer_->OnRemoveRemoteVideoTrack(stream, video_track);
+  } else {
+    ASSERT(false && "Invalid media type");
+  }
+}
+
+// Marks all remote tracks of |media_type| as ended, e.g. when the content is
+// rejected or the media channel closes. Now guards against a missing stream
+// or track (the original dereferenced |stream| and the FindXxxTrack result
+// unconditionally, crashing if bookkeeping and the stream disagreed).
+void MediaStreamSignaling::RejectRemoteTracks(cricket::MediaType media_type) {
+  TrackInfos* current_tracks = GetRemoteTracks(media_type);
+  for (TrackInfos::iterator track_it = current_tracks->begin();
+       track_it != current_tracks->end(); ++track_it) {
+    const TrackInfo& info = track_it->second;
+    MediaStreamInterface* stream = remote_streams_->find(info.stream_label);
+    if (!stream) {
+      continue;
+    }
+    if (media_type == cricket::MEDIA_TYPE_AUDIO) {
+      AudioTrackInterface* track = stream->FindAudioTrack(info.track_id);
+      if (track) {
+        track->set_state(webrtc::MediaStreamTrackInterface::kEnded);
+      }
+    }
+    if (media_type == cricket::MEDIA_TYPE_VIDEO) {
+      VideoTrackInterface* track = stream->FindVideoTrack(info.track_id);
+      if (track) {
+        track->set_state(webrtc::MediaStreamTrackInterface::kEnded);
+      }
+    }
+  }
+}
+
+// Removes remote MediaStreams that no longer contain any tracks and
+// notifies the observer about each removal.
+void MediaStreamSignaling::UpdateEndedRemoteMediaStreams() {
+  typedef std::vector<scoped_refptr<MediaStreamInterface> > StreamVector;
+  // Collect the ended streams first; removing while iterating over
+  // |remote_streams_| would disturb the index-based loop.
+  StreamVector ended_streams;
+  for (size_t i = 0; i < remote_streams_->count(); ++i) {
+    MediaStreamInterface* stream = remote_streams_->at(i);
+    const bool has_tracks = !stream->GetAudioTracks().empty() ||
+                            !stream->GetVideoTracks().empty();
+    if (!has_tracks) {
+      ended_streams.push_back(stream);
+    }
+  }
+
+  for (StreamVector::const_iterator it = ended_streams.begin();
+       it != ended_streams.end(); ++it) {
+    remote_streams_->RemoveStream(*it);
+    stream_observer_->OnRemoveRemoteStream(*it);
+  }
+}
+
+// Creates the synthetic "default" remote MediaStream (and default audio /
+// video tracks with ssrc 0) when the remote endpoint sends media without
+// signaling MSID. No-op if the default stream is not needed or the tracks
+// already exist.
+void MediaStreamSignaling::MaybeCreateDefaultStream() {
+  if (!remote_info_.IsDefaultMediaStreamNeeded())
+    return;
+
+  bool default_created = false;
+
+  scoped_refptr<MediaStreamInterface> default_remote_stream =
+      remote_streams_->find(kDefaultStreamLabel);
+  if (default_remote_stream == NULL) {
+    default_created = true;
+    default_remote_stream =
+        remote_stream_factory_->CreateMediaStream(kDefaultStreamLabel);
+    remote_streams_->AddStream(default_remote_stream);
+  }
+  if (remote_info_.default_audio_track_needed &&
+      default_remote_stream->GetAudioTracks().size() == 0) {
+    // Default tracks use ssrc 0 since no SSRC was signaled.
+    remote_audio_tracks_[kDefaultAudioTrackLabel] =
+        TrackInfo(kDefaultStreamLabel, kDefaultAudioTrackLabel, 0);
+    OnRemoteTrackSeen(kDefaultStreamLabel, kDefaultAudioTrackLabel, 0,
+                      cricket::MEDIA_TYPE_AUDIO);
+  }
+  if (remote_info_.default_video_track_needed &&
+      default_remote_stream->GetVideoTracks().size() == 0) {
+    remote_video_tracks_[kDefaultVideoTrackLabel] =
+        TrackInfo(kDefaultStreamLabel, kDefaultVideoTrackLabel, 0);
+    OnRemoteTrackSeen(kDefaultStreamLabel, kDefaultVideoTrackLabel, 0,
+                      cricket::MEDIA_TYPE_VIDEO);
+  }
+  // Notify only after the default tracks are in place, so the observer sees
+  // a fully populated stream.
+  if (default_created) {
+    stream_observer_->OnAddRemoteStream(default_remote_stream);
+  }
+}
+
+// Returns the remote track bookkeeping map for |type|, or NULL (after an
+// assert) for an unsupported media type.
+MediaStreamSignaling::TrackInfos* MediaStreamSignaling::GetRemoteTracks(
+    cricket::MediaType type) {
+  if (type == cricket::MEDIA_TYPE_AUDIO) {
+    return &remote_audio_tracks_;
+  }
+  if (type == cricket::MEDIA_TYPE_VIDEO) {
+    return &remote_video_tracks_;
+  }
+  ASSERT(false && "Unknown MediaType");
+  return NULL;
+}
+
+// Returns the local track bookkeeping map for |media_type|; only audio and
+// video are supported.
+MediaStreamSignaling::TrackInfos* MediaStreamSignaling::GetLocalTracks(
+    cricket::MediaType media_type) {
+  ASSERT(media_type == cricket::MEDIA_TYPE_AUDIO ||
+         media_type == cricket::MEDIA_TYPE_VIDEO);
+
+  if (media_type == cricket::MEDIA_TYPE_AUDIO) {
+    return &local_audio_tracks_;
+  }
+  return &local_video_tracks_;
+}
+
+// Diffs |streams| (StreamParams from the local description) against the
+// tracked local tracks of |media_type|: removed/changed tracks trigger
+// OnLocalTrackRemoved, newly seen tracks trigger OnLocalTrackSeen.
+void MediaStreamSignaling::UpdateLocalTracks(
+    const std::vector<cricket::StreamParams>& streams,
+    cricket::MediaType media_type) {
+  TrackInfos* current_tracks = GetLocalTracks(media_type);
+
+  // Find removed tracks. Ie tracks where the track id or ssrc don't match
+  // the new StreamParam.
+  TrackInfos::iterator track_it = current_tracks->begin();
+  while (track_it != current_tracks->end()) {
+    TrackInfo info = track_it->second;
+    cricket::StreamParams params;
+    // Fixed mojibake: "&para" had been mangled into the pilcrow sign,
+    // leaving "¶ms" instead of "&params".
+    if (!cricket::GetStreamBySsrc(streams, info.ssrc, &params) ||
+        params.id != info.track_id) {
+      OnLocalTrackRemoved(info.stream_label, info.track_id, media_type);
+      // Post-increment keeps the iterator valid across the erase.
+      current_tracks->erase(track_it++);
+    } else {
+      ++track_it;
+    }
+  }
+
+  // Find new and active tracks.
+  for (cricket::StreamParamsVec::const_iterator it = streams.begin();
+       it != streams.end(); ++it) {
+    // The sync_label is the MediaStream label and the |stream.id| is the
+    // track id.
+    const std::string& stream_label = it->sync_label;
+    const std::string& track_id = it->id;
+    uint32 ssrc = it->first_ssrc();
+    // Renamed from |track_it| to avoid shadowing the iterator declared in
+    // the removal loop above.
+    TrackInfos::iterator existing_track_it = current_tracks->find(track_id);
+    if (existing_track_it == current_tracks->end()) {
+      (*current_tracks)[track_id] =
+          TrackInfo(stream_label, track_id, ssrc);
+      OnLocalTrackSeen(stream_label, track_id, it->first_ssrc(),
+                       media_type);
+    }
+  }
+}
+
+// Notifies the observer that the local track |track_id| of |stream_label|
+// is now present in the local description with |ssrc|. Logs and returns if
+// the stream or track is unknown (SDP out of sync with Add/RemoveLocalStream).
+void MediaStreamSignaling::OnLocalTrackSeen(
+    const std::string& stream_label,
+    const std::string& track_id,
+    uint32 ssrc,
+    cricket::MediaType media_type) {
+  MediaStreamInterface* stream = local_streams_->find(stream_label);
+  if (!stream) {
+    LOG(LS_WARNING) << "An unknown local MediaStream with label "
+                    << stream_label << " has been configured.";
+    return;
+  }
+
+  if (media_type == cricket::MEDIA_TYPE_AUDIO) {
+    AudioTrackInterface* audio_track = stream->FindAudioTrack(track_id);
+    if (!audio_track) {
+      // Fixed log message: was "with id , <track_id>" (stray comma).
+      LOG(LS_WARNING) << "An unknown local AudioTrack with id "
+                      << track_id << " has been configured.";
+      return;
+    }
+    stream_observer_->OnAddLocalAudioTrack(stream, audio_track, ssrc);
+  } else if (media_type == cricket::MEDIA_TYPE_VIDEO) {
+    VideoTrackInterface* video_track = stream->FindVideoTrack(track_id);
+    if (!video_track) {
+      // Fixed log message: was "with id , <track_id>" (stray comma).
+      LOG(LS_WARNING) << "An unknown local VideoTrack with id "
+                      << track_id << " has been configured.";
+      return;
+    }
+    stream_observer_->OnAddLocalVideoTrack(stream, video_track, ssrc);
+  } else {
+    ASSERT(false && "Invalid media type");
+  }
+}
+
+// Notifies the observer that the local track |track_id| of |stream_label|
+// is no longer present in the local description. Silently returns when the
+// stream or track is already gone.
+void MediaStreamSignaling::OnLocalTrackRemoved(
+    const std::string& stream_label,
+    const std::string& track_id,
+    cricket::MediaType media_type) {
+  MediaStreamInterface* stream = local_streams_->find(stream_label);
+  if (!stream) {
+    // This is the normal case. Ie RemoveLocalStream has been called and the
+    // SessionDescriptions has been renegotiated.
+    return;
+  }
+  // A track has been removed from the SessionDescription but the MediaStream
+  // is still associated with MediaStreamSignaling. This only occurs if the
+  // SDP doesn't match with the calls to AddLocalStream and
+  // RemoveLocalStream.
+
+  if (media_type == cricket::MEDIA_TYPE_AUDIO) {
+    AudioTrackInterface* audio_track = stream->FindAudioTrack(track_id);
+    if (!audio_track) {
+      return;
+    }
+    stream_observer_->OnRemoveLocalAudioTrack(stream, audio_track);
+  } else if (media_type == cricket::MEDIA_TYPE_VIDEO) {
+    VideoTrackInterface* video_track = stream->FindVideoTrack(track_id);
+    if (!video_track) {
+      return;
+    }
+    stream_observer_->OnRemoveLocalVideoTrack(stream, video_track);
+  } else {
+    ASSERT(false && "Invalid media type.");
+  }
+}
+
+// Matches local RTP data channels against the StreamParams in the local
+// description and updates their send SSRCs. Channels no longer present in
+// |streams| are transitioned toward closed via UpdateClosingDataChannels.
+void MediaStreamSignaling::UpdateLocalRtpDataChannels(
+    const cricket::StreamParamsVec& streams) {
+  std::vector<std::string> existing_channels;
+
+  // Find new and active data channels.
+  for (cricket::StreamParamsVec::const_iterator param = streams.begin();
+       param != streams.end(); ++param) {
+    // |param->sync_label| is actually the data channel label. The reason is
+    // that we use the same naming of data channels as we do for
+    // MediaStreams and Tracks.
+    // For MediaStreams, the sync_label is the MediaStream label and the
+    // track label is the same as |streamid|.
+    const std::string& channel_label = param->sync_label;
+    DataChannels::iterator found = data_channels_.find(channel_label);
+    if (!VERIFY(found != data_channels_.end())) {
+      continue;
+    }
+    // Set the SSRC the data channel should use for sending.
+    found->second->SetSendSsrc(param->first_ssrc());
+    existing_channels.push_back(found->first);
+  }
+
+  UpdateClosingDataChannels(existing_channels, true);
+}
+
+// Matches remote RTP data channels against the StreamParams in the remote
+// description: unknown labels create new remote channels, known ones get
+// their receive SSRC updated, and channels missing from |streams| are asked
+// to close.
+void MediaStreamSignaling::UpdateRemoteRtpDataChannels(
+    const cricket::StreamParamsVec& streams) {
+  std::vector<std::string> existing_channels;
+
+  // Find new and active data channels.
+  for (cricket::StreamParamsVec::const_iterator it = streams.begin();
+       it != streams.end(); ++it) {
+    // The data channel label is either the mslabel or the SSRC if the
+    // mslabel does not exist. Ex a=ssrc:444330170 mslabel:test1.
+    std::string label = it->sync_label.empty() ?
+        talk_base::ToString(it->first_ssrc()) : it->sync_label;
+    DataChannels::iterator data_channel_it =
+        data_channels_.find(label);
+    if (data_channel_it == data_channels_.end()) {
+      // This is a new data channel.
+      CreateRemoteDataChannel(label, it->first_ssrc());
+    } else {
+      data_channel_it->second->SetReceiveSsrc(it->first_ssrc());
+    }
+    existing_channels.push_back(label);
+  }
+
+  UpdateClosingDataChannels(existing_channels, false);
+}
+
+// Closes data channels that are not listed in |active_channels|. For a
+// local update the send SSRC is cleared; for a remote update the remote
+// side is asked to close. Channels that reach kClosed are dropped from the
+// map.
+// Fix: the original restarted iteration from begin() after each erase,
+// which was O(n^2) and re-invoked SetSendSsrc(0)/RemotePeerRequestClose on
+// channels that had already been processed. Uses the erase(it++) idiom
+// (as UpdateLocalTracks/UpdateRemoteStreamsList already do) instead.
+void MediaStreamSignaling::UpdateClosingDataChannels(
+    const std::vector<std::string>& active_channels, bool is_local_update) {
+  DataChannels::iterator it = data_channels_.begin();
+  while (it != data_channels_.end()) {
+    DataChannel* data_channel = it->second;
+    if (std::find(active_channels.begin(), active_channels.end(),
+                  data_channel->label()) != active_channels.end()) {
+      ++it;
+      continue;
+    }
+
+    if (is_local_update)
+      data_channel->SetSendSsrc(0);
+    else
+      data_channel->RemotePeerRequestClose();
+
+    if (data_channel->state() == DataChannel::kClosed) {
+      // Post-increment keeps the iterator valid across the erase.
+      data_channels_.erase(it++);
+    } else {
+      ++it;
+    }
+  }
+}
+
+// Creates a DataChannel initiated by the remote peer with |label| receiving
+// on |remote_ssrc|, and notifies the observer. Logs and drops the request if
+// no data channel factory has been configured.
+void MediaStreamSignaling::CreateRemoteDataChannel(const std::string& label,
+                                                   uint32 remote_ssrc) {
+  if (!data_channel_factory_) {
+    LOG(LS_WARNING) << "Remote peer requested a DataChannel but DataChannels "
+                    << "are not supported.";
+    return;
+  }
+  scoped_refptr<DataChannel> channel(
+      data_channel_factory_->CreateDataChannel(label, NULL));
+  channel->SetReceiveSsrc(remote_ssrc);
+  stream_observer_->OnAddDataChannel(channel);
+}
+
+// For SCTP, the sid doubles as the SSRC: point each channel's send SSRC at
+// its own id.
+void MediaStreamSignaling::UpdateLocalSctpDataChannels() {
+  for (DataChannels::iterator it = data_channels_.begin();
+       it != data_channels_.end(); ++it) {
+    it->second->SetSendSsrc(it->second->id());
+  }
+}
+
+// For SCTP, the sid doubles as the SSRC: point each channel's receive SSRC
+// at its own id.
+void MediaStreamSignaling::UpdateRemoteSctpDataChannels() {
+  for (DataChannels::iterator it = data_channels_.begin();
+       it != data_channels_.end(); ++it) {
+    it->second->SetReceiveSsrc(it->second->id());
+  }
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/mediastreamsignaling.h b/talk/app/webrtc/mediastreamsignaling.h
new file mode 100644
index 0000000..9ead8b0
--- /dev/null
+++ b/talk/app/webrtc/mediastreamsignaling.h
@@ -0,0 +1,385 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_MEDIASTREAMSIGNALING_H_
+#define TALK_APP_WEBRTC_MEDIASTREAMSIGNALING_H_
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include "talk/app/webrtc/datachannel.h"
+#include "talk/app/webrtc/mediastream.h"
+#include "talk/app/webrtc/peerconnectioninterface.h"
+#include "talk/app/webrtc/streamcollection.h"
+#include "talk/base/scoped_ref_ptr.h"
+#include "talk/session/media/mediasession.h"
+
+namespace talk_base {
+class Thread;
+} // namespace talk_base
+
+namespace webrtc {
+
+class RemoteMediaStreamFactory;
+
+// A MediaStreamSignalingObserver is notified when events happen to
+// MediaStreams, MediaStreamTracks or DataChannels associated with the observed
+// MediaStreamSignaling object. The notifications identify the stream, track or
+// channel.
class MediaStreamSignalingObserver {
 public:
  // Triggered when the remote SessionDescription has a new stream.
  virtual void OnAddRemoteStream(MediaStreamInterface* stream) = 0;

  // Triggered when the remote SessionDescription removes a stream.
  virtual void OnRemoveRemoteStream(MediaStreamInterface* stream) = 0;

  // Triggered when the remote SessionDescription has a new data channel.
  virtual void OnAddDataChannel(DataChannelInterface* data_channel) = 0;

  // Triggered when the remote SessionDescription has a new audio track.
  // |ssrc| is the SSRC the remote peer will use for sending the track.
  virtual void OnAddRemoteAudioTrack(MediaStreamInterface* stream,
                                     AudioTrackInterface* audio_track,
                                     uint32 ssrc) = 0;

  // Triggered when the remote SessionDescription has a new video track.
  // |ssrc| is the SSRC the remote peer will use for sending the track.
  virtual void OnAddRemoteVideoTrack(MediaStreamInterface* stream,
                                     VideoTrackInterface* video_track,
                                     uint32 ssrc) = 0;

  // Triggered when the remote SessionDescription has removed an audio track.
  virtual void OnRemoveRemoteAudioTrack(MediaStreamInterface* stream,
                                        AudioTrackInterface* audio_track) = 0;

  // Triggered when the remote SessionDescription has removed a video track.
  virtual void OnRemoveRemoteVideoTrack(MediaStreamInterface* stream,
                                        VideoTrackInterface* video_track) = 0;

  // Triggered when the local SessionDescription has a new audio track.
  // |ssrc| is the SSRC that will be used for sending the track.
  virtual void OnAddLocalAudioTrack(MediaStreamInterface* stream,
                                    AudioTrackInterface* audio_track,
                                    uint32 ssrc) = 0;

  // Triggered when the local SessionDescription has a new video track.
  // |ssrc| is the SSRC that will be used for sending the track.
  virtual void OnAddLocalVideoTrack(MediaStreamInterface* stream,
                                    VideoTrackInterface* video_track,
                                    uint32 ssrc) = 0;

  // Triggered when the local SessionDescription has removed an audio track.
  virtual void OnRemoveLocalAudioTrack(MediaStreamInterface* stream,
                                       AudioTrackInterface* audio_track) = 0;

  // Triggered when the local SessionDescription has removed a video track.
  virtual void OnRemoveLocalVideoTrack(MediaStreamInterface* stream,
                                       VideoTrackInterface* video_track) = 0;

  // Triggered when RemoveLocalStream is called. |stream| is no longer used
  // when negotiating and all tracks in |stream| should stop providing data to
  // this PeerConnection. This does not imply that the local session
  // description has changed; OnRemoveLocalAudioTrack and
  // OnRemoveLocalVideoTrack are not called for each individual track.
  virtual void OnRemoveLocalStream(MediaStreamInterface* stream) = 0;

 protected:
  // Protected non-virtual destructor: implementations are never owned or
  // deleted through a pointer to this interface.
  ~MediaStreamSignalingObserver() {}
};
+
+// MediaStreamSignaling works as glue between MediaStreams and the cricket
+// classes for SessionDescriptions.
+// It is used for creating cricket::MediaSessionOptions given the local
+// MediaStreams and data channels.
+//
+// It is responsible for creating remote MediaStreams given a remote
+// SessionDescription and creating cricket::MediaSessionOptions given
+// local MediaStreams.
+//
+// To signal that a DataChannel should be established:
+// 1. Call AddDataChannel with the new DataChannel. Next time
+// GetMediaSessionOptions will include the description of the DataChannel.
+// 2. When a local session description is set, call UpdateLocalStreams with the
+// session description. This will set the SSRC used for sending data on
+// this DataChannel.
+// 3. When remote session description is set, call UpdateRemoteStream with the
+// session description. If the DataChannel label and a SSRC is included in
+// the description, the DataChannel is updated with SSRC that will be used
+// for receiving data.
+// 4. When both the local and remote SSRC of a DataChannel is set the state of
+// the DataChannel change to kOpen.
+//
+// To setup a DataChannel initialized by the remote end.
+// 1. When remote session description is set, call UpdateRemoteStream with the
+// session description. If a label and a SSRC of a new DataChannel is found
+// MediaStreamSignalingObserver::OnAddDataChannel with the label and SSRC is
+// triggered.
+// 2. Create a DataChannel instance with the label and set the remote SSRC.
+// 3. Call AddDataChannel with this new DataChannel. GetMediaSessionOptions
+// will include the description of the DataChannel.
+// 4. Create a local session description and call UpdateLocalStreams. This will
+// set the local SSRC used by the DataChannel.
+// 5. When both the local and remote SSRC of a DataChannel is set the state of
+// the DataChannel change to kOpen.
+//
+// To close a DataChannel:
+// 1. Call DataChannel::Close. This will change the state of the DataChannel to
+// kClosing. GetMediaSessionOptions will not
+// include the description of the DataChannel.
+// 2. When a local session description is set, call UpdateLocalStreams with the
+// session description. The description will no longer contain the
+// DataChannel label or SSRC.
+// 3. When remote session description is set, call UpdateRemoteStream with the
+// session description. The description will no longer contain the
+// DataChannel label or SSRC. The DataChannel SSRC is updated with SSRC=0.
+// The DataChannel change state to kClosed.
+
+class MediaStreamSignaling {
+ public:
+ MediaStreamSignaling(talk_base::Thread* signaling_thread,
+ MediaStreamSignalingObserver* stream_observer);
+ virtual ~MediaStreamSignaling();
+
+ // Notify all referenced objects that MediaStreamSignaling will be teared
+ // down. This method must be called prior to the dtor.
+ void TearDown();
+
+ // Set a factory for creating data channels that are initiated by the remote
+ // peer.
+ void SetDataChannelFactory(DataChannelFactory* data_channel_factory) {
+ data_channel_factory_ = data_channel_factory;
+ }
+
+ // Checks if |id| is available to be assigned to a new SCTP data channel.
+ bool IsSctpIdAvailable(int id) const;
+
+ // Gets the first available SCTP id that is not assigned to any existing
+ // data channels.
+ bool AllocateSctpId(int* id);
+
+ // Adds |local_stream| to the collection of known MediaStreams that will be
+ // offered in a SessionDescription.
+ bool AddLocalStream(MediaStreamInterface* local_stream);
+
+ // Removes |local_stream| from the collection of known MediaStreams that will
+ // be offered in a SessionDescription.
+ void RemoveLocalStream(MediaStreamInterface* local_stream);
+
+ // Adds |data_channel| to the collection of DataChannels that will be
+ // be offered in a SessionDescription.
+ bool AddDataChannel(DataChannel* data_channel);
+
+ // Returns a MediaSessionOptions struct with options decided by |constraints|,
+ // the local MediaStreams and DataChannels.
+ virtual bool GetOptionsForOffer(
+ const MediaConstraintsInterface* constraints,
+ cricket::MediaSessionOptions* options);
+
+ // Returns a MediaSessionOptions struct with options decided by
+ // |constraints|, the local MediaStreams and DataChannels.
+ virtual bool GetOptionsForAnswer(
+ const MediaConstraintsInterface* constraints,
+ cricket::MediaSessionOptions* options);
+
+ // Called when the remote session description has changed. The purpose is to
+ // update remote MediaStreams and DataChannels with the current
+ // session state.
+ // If the remote SessionDescription contain information about a new remote
+ // MediaStreams a new remote MediaStream is created and
+ // MediaStreamSignalingObserver::OnAddStream is called.
+ // If a remote MediaStream is missing from
+ // the remote SessionDescription MediaStreamSignalingObserver::OnRemoveStream
+ // is called.
+ // If the SessionDescription contains information about a new DataChannel,
+ // MediaStreamSignalingObserver::OnAddDataChannel is called with the
+ // DataChannel.
+ void OnRemoteDescriptionChanged(const SessionDescriptionInterface* desc);
+
+ // Called when the local session description has changed. The purpose is to
+ // update local and remote MediaStreams and DataChannels with the current
+ // session state.
+ // If |desc| indicates that the media type should be rejected, the method
+ // ends the remote MediaStreamTracks.
+ // It also updates local DataChannels with information about its local SSRC.
+ void OnLocalDescriptionChanged(const SessionDescriptionInterface* desc);
+
+ // Called when the audio channel closes.
+ void OnAudioChannelClose();
+ // Called when the video channel closes.
+ void OnVideoChannelClose();
+ // Called when the data channel closes.
+ void OnDataChannelClose();
+
+ // Returns the SSRC for a given track.
+ bool GetRemoteAudioTrackSsrc(const std::string& track_id, uint32* ssrc) const;
+ bool GetRemoteVideoTrackSsrc(const std::string& track_id, uint32* ssrc) const;
+
+ // Returns all current known local MediaStreams.
+ StreamCollectionInterface* local_streams() const { return local_streams_;}
+
+ // Returns all current remote MediaStreams.
+ StreamCollectionInterface* remote_streams() const {
+ return remote_streams_.get();
+ }
+
+ private:
+ struct RemotePeerInfo {
+ RemotePeerInfo()
+ : msid_supported(false),
+ default_audio_track_needed(false),
+ default_video_track_needed(false) {
+ }
+ // True if it has been discovered that the remote peer support MSID.
+ bool msid_supported;
+ // The remote peer indicates in the session description that audio will be
+ // sent but no MSID is given.
+ bool default_audio_track_needed;
+ // The remote peer indicates in the session description that video will be
+ // sent but no MSID is given.
+ bool default_video_track_needed;
+
+ bool IsDefaultMediaStreamNeeded() {
+ return !msid_supported && (default_audio_track_needed ||
+ default_video_track_needed);
+ }
+ };
+
+ struct TrackInfo {
+ TrackInfo() : ssrc(0) {}
+ TrackInfo(const std::string& stream_label,
+ const std::string track_id,
+ uint32 ssrc)
+ : stream_label(stream_label),
+ track_id(track_id),
+ ssrc(ssrc) {
+ }
+ std::string stream_label;
+ std::string track_id;
+ uint32 ssrc;
+ };
+ typedef std::map<std::string, TrackInfo> TrackInfos;
+
+ void UpdateSessionOptions();
+
+ // Makes sure a MediaStream Track is created for each StreamParam in
+ // |streams|. |media_type| is the type of the |streams| and can be either
+ // audio or video.
+ // If a new MediaStream is created it is added to |new_streams|.
+ void UpdateRemoteStreamsList(
+ const std::vector<cricket::StreamParams>& streams,
+ cricket::MediaType media_type,
+ StreamCollection* new_streams);
+
+ // Triggered when a remote track has been seen for the first time in a remote
+ // session description. It creates a remote MediaStreamTrackInterface
+ // implementation and triggers MediaStreamSignaling::OnAddRemoteAudioTrack or
+ // MediaStreamSignaling::OnAddRemoteVideoTrack.
+ void OnRemoteTrackSeen(const std::string& stream_label,
+ const std::string& track_id,
+ uint32 ssrc,
+ cricket::MediaType media_type);
+
+ // Triggered when a remote track has been removed from a remote session
+ // description. It removes the remote track with id |track_id| from a remote
+ // MediaStream and triggers MediaStreamSignaling::OnRemoveRemoteAudioTrack or
+ // MediaStreamSignaling::OnRemoveRemoteVideoTrack.
+ void OnRemoteTrackRemoved(const std::string& stream_label,
+ const std::string& track_id,
+ cricket::MediaType media_type);
+
+ // Set the MediaStreamTrackInterface::TrackState to |kEnded| on all remote
+ // tracks of type |media_type|.
+ void RejectRemoteTracks(cricket::MediaType media_type);
+
+ // Finds remote MediaStreams without any tracks and removes them from
+ // |remote_streams_| and notifies the observer that the MediaStream no longer
+ // exist.
+ void UpdateEndedRemoteMediaStreams();
+ void MaybeCreateDefaultStream();
+ TrackInfos* GetRemoteTracks(cricket::MediaType type);
+
+ // Returns a map of currently negotiated LocalTrackInfo of type |type|.
+ TrackInfos* GetLocalTracks(cricket::MediaType type);
+ bool FindLocalTrack(const std::string& track_id, cricket::MediaType type);
+
+ // Loops through the vector of |streams| and finds added and removed
+ // StreamParams since last time this method was called.
+ // For each new or removed StreamParam NotifyLocalTrackAdded or
+ // NotifyLocalTrackRemoved in invoked.
+ void UpdateLocalTracks(const std::vector<cricket::StreamParams>& streams,
+ cricket::MediaType media_type);
+
+ // Triggered when a local track has been seen for the first time in a local
+ // session description.
+ // This method triggers MediaStreamSignaling::OnAddLocalAudioTrack or
+ // MediaStreamSignaling::OnAddLocalVideoTrack if the rtp streams in the local
+ // SessionDescription can be mapped to a MediaStreamTrack in a MediaStream in
+ // |local_streams_|
+ void OnLocalTrackSeen(const std::string& stream_label,
+ const std::string& track_id,
+ uint32 ssrc,
+ cricket::MediaType media_type);
+
+ // Triggered when a local track has been removed from a local session
+ // description.
+ // This method triggers MediaStreamSignaling::OnRemoveLocalAudioTrack or
+ // MediaStreamSignaling::OnRemoveLocalVideoTrack if a stream has been removed
+ // from the local SessionDescription and the stream can be mapped to a
+ // MediaStreamTrack in a MediaStream in |local_streams_|.
+ void OnLocalTrackRemoved(const std::string& stream_label,
+ const std::string& track_id,
+ cricket::MediaType media_type);
+
+ void UpdateLocalRtpDataChannels(const cricket::StreamParamsVec& streams);
+ void UpdateRemoteRtpDataChannels(const cricket::StreamParamsVec& streams);
+ void UpdateClosingDataChannels(
+ const std::vector<std::string>& active_channels, bool is_local_update);
+ void CreateRemoteDataChannel(const std::string& label, uint32 remote_ssrc);
+ void UpdateLocalSctpDataChannels();
+ void UpdateRemoteSctpDataChannels();
+
+ RemotePeerInfo remote_info_;
+ talk_base::Thread* signaling_thread_;
+ DataChannelFactory* data_channel_factory_;
+ cricket::MediaSessionOptions options_;
+ MediaStreamSignalingObserver* stream_observer_;
+ talk_base::scoped_refptr<StreamCollection> local_streams_;
+ talk_base::scoped_refptr<StreamCollection> remote_streams_;
+ talk_base::scoped_ptr<RemoteMediaStreamFactory> remote_stream_factory_;
+
+ TrackInfos remote_audio_tracks_;
+ TrackInfos remote_video_tracks_;
+ TrackInfos local_audio_tracks_;
+ TrackInfos local_video_tracks_;
+
+ int last_allocated_sctp_id_;
+ typedef std::map<std::string, talk_base::scoped_refptr<DataChannel> >
+ DataChannels;
+ DataChannels data_channels_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_MEDIASTREAMSIGNALING_H_
diff --git a/talk/app/webrtc/mediastreamsignaling_unittest.cc b/talk/app/webrtc/mediastreamsignaling_unittest.cc
new file mode 100644
index 0000000..7f87454
--- /dev/null
+++ b/talk/app/webrtc/mediastreamsignaling_unittest.cc
@@ -0,0 +1,949 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+
+#include "talk/app/webrtc/audiotrack.h"
+#include "talk/app/webrtc/mediastream.h"
+#include "talk/app/webrtc/mediastreamsignaling.h"
+#include "talk/app/webrtc/streamcollection.h"
+#include "talk/app/webrtc/test/fakeconstraints.h"
+#include "talk/app/webrtc/videotrack.h"
+#include "talk/base/gunit.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/stringutils.h"
+#include "talk/base/thread.h"
+#include "talk/p2p/base/constants.h"
+#include "talk/p2p/base/sessiondescription.h"
+
+static const char kStreams[][8] = {"stream1", "stream2"};
+static const char kAudioTracks[][32] = {"audiotrack0", "audiotrack1"};
+static const char kVideoTracks[][32] = {"videotrack0", "videotrack1"};
+
+using webrtc::AudioTrack;
+using webrtc::AudioTrackInterface;
+using webrtc::AudioTrackVector;
+using webrtc::VideoTrack;
+using webrtc::VideoTrackInterface;
+using webrtc::VideoTrackVector;
+using webrtc::DataChannelInterface;
+using webrtc::FakeConstraints;
+using webrtc::IceCandidateInterface;
+using webrtc::MediaConstraintsInterface;
+using webrtc::MediaStreamInterface;
+using webrtc::MediaStreamTrackInterface;
+using webrtc::SdpParseError;
+using webrtc::SessionDescriptionInterface;
+using webrtc::StreamCollection;
+using webrtc::StreamCollectionInterface;
+
+// Reference SDP with a MediaStream with label "stream1", an audio track with
+// id "audiotrack0" and a video track with id "videotrack0".
+static const char kSdpStringWithStream1[] =
+ "v=0\r\n"
+ "o=- 0 0 IN IP4 127.0.0.1\r\n"
+ "s=-\r\n"
+ "t=0 0\r\n"
+ "m=audio 1 RTP/AVPF 103\r\n"
+ "a=mid:audio\r\n"
+ "a=rtpmap:103 ISAC/16000\r\n"
+ "a=ssrc:1 cname:stream1\r\n"
+ "a=ssrc:1 mslabel:stream1\r\n"
+ "a=ssrc:1 label:audiotrack0\r\n"
+ "m=video 1 RTP/AVPF 120\r\n"
+ "a=mid:video\r\n"
+ "a=rtpmap:120 VP8/90000\r\n"
+ "a=ssrc:2 cname:stream1\r\n"
+ "a=ssrc:2 mslabel:stream1\r\n"
+ "a=ssrc:2 label:videotrack0\r\n";
+
+// Reference SDP with two MediaStreams with labels "stream1" and "stream2".
+// Each MediaStream has one audio track and one video track.
+// This uses MSID.
+static const char kSdpStringWith2Stream[] =
+ "v=0\r\n"
+ "o=- 0 0 IN IP4 127.0.0.1\r\n"
+ "s=-\r\n"
+ "t=0 0\r\n"
+ "a=msid-semantic: WMS stream1 stream2\r\n"
+ "m=audio 1 RTP/AVPF 103\r\n"
+ "a=mid:audio\r\n"
+ "a=rtpmap:103 ISAC/16000\r\n"
+ "a=ssrc:1 cname:stream1\r\n"
+ "a=ssrc:1 msid:stream1 audiotrack0\r\n"
+ "a=ssrc:3 cname:stream2\r\n"
+ "a=ssrc:3 msid:stream2 audiotrack1\r\n"
+ "m=video 1 RTP/AVPF 120\r\n"
+ "a=mid:video\r\n"
+ "a=rtpmap:120 VP8/0\r\n"
+ "a=ssrc:2 cname:stream1\r\n"
+ "a=ssrc:2 msid:stream1 videotrack0\r\n"
+ "a=ssrc:4 cname:stream2\r\n"
+ "a=ssrc:4 msid:stream2 videotrack1\r\n";
+
+// Reference SDP without MediaStreams. Msid is not supported.
+static const char kSdpStringWithoutStreams[] =
+ "v=0\r\n"
+ "o=- 0 0 IN IP4 127.0.0.1\r\n"
+ "s=-\r\n"
+ "t=0 0\r\n"
+ "m=audio 1 RTP/AVPF 103\r\n"
+ "a=mid:audio\r\n"
+ "a=rtpmap:103 ISAC/16000\r\n"
+ "m=video 1 RTP/AVPF 120\r\n"
+ "a=mid:video\r\n"
+ "a=rtpmap:120 VP8/90000\r\n";
+
// Reference SDP without MediaStreams. Msid is supported.
// Note: SDP attribute lines use the "a=<attribute>" syntax (RFC 4566); the
// previous "a:msid-semantic" was malformed and failed to advertise msid
// support (compare kSdpStringInit).
static const char kSdpStringWithMsidWithoutStreams[] =
    "v=0\r\n"
    "o=- 0 0 IN IP4 127.0.0.1\r\n"
    "s=-\r\n"
    "t=0 0\r\n"
    "a=msid-semantic: WMS\r\n"
    "m=audio 1 RTP/AVPF 103\r\n"
    "a=mid:audio\r\n"
    "a=rtpmap:103 ISAC/16000\r\n"
    "m=video 1 RTP/AVPF 120\r\n"
    "a=mid:video\r\n"
    "a=rtpmap:120 VP8/90000\r\n";
+
+// Reference SDP without MediaStreams and audio only.
+static const char kSdpStringWithoutStreamsAudioOnly[] =
+ "v=0\r\n"
+ "o=- 0 0 IN IP4 127.0.0.1\r\n"
+ "s=-\r\n"
+ "t=0 0\r\n"
+ "m=audio 1 RTP/AVPF 103\r\n"
+ "a=mid:audio\r\n"
+ "a=rtpmap:103 ISAC/16000\r\n";
+
+static const char kSdpStringInit[] =
+ "v=0\r\n"
+ "o=- 0 0 IN IP4 127.0.0.1\r\n"
+ "s=-\r\n"
+ "t=0 0\r\n"
+ "a=msid-semantic: WMS\r\n";
+
+static const char kSdpStringAudio[] =
+ "m=audio 1 RTP/AVPF 103\r\n"
+ "a=mid:audio\r\n"
+ "a=rtpmap:103 ISAC/16000\r\n";
+
+static const char kSdpStringVideo[] =
+ "m=video 1 RTP/AVPF 120\r\n"
+ "a=mid:video\r\n"
+ "a=rtpmap:120 VP8/90000\r\n";
+
+static const char kSdpStringMs1Audio0[] =
+ "a=ssrc:1 cname:stream1\r\n"
+ "a=ssrc:1 msid:stream1 audiotrack0\r\n";
+
+static const char kSdpStringMs1Video0[] =
+ "a=ssrc:2 cname:stream1\r\n"
+ "a=ssrc:2 msid:stream1 videotrack0\r\n";
+
+static const char kSdpStringMs1Audio1[] =
+ "a=ssrc:3 cname:stream1\r\n"
+ "a=ssrc:3 msid:stream1 audiotrack1\r\n";
+
+static const char kSdpStringMs1Video1[] =
+ "a=ssrc:4 cname:stream1\r\n"
+ "a=ssrc:4 msid:stream1 videotrack1\r\n";
+
+// Verifies that |options| contain all tracks in |collection| and that
+// the |options| has set the the has_audio and has_video flags correct.
+static void VerifyMediaOptions(StreamCollectionInterface* collection,
+ const cricket::MediaSessionOptions& options) {
+ if (!collection) {
+ return;
+ }
+
+ size_t stream_index = 0;
+ for (size_t i = 0; i < collection->count(); ++i) {
+ MediaStreamInterface* stream = collection->at(i);
+ AudioTrackVector audio_tracks = stream->GetAudioTracks();
+ ASSERT_GE(options.streams.size(), stream_index + audio_tracks.size());
+ for (size_t j = 0; j < audio_tracks.size(); ++j) {
+ webrtc::AudioTrackInterface* audio = audio_tracks[j];
+ EXPECT_EQ(options.streams[stream_index].sync_label, stream->label());
+ EXPECT_EQ(options.streams[stream_index++].id, audio->id());
+ EXPECT_TRUE(options.has_audio);
+ }
+ VideoTrackVector video_tracks = stream->GetVideoTracks();
+ ASSERT_GE(options.streams.size(), stream_index + video_tracks.size());
+ for (size_t j = 0; j < video_tracks.size(); ++j) {
+ webrtc::VideoTrackInterface* video = video_tracks[j];
+ EXPECT_EQ(options.streams[stream_index].sync_label, stream->label());
+ EXPECT_EQ(options.streams[stream_index++].id, video->id());
+ EXPECT_TRUE(options.has_video);
+ }
+ }
+}
+
+static bool CompareStreamCollections(StreamCollectionInterface* s1,
+ StreamCollectionInterface* s2) {
+ if (s1 == NULL || s2 == NULL || s1->count() != s2->count())
+ return false;
+
+ for (size_t i = 0; i != s1->count(); ++i) {
+ if (s1->at(i)->label() != s2->at(i)->label())
+ return false;
+ webrtc::AudioTrackVector audio_tracks1 = s1->at(i)->GetAudioTracks();
+ webrtc::AudioTrackVector audio_tracks2 = s2->at(i)->GetAudioTracks();
+ webrtc::VideoTrackVector video_tracks1 = s1->at(i)->GetVideoTracks();
+ webrtc::VideoTrackVector video_tracks2 = s2->at(i)->GetVideoTracks();
+
+ if (audio_tracks1.size() != audio_tracks2.size())
+ return false;
+ for (size_t j = 0; j != audio_tracks1.size(); ++j) {
+ if (audio_tracks1[j]->id() != audio_tracks2[j]->id())
+ return false;
+ }
+ if (video_tracks1.size() != video_tracks2.size())
+ return false;
+ for (size_t j = 0; j != video_tracks1.size(); ++j) {
+ if (video_tracks1[j]->id() != video_tracks2[j]->id())
+ return false;
+ }
+ }
+ return true;
+}
+
+class MockSignalingObserver : public webrtc::MediaStreamSignalingObserver {
+ public:
+ MockSignalingObserver()
+ : remote_media_streams_(StreamCollection::Create()) {
+ }
+
+ virtual ~MockSignalingObserver() {
+ }
+
+ // New remote stream have been discovered.
+ virtual void OnAddRemoteStream(MediaStreamInterface* remote_stream) {
+ remote_media_streams_->AddStream(remote_stream);
+ }
+
+ // Remote stream is no longer available.
+ virtual void OnRemoveRemoteStream(MediaStreamInterface* remote_stream) {
+ remote_media_streams_->RemoveStream(remote_stream);
+ }
+
+ virtual void OnAddDataChannel(DataChannelInterface* data_channel) {
+ }
+
+ virtual void OnAddLocalAudioTrack(MediaStreamInterface* stream,
+ AudioTrackInterface* audio_track,
+ uint32 ssrc) {
+ AddTrack(&local_audio_tracks_, stream, audio_track, ssrc);
+ }
+
+ virtual void OnAddLocalVideoTrack(MediaStreamInterface* stream,
+ VideoTrackInterface* video_track,
+ uint32 ssrc) {
+ AddTrack(&local_video_tracks_, stream, video_track, ssrc);
+ }
+
+ virtual void OnRemoveLocalAudioTrack(MediaStreamInterface* stream,
+ AudioTrackInterface* audio_track) {
+ RemoveTrack(&local_audio_tracks_, stream, audio_track);
+ }
+
+ virtual void OnRemoveLocalVideoTrack(MediaStreamInterface* stream,
+ VideoTrackInterface* video_track) {
+ RemoveTrack(&local_video_tracks_, stream, video_track);
+ }
+
+ virtual void OnAddRemoteAudioTrack(MediaStreamInterface* stream,
+ AudioTrackInterface* audio_track,
+ uint32 ssrc) {
+ AddTrack(&remote_audio_tracks_, stream, audio_track, ssrc);
+ }
+
+ virtual void OnAddRemoteVideoTrack(MediaStreamInterface* stream,
+ VideoTrackInterface* video_track,
+ uint32 ssrc) {
+ AddTrack(&remote_video_tracks_, stream, video_track, ssrc);
+ }
+
+ virtual void OnRemoveRemoteAudioTrack(MediaStreamInterface* stream,
+ AudioTrackInterface* audio_track) {
+ RemoveTrack(&remote_audio_tracks_, stream, audio_track);
+ }
+
+ virtual void OnRemoveRemoteVideoTrack(MediaStreamInterface* stream,
+ VideoTrackInterface* video_track) {
+ RemoveTrack(&remote_video_tracks_, stream, video_track);
+ }
+
+ virtual void OnRemoveLocalStream(MediaStreamInterface* stream) {
+ }
+
+ MediaStreamInterface* RemoteStream(const std::string& label) {
+ return remote_media_streams_->find(label);
+ }
+
+ StreamCollectionInterface* remote_streams() const {
+ return remote_media_streams_;
+ }
+
+ size_t NumberOfRemoteAudioTracks() { return remote_audio_tracks_.size(); }
+
+ void VerifyRemoteAudioTrack(const std::string& stream_label,
+ const std::string& track_id,
+ uint32 ssrc) {
+ VerifyTrack(remote_audio_tracks_, stream_label, track_id, ssrc);
+ }
+
+ size_t NumberOfRemoteVideoTracks() { return remote_video_tracks_.size(); }
+
+ void VerifyRemoteVideoTrack(const std::string& stream_label,
+ const std::string& track_id,
+ uint32 ssrc) {
+ VerifyTrack(remote_video_tracks_, stream_label, track_id, ssrc);
+ }
+
+ size_t NumberOfLocalAudioTracks() { return local_audio_tracks_.size(); }
+ void VerifyLocalAudioTrack(const std::string& stream_label,
+ const std::string& track_id,
+ uint32 ssrc) {
+ VerifyTrack(local_audio_tracks_, stream_label, track_id, ssrc);
+ }
+
+ size_t NumberOfLocalVideoTracks() { return local_video_tracks_.size(); }
+
+ void VerifyLocalVideoTrack(const std::string& stream_label,
+ const std::string& track_id,
+ uint32 ssrc) {
+ VerifyTrack(local_video_tracks_, stream_label, track_id, ssrc);
+ }
+
+ private:
+ struct TrackInfo {
+ TrackInfo() {}
+ TrackInfo(const std::string& stream_label, const std::string track_id,
+ uint32 ssrc)
+ : stream_label(stream_label),
+ track_id(track_id),
+ ssrc(ssrc) {
+ }
+ std::string stream_label;
+ std::string track_id;
+ uint32 ssrc;
+ };
+ typedef std::map<std::string, TrackInfo> TrackInfos;
+
+ void AddTrack(TrackInfos* track_infos, MediaStreamInterface* stream,
+ MediaStreamTrackInterface* track,
+ uint32 ssrc) {
+ (*track_infos)[track->id()] = TrackInfo(stream->label(), track->id(),
+ ssrc);
+ }
+
+ void RemoveTrack(TrackInfos* track_infos, MediaStreamInterface* stream,
+ MediaStreamTrackInterface* track) {
+ TrackInfos::iterator it = track_infos->find(track->id());
+ ASSERT_TRUE(it != track_infos->end());
+ ASSERT_EQ(it->second.stream_label, stream->label());
+ track_infos->erase(it);
+ }
+
+ void VerifyTrack(const TrackInfos& track_infos,
+ const std::string& stream_label,
+ const std::string& track_id,
+ uint32 ssrc) {
+ TrackInfos::const_iterator it = track_infos.find(track_id);
+ ASSERT_TRUE(it != track_infos.end());
+ EXPECT_EQ(stream_label, it->second.stream_label);
+ EXPECT_EQ(ssrc, it->second.ssrc);
+ }
+
+ TrackInfos remote_audio_tracks_;
+ TrackInfos remote_video_tracks_;
+ TrackInfos local_audio_tracks_;
+ TrackInfos local_video_tracks_;
+
+ talk_base::scoped_refptr<StreamCollection> remote_media_streams_;
+};
+
+class MediaStreamSignalingForTest : public webrtc::MediaStreamSignaling {
+ public:
+ explicit MediaStreamSignalingForTest(MockSignalingObserver* observer)
+ : webrtc::MediaStreamSignaling(talk_base::Thread::Current(), observer) {
+ };
+
+ using webrtc::MediaStreamSignaling::GetOptionsForOffer;
+ using webrtc::MediaStreamSignaling::GetOptionsForAnswer;
+ using webrtc::MediaStreamSignaling::OnRemoteDescriptionChanged;
+ using webrtc::MediaStreamSignaling::remote_streams;
+};
+
// Fixture that wires a MediaStreamSignalingForTest to a MockSignalingObserver
// and provides helpers to build local stream collections and matching SDP.
class MediaStreamSignalingTest: public testing::Test {
 protected:
  virtual void SetUp() {
    observer_.reset(new MockSignalingObserver());
    signaling_.reset(new MediaStreamSignalingForTest(observer_.get()));
  }

  // Create a collection of streams, each with one audio and one video track.
  // CreateStreamCollection(1) creates a collection that corresponds to
  // kSdpStringWithStream1.
  // CreateStreamCollection(2) corresponds to kSdpStringWith2Stream.
  talk_base::scoped_refptr<StreamCollection>
  CreateStreamCollection(int number_of_streams) {
    talk_base::scoped_refptr<StreamCollection> local_collection(
        StreamCollection::Create());

    for (int i = 0; i < number_of_streams; ++i) {
      talk_base::scoped_refptr<webrtc::MediaStreamInterface> stream(
          webrtc::MediaStream::Create(kStreams[i]));

      // Add a local audio track.
      talk_base::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
          webrtc::AudioTrack::Create(kAudioTracks[i], NULL));
      stream->AddTrack(audio_track);

      // Add a local video track.
      talk_base::scoped_refptr<webrtc::VideoTrackInterface> video_track(
          webrtc::VideoTrack::Create(kVideoTracks[i], NULL));
      stream->AddTrack(video_track);

      local_collection->AddStream(stream);
    }
    return local_collection;
  }

  // This function creates a MediaStream with label kStreams[0] and
  // |number_of_audio_tracks| and |number_of_video_tracks| tracks and the
  // corresponding SessionDescriptionInterface. The SessionDescriptionInterface
  // is returned in |desc| (caller takes ownership) and the MediaStream is
  // stored in |reference_collection_|.
  void CreateSessionDescriptionAndReference(
      size_t number_of_audio_tracks,
      size_t number_of_video_tracks,
      SessionDescriptionInterface** desc) {
    ASSERT_TRUE(desc != NULL);
    ASSERT_LE(number_of_audio_tracks, 2u);
    ASSERT_LE(number_of_video_tracks, 2u);

    reference_collection_ = StreamCollection::Create();
    // Build the SDP incrementally from the fragment constants while adding
    // the matching tracks to the reference stream.
    std::string sdp_ms1 = std::string(kSdpStringInit);

    std::string mediastream_label = kStreams[0];

    talk_base::scoped_refptr<webrtc::MediaStreamInterface> stream(
        webrtc::MediaStream::Create(mediastream_label));
    reference_collection_->AddStream(stream);

    if (number_of_audio_tracks > 0) {
      sdp_ms1 += std::string(kSdpStringAudio);
      sdp_ms1 += std::string(kSdpStringMs1Audio0);
      AddAudioTrack(kAudioTracks[0], stream);
    }
    if (number_of_audio_tracks > 1) {
      sdp_ms1 += kSdpStringMs1Audio1;
      AddAudioTrack(kAudioTracks[1], stream);
    }

    if (number_of_video_tracks > 0) {
      sdp_ms1 += std::string(kSdpStringVideo);
      sdp_ms1 += std::string(kSdpStringMs1Video0);
      AddVideoTrack(kVideoTracks[0], stream);
    }
    if (number_of_video_tracks > 1) {
      sdp_ms1 += kSdpStringMs1Video1;
      AddVideoTrack(kVideoTracks[1], stream);
    }

    *desc = webrtc::CreateSessionDescription(
        SessionDescriptionInterface::kOffer, sdp_ms1, NULL);
  }

  // Creates an audio track with |track_id| and adds it to |stream|.
  void AddAudioTrack(const std::string& track_id,
                     MediaStreamInterface* stream) {
    talk_base::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
        webrtc::AudioTrack::Create(track_id, NULL));
    ASSERT_TRUE(stream->AddTrack(audio_track));
  }

  // Creates a video track with |track_id| and adds it to |stream|.
  void AddVideoTrack(const std::string& track_id,
                     MediaStreamInterface* stream) {
    talk_base::scoped_refptr<webrtc::VideoTrackInterface> video_track(
        webrtc::VideoTrack::Create(track_id, NULL));
    ASSERT_TRUE(stream->AddTrack(video_track));
  }

  talk_base::scoped_refptr<StreamCollection> reference_collection_;
  talk_base::scoped_ptr<MockSignalingObserver> observer_;
  talk_base::scoped_ptr<MediaStreamSignalingForTest> signaling_;
};
+
+// With kOfferToReceiveAudio and kOfferToReceiveVideo both mandatory and no
+// local MediaStreams, the offer must include audio, video and bundle.
+TEST_F(MediaStreamSignalingTest, GetMediaSessionOptionsForOfferWithAudioVideo) {
+  FakeConstraints offer_constraints;
+  offer_constraints.SetMandatoryReceiveVideo(true);
+  offer_constraints.SetMandatoryReceiveAudio(true);
+  cricket::MediaSessionOptions session_options;
+  EXPECT_TRUE(
+      signaling_->GetOptionsForOffer(&offer_constraints, &session_options));
+  EXPECT_TRUE(session_options.has_audio);
+  EXPECT_TRUE(session_options.has_video);
+  EXPECT_TRUE(session_options.bundle_enabled);
+}
+
+// An offer created with only kOfferToReceiveAudio set offers audio (with
+// bundle) but no video.
+TEST_F(MediaStreamSignalingTest, GetMediaSessionOptionsForOfferWithAudio) {
+  FakeConstraints audio_only;
+  audio_only.SetMandatoryReceiveAudio(true);
+  cricket::MediaSessionOptions session_options;
+  EXPECT_TRUE(signaling_->GetOptionsForOffer(&audio_only, &session_options));
+  EXPECT_TRUE(session_options.has_audio);
+  EXPECT_FALSE(session_options.has_video);
+  EXPECT_TRUE(session_options.bundle_enabled);
+}
+
+// With neither constraints nor local MediaStreams, the default offer
+// contains audio only, with bundle enabled.
+TEST_F(MediaStreamSignalingTest, GetDefaultMediaSessionOptionsForOffer) {
+  cricket::MediaSessionOptions default_options;
+  EXPECT_TRUE(signaling_->GetOptionsForOffer(NULL, &default_options));
+  EXPECT_TRUE(default_options.has_audio);
+  EXPECT_FALSE(default_options.has_video);
+  EXPECT_TRUE(default_options.bundle_enabled);
+}
+
+// kOfferToReceiveAudio false plus kOfferToReceiveVideo true yields a
+// video-only offer.
+TEST_F(MediaStreamSignalingTest, GetMediaSessionOptionsForOfferWithVideo) {
+  FakeConstraints video_only;
+  video_only.SetMandatoryReceiveVideo(true);
+  video_only.SetMandatoryReceiveAudio(false);
+  cricket::MediaSessionOptions session_options;
+  EXPECT_TRUE(signaling_->GetOptionsForOffer(&video_only, &session_options));
+  EXPECT_FALSE(session_options.has_audio);
+  EXPECT_TRUE(session_options.has_video);
+  EXPECT_TRUE(session_options.bundle_enabled);
+}
+
+// Setting kUseRtpMux to false must disable bundle in the resulting offer
+// while audio and video are still offered.
+TEST_F(MediaStreamSignalingTest,
+       GetMediaSessionOptionsForOfferWithBundleDisabled) {
+  FakeConstraints no_bundle;
+  no_bundle.SetMandatoryReceiveAudio(true);
+  no_bundle.SetMandatoryReceiveVideo(true);
+  no_bundle.SetMandatoryUseRtpMux(false);
+  cricket::MediaSessionOptions session_options;
+  EXPECT_TRUE(signaling_->GetOptionsForOffer(&no_bundle, &session_options));
+  EXPECT_TRUE(session_options.has_audio);
+  EXPECT_TRUE(session_options.has_video);
+  EXPECT_FALSE(session_options.bundle_enabled);
+}
+
+// kIceRestart sets |transport_options.ice_restart| exactly once; the next
+// offer created without the constraint must not request a restart again.
+TEST_F(MediaStreamSignalingTest,
+       GetMediaSessionOptionsForOfferWithIceRestart) {
+  FakeConstraints restart_constraints;
+  restart_constraints.SetMandatoryIceRestart(true);
+  cricket::MediaSessionOptions session_options;
+  EXPECT_TRUE(
+      signaling_->GetOptionsForOffer(&restart_constraints, &session_options));
+  EXPECT_TRUE(session_options.transport_options.ice_restart);
+
+  EXPECT_TRUE(signaling_->GetOptionsForOffer(NULL, &session_options));
+  EXPECT_FALSE(session_options.transport_options.ice_restart);
+}
+
+// Unknown mandatory constraints must fail both offer and answer creation;
+// unknown optional constraints are simply ignored.
+TEST_F(MediaStreamSignalingTest, GetMediaSessionOptionsWithBadConstraints) {
+  cricket::MediaSessionOptions session_options;
+
+  FakeConstraints unknown_mandatory;
+  unknown_mandatory.AddMandatory("bad_key", "bad_value");
+  EXPECT_FALSE(
+      signaling_->GetOptionsForOffer(&unknown_mandatory, &session_options));
+  EXPECT_FALSE(
+      signaling_->GetOptionsForAnswer(&unknown_mandatory, &session_options));
+
+  FakeConstraints unknown_optional;
+  unknown_optional.AddOptional("bad_key", "bad_value");
+  EXPECT_TRUE(
+      signaling_->GetOptionsForOffer(&unknown_optional, &session_options));
+  EXPECT_TRUE(
+      signaling_->GetOptionsForAnswer(&unknown_optional, &session_options));
+}
+
+// Test that a correct MediaSessionOptions are created for an offer if
+// a MediaStream is sent and later updated with a new track.
+// MediaConstraints are not used.
+TEST_F(MediaStreamSignalingTest, AddTrackToLocalMediaStream) {
+  talk_base::scoped_refptr<StreamCollection> local_streams(
+      CreateStreamCollection(1));
+  MediaStreamInterface* local_stream = local_streams->at(0);
+  EXPECT_TRUE(signaling_->AddLocalStream(local_stream));
+  cricket::MediaSessionOptions options;
+  EXPECT_TRUE(signaling_->GetOptionsForOffer(NULL, &options));
+  VerifyMediaOptions(local_streams, options);
+
+  // Add a second audio track and check the regenerated options.
+  // BUG FIX: the original reused |options| here and never inspected
+  // |updated_options|, so the post-update state was never verified.
+  cricket::MediaSessionOptions updated_options;
+  local_stream->AddTrack(AudioTrack::Create(kAudioTracks[1], NULL));
+  EXPECT_TRUE(signaling_->GetOptionsForOffer(NULL, &updated_options));
+  VerifyMediaOptions(local_streams, updated_options);
+}
+
+// Test that the MediaConstraints in an answer don't affect if audio and video
+// is offered in an offer but that if kOfferToReceiveAudio or
+// kOfferToReceiveVideo constraints are true in an offer, the media type will be
+// included in subsequent answers.
+TEST_F(MediaStreamSignalingTest, MediaConstraintsInAnswer) {
+  FakeConstraints answer_c;
+  answer_c.SetMandatoryReceiveAudio(true);
+  answer_c.SetMandatoryReceiveVideo(true);
+
+  cricket::MediaSessionOptions answer_options;
+  EXPECT_TRUE(signaling_->GetOptionsForAnswer(&answer_c, &answer_options));
+  EXPECT_TRUE(answer_options.has_audio);
+  EXPECT_TRUE(answer_options.has_video);
+
+  // Answer constraints must not leak into a later offer: with receive
+  // audio/video explicitly disabled, the offer has neither.
+  FakeConstraints offer_c;
+  offer_c.SetMandatoryReceiveAudio(false);
+  offer_c.SetMandatoryReceiveVideo(false);
+
+  cricket::MediaSessionOptions offer_options;
+  EXPECT_TRUE(signaling_->GetOptionsForOffer(&offer_c, &offer_options));
+  EXPECT_FALSE(offer_options.has_audio);
+  EXPECT_FALSE(offer_options.has_video);
+
+  FakeConstraints updated_offer_c;
+  updated_offer_c.SetMandatoryReceiveAudio(true);
+  updated_offer_c.SetMandatoryReceiveVideo(true);
+
+  cricket::MediaSessionOptions updated_offer_options;
+  EXPECT_TRUE(signaling_->GetOptionsForOffer(&updated_offer_c,
+                                             &updated_offer_options));
+  EXPECT_TRUE(updated_offer_options.has_audio);
+  EXPECT_TRUE(updated_offer_options.has_video);
+
+  // Since an offer has been created with both audio and video, subsequent
+  // offers and answers should contain both audio and video.
+  // Answers will only contain the media types that exist in the offer
+  // regardless of the value of |updated_answer_options.has_audio| and
+  // |updated_answer_options.has_video|.
+  // BUG FIX: the original mutated |answer_c| here instead of
+  // |updated_answer_c|, so the disabled constraints were never exercised.
+  FakeConstraints updated_answer_c;
+  updated_answer_c.SetMandatoryReceiveAudio(false);
+  updated_answer_c.SetMandatoryReceiveVideo(false);
+
+  cricket::MediaSessionOptions updated_answer_options;
+  EXPECT_TRUE(signaling_->GetOptionsForAnswer(&updated_answer_c,
+                                              &updated_answer_options));
+  EXPECT_TRUE(updated_answer_options.has_audio);
+  EXPECT_TRUE(updated_answer_options.has_video);
+
+  EXPECT_TRUE(signaling_->GetOptionsForOffer(NULL,
+                                             &updated_offer_options));
+  EXPECT_TRUE(updated_offer_options.has_audio);
+  EXPECT_TRUE(updated_offer_options.has_video);
+}
+
+// This test verifies that the remote MediaStreams corresponding to a received
+// SDP string are created. In this test two separate MediaStreams are
+// signaled.
+TEST_F(MediaStreamSignalingTest, UpdateRemoteStreams) {
+  talk_base::scoped_ptr<SessionDescriptionInterface> desc(
+      webrtc::CreateSessionDescription(SessionDescriptionInterface::kOffer,
+                                       kSdpStringWithStream1, NULL));
+  EXPECT_TRUE(desc != NULL);
+  signaling_->OnRemoteDescriptionChanged(desc.get());
+
+  // One stream with one audio (ssrc 1) and one video (ssrc 2) track is
+  // expected, both inside |signaling_| and as reported to the observer.
+  talk_base::scoped_refptr<StreamCollection> reference(
+      CreateStreamCollection(1));
+  EXPECT_TRUE(CompareStreamCollections(signaling_->remote_streams(),
+                                       reference.get()));
+  EXPECT_TRUE(CompareStreamCollections(observer_->remote_streams(),
+                                       reference.get()));
+  EXPECT_EQ(1u, observer_->NumberOfRemoteAudioTracks());
+  observer_->VerifyRemoteAudioTrack(kStreams[0], kAudioTracks[0], 1);
+  EXPECT_EQ(1u, observer_->NumberOfRemoteVideoTracks());
+  observer_->VerifyRemoteVideoTrack(kStreams[0], kVideoTracks[0], 2);
+
+  // Create a session description based on another SDP with another
+  // MediaStream.
+  talk_base::scoped_ptr<SessionDescriptionInterface> update_desc(
+      webrtc::CreateSessionDescription(SessionDescriptionInterface::kOffer,
+                                       kSdpStringWith2Stream, NULL));
+  EXPECT_TRUE(update_desc != NULL);
+  signaling_->OnRemoteDescriptionChanged(update_desc.get());
+
+  // Both streams should now be known; the second stream's tracks carry
+  // ssrc 3 (audio) and ssrc 4 (video).
+  talk_base::scoped_refptr<StreamCollection> reference2(
+      CreateStreamCollection(2));
+  EXPECT_TRUE(CompareStreamCollections(signaling_->remote_streams(),
+                                       reference2.get()));
+  EXPECT_TRUE(CompareStreamCollections(observer_->remote_streams(),
+                                       reference2.get()));
+
+  EXPECT_EQ(2u, observer_->NumberOfRemoteAudioTracks());
+  observer_->VerifyRemoteAudioTrack(kStreams[0], kAudioTracks[0], 1);
+  observer_->VerifyRemoteAudioTrack(kStreams[1], kAudioTracks[1], 3);
+  EXPECT_EQ(2u, observer_->NumberOfRemoteVideoTracks());
+  observer_->VerifyRemoteVideoTrack(kStreams[0], kVideoTracks[0], 2);
+  observer_->VerifyRemoteVideoTrack(kStreams[1], kVideoTracks[1], 4);
+}
+
+// Verifies that tracks can be added to and later removed from a remote
+// MediaStream that has already been signaled, by feeding in updated remote
+// session descriptions.
+TEST_F(MediaStreamSignalingTest, AddRemoveTrackFromExistingRemoteMediaStream) {
+  // Signal a stream with one audio and one video track.
+  talk_base::scoped_ptr<SessionDescriptionInterface> one_track_desc;
+  CreateSessionDescriptionAndReference(1, 1, one_track_desc.use());
+  signaling_->OnRemoteDescriptionChanged(one_track_desc.get());
+  EXPECT_TRUE(CompareStreamCollections(signaling_->remote_streams(),
+                                       reference_collection_));
+
+  // Grow the same stream to two tracks of each kind.
+  talk_base::scoped_ptr<SessionDescriptionInterface> two_track_desc;
+  CreateSessionDescriptionAndReference(2, 2, two_track_desc.use());
+  signaling_->OnRemoteDescriptionChanged(two_track_desc.get());
+  EXPECT_TRUE(CompareStreamCollections(signaling_->remote_streams(),
+                                       reference_collection_));
+  EXPECT_TRUE(CompareStreamCollections(observer_->remote_streams(),
+                                       reference_collection_));
+
+  // Shrink the stream back to a single track of each kind.
+  CreateSessionDescriptionAndReference(1, 1, one_track_desc.use());
+  signaling_->OnRemoteDescriptionChanged(one_track_desc.get());
+  EXPECT_TRUE(CompareStreamCollections(signaling_->remote_streams(),
+                                       reference_collection_));
+  EXPECT_TRUE(CompareStreamCollections(observer_->remote_streams(),
+                                       reference_collection_));
+}
+
+// This tests that remote tracks are ended if a
+// local session description is set that rejects the media content type.
+TEST_F(MediaStreamSignalingTest, RejectMediaContent) {
+  talk_base::scoped_ptr<SessionDescriptionInterface> desc(
+      webrtc::CreateSessionDescription(SessionDescriptionInterface::kOffer,
+                                       kSdpStringWithStream1, NULL));
+  EXPECT_TRUE(desc != NULL);
+  signaling_->OnRemoteDescriptionChanged(desc.get());
+
+  ASSERT_EQ(1u, observer_->remote_streams()->count());
+  MediaStreamInterface* remote_stream = observer_->remote_streams()->at(0);
+  ASSERT_EQ(1u, remote_stream->GetVideoTracks().size());
+  ASSERT_EQ(1u, remote_stream->GetAudioTracks().size());
+
+  // Both remote tracks start out in the kLive state.
+  talk_base::scoped_refptr<webrtc::VideoTrackInterface> remote_video =
+      remote_stream->GetVideoTracks()[0];
+  EXPECT_EQ(webrtc::MediaStreamTrackInterface::kLive, remote_video->state());
+  talk_base::scoped_refptr<webrtc::AudioTrackInterface> remote_audio =
+      remote_stream->GetAudioTracks()[0];
+  EXPECT_EQ(webrtc::MediaStreamTrackInterface::kLive, remote_audio->state());
+
+  // Rejecting the video content ends only the video track.
+  cricket::ContentInfo* video_info =
+      desc->description()->GetContentByName("video");
+  ASSERT_TRUE(video_info != NULL);
+  video_info->rejected = true;
+  signaling_->OnLocalDescriptionChanged(desc.get());
+  EXPECT_EQ(webrtc::MediaStreamTrackInterface::kEnded, remote_video->state());
+  EXPECT_EQ(webrtc::MediaStreamTrackInterface::kLive, remote_audio->state());
+
+  // Rejecting the audio content as well ends the audio track too.
+  cricket::ContentInfo* audio_info =
+      desc->description()->GetContentByName("audio");
+  ASSERT_TRUE(audio_info != NULL);
+  audio_info->rejected = true;
+  signaling_->OnLocalDescriptionChanged(desc.get());
+  EXPECT_EQ(webrtc::MediaStreamTrackInterface::kEnded, remote_audio->state());
+}
+
+// This tests that a default MediaStream is created if a remote session
+// description doesn't contain any streams and no MSID support.
+// It also tests that the default stream is updated if a video m-line is added
+// in a subsequent session description.
+TEST_F(MediaStreamSignalingTest, SdpWithoutMsidCreatesDefaultStream) {
+  talk_base::scoped_ptr<SessionDescriptionInterface> desc_audio_only(
+      webrtc::CreateSessionDescription(SessionDescriptionInterface::kOffer,
+                                       kSdpStringWithoutStreamsAudioOnly,
+                                       NULL));
+  ASSERT_TRUE(desc_audio_only != NULL);
+  signaling_->OnRemoteDescriptionChanged(desc_audio_only.get());
+
+  // The audio-only description yields one default stream with a single
+  // audio track and no video tracks.
+  EXPECT_EQ(1u, signaling_->remote_streams()->count());
+  ASSERT_EQ(1u, observer_->remote_streams()->count());
+  MediaStreamInterface* remote_stream = observer_->remote_streams()->at(0);
+
+  EXPECT_EQ(1u, remote_stream->GetAudioTracks().size());
+  EXPECT_EQ(0u, remote_stream->GetVideoTracks().size());
+  EXPECT_EQ("default", remote_stream->label());
+
+  // Adding a video m-line extends the same default stream with a default
+  // video track ("defaultv0").
+  talk_base::scoped_ptr<SessionDescriptionInterface> desc(
+      webrtc::CreateSessionDescription(SessionDescriptionInterface::kOffer,
+                                       kSdpStringWithoutStreams, NULL));
+  ASSERT_TRUE(desc != NULL);
+  signaling_->OnRemoteDescriptionChanged(desc.get());
+  EXPECT_EQ(1u, signaling_->remote_streams()->count());
+  ASSERT_EQ(1u, remote_stream->GetAudioTracks().size());
+  EXPECT_EQ("defaulta0", remote_stream->GetAudioTracks()[0]->id());
+  ASSERT_EQ(1u, remote_stream->GetVideoTracks().size());
+  EXPECT_EQ("defaultv0", remote_stream->GetVideoTracks()[0]->id());
+  observer_->VerifyRemoteAudioTrack("default", "defaulta0", 0);
+  observer_->VerifyRemoteVideoTrack("default", "defaultv0", 0);
+}
+
+// A remote description that has no streams and gives no indication of MSID
+// support must still produce a single default MediaStream holding the
+// signaled tracks.
+TEST_F(MediaStreamSignalingTest,
+       SdpWithoutMsidAndStreamsCreatesDefaultStream) {
+  talk_base::scoped_ptr<SessionDescriptionInterface> remote_desc(
+      webrtc::CreateSessionDescription(SessionDescriptionInterface::kOffer,
+                                       kSdpStringWithoutStreams,
+                                       NULL));
+  ASSERT_TRUE(remote_desc != NULL);
+  signaling_->OnRemoteDescriptionChanged(remote_desc.get());
+
+  ASSERT_EQ(1u, observer_->remote_streams()->count());
+  MediaStreamInterface* default_stream = observer_->remote_streams()->at(0);
+  EXPECT_EQ(1u, default_stream->GetAudioTracks().size());
+  EXPECT_EQ(1u, default_stream->GetVideoTracks().size());
+}
+
+// A remote description that advertises MSID support but contains no streams
+// must not trigger creation of a default MediaStream.
+TEST_F(MediaStreamSignalingTest, SdpWitMsidDontCreatesDefaultStream) {
+  talk_base::scoped_ptr<SessionDescriptionInterface> msid_only_desc(
+      webrtc::CreateSessionDescription(SessionDescriptionInterface::kOffer,
+                                       kSdpStringWithMsidWithoutStreams,
+                                       NULL));
+  signaling_->OnRemoteDescriptionChanged(msid_only_desc.get());
+  EXPECT_EQ(0u, observer_->remote_streams()->count());
+}
+
+// Once an updated remote description stops mentioning any MediaStreams, the
+// previously signaled streams go away and no default stream replaces them.
+TEST_F(MediaStreamSignalingTest, VerifyDefaultStreamIsNotCreated) {
+  talk_base::scoped_ptr<SessionDescriptionInterface> desc_with_stream(
+      webrtc::CreateSessionDescription(SessionDescriptionInterface::kOffer,
+                                       kSdpStringWithStream1,
+                                       NULL));
+  ASSERT_TRUE(desc_with_stream != NULL);
+  signaling_->OnRemoteDescriptionChanged(desc_with_stream.get());
+  talk_base::scoped_refptr<StreamCollection> expected_streams(
+      CreateStreamCollection(1));
+  EXPECT_TRUE(CompareStreamCollections(observer_->remote_streams(),
+                                       expected_streams.get()));
+
+  talk_base::scoped_ptr<SessionDescriptionInterface> desc_without_streams(
+      webrtc::CreateSessionDescription(SessionDescriptionInterface::kOffer,
+                                       kSdpStringWithoutStreams,
+                                       NULL));
+  signaling_->OnRemoteDescriptionChanged(desc_without_streams.get());
+  EXPECT_EQ(0u, observer_->remote_streams()->count());
+}
+
+// This tests that the correct MediaStreamSignalingObserver methods are called
+// when MediaStreamSignaling::OnLocalDescriptionChanged is called with an
+// updated local session description.
+TEST_F(MediaStreamSignalingTest, LocalDescriptionChanged) {
+  talk_base::scoped_ptr<SessionDescriptionInterface> desc_1;
+  CreateSessionDescriptionAndReference(2, 2, desc_1.use());
+
+  // With the stream added, all four local tracks should be reported.
+  signaling_->AddLocalStream(reference_collection_->at(0));
+  signaling_->OnLocalDescriptionChanged(desc_1.get());
+  EXPECT_EQ(2u, observer_->NumberOfLocalAudioTracks());
+  EXPECT_EQ(2u, observer_->NumberOfLocalVideoTracks());
+  observer_->VerifyLocalAudioTrack(kStreams[0], kAudioTracks[0], 1);
+  observer_->VerifyLocalVideoTrack(kStreams[0], kVideoTracks[0], 2);
+  observer_->VerifyLocalAudioTrack(kStreams[0], kAudioTracks[1], 3);
+  observer_->VerifyLocalVideoTrack(kStreams[0], kVideoTracks[1], 4);
+
+  // Remove an audio and video track.
+  talk_base::scoped_ptr<SessionDescriptionInterface> desc_2;
+  CreateSessionDescriptionAndReference(1, 1, desc_2.use());
+  signaling_->OnLocalDescriptionChanged(desc_2.get());
+  EXPECT_EQ(1u, observer_->NumberOfLocalAudioTracks());
+  EXPECT_EQ(1u, observer_->NumberOfLocalVideoTracks());
+  observer_->VerifyLocalAudioTrack(kStreams[0], kAudioTracks[0], 1);
+  observer_->VerifyLocalVideoTrack(kStreams[0], kVideoTracks[0], 2);
+}
+
+// Adding a local stream after the local description has already changed
+// must still notify the observer about every local track in the stream.
+TEST_F(MediaStreamSignalingTest, AddLocalStreamAfterLocalDescriptionChanged) {
+  talk_base::scoped_ptr<SessionDescriptionInterface> local_desc;
+  CreateSessionDescriptionAndReference(2, 2, local_desc.use());
+
+  // No stream is known yet, so nothing is reported.
+  signaling_->OnLocalDescriptionChanged(local_desc.get());
+  EXPECT_EQ(0u, observer_->NumberOfLocalAudioTracks());
+  EXPECT_EQ(0u, observer_->NumberOfLocalVideoTracks());
+
+  // As soon as the stream is added, all four tracks are reported.
+  signaling_->AddLocalStream(reference_collection_->at(0));
+  EXPECT_EQ(2u, observer_->NumberOfLocalAudioTracks());
+  EXPECT_EQ(2u, observer_->NumberOfLocalVideoTracks());
+  observer_->VerifyLocalAudioTrack(kStreams[0], kAudioTracks[0], 1);
+  observer_->VerifyLocalVideoTrack(kStreams[0], kVideoTracks[0], 2);
+  observer_->VerifyLocalAudioTrack(kStreams[0], kAudioTracks[1], 3);
+  observer_->VerifyLocalVideoTrack(kStreams[0], kVideoTracks[1], 4);
+}
+
+// This tests that the correct MediaStreamSignalingObserver methods are called
+// if the ssrc on a local track is changed when
+// MediaStreamSignaling::OnLocalDescriptionChanged is called.
+TEST_F(MediaStreamSignalingTest, ChangeSsrcOnTrackInLocalSessionDescription) {
+  talk_base::scoped_ptr<SessionDescriptionInterface> desc;
+  CreateSessionDescriptionAndReference(1, 1, desc.use());
+
+  signaling_->AddLocalStream(reference_collection_->at(0));
+  signaling_->OnLocalDescriptionChanged(desc.get());
+  EXPECT_EQ(1u, observer_->NumberOfLocalAudioTracks());
+  EXPECT_EQ(1u, observer_->NumberOfLocalVideoTracks());
+  observer_->VerifyLocalAudioTrack(kStreams[0], kAudioTracks[0], 1);
+  observer_->VerifyLocalVideoTrack(kStreams[0], kVideoTracks[0], 2);
+
+  // Change the ssrc of the audio and video track by rewriting the SDP text
+  // (audio ssrc 1 -> 97, video ssrc 2 -> 98) and re-parsing it.
+  std::string sdp;
+  desc->ToString(&sdp);
+  std::string ssrc_org = "a=ssrc:1";
+  std::string ssrc_to = "a=ssrc:97";
+  talk_base::replace_substrs(ssrc_org.c_str(), ssrc_org.length(),
+                             ssrc_to.c_str(), ssrc_to.length(),
+                             &sdp);
+  ssrc_org = "a=ssrc:2";
+  ssrc_to = "a=ssrc:98";
+  talk_base::replace_substrs(ssrc_org.c_str(), ssrc_org.length(),
+                             ssrc_to.c_str(), ssrc_to.length(),
+                             &sdp);
+  talk_base::scoped_ptr<SessionDescriptionInterface> updated_desc(
+      webrtc::CreateSessionDescription(SessionDescriptionInterface::kOffer,
+                                       sdp, NULL));
+
+  // The same tracks must now be reported with the new ssrcs.
+  signaling_->OnLocalDescriptionChanged(updated_desc.get());
+  EXPECT_EQ(1u, observer_->NumberOfLocalAudioTracks());
+  EXPECT_EQ(1u, observer_->NumberOfLocalVideoTracks());
+  observer_->VerifyLocalAudioTrack(kStreams[0], kAudioTracks[0], 97);
+  observer_->VerifyLocalVideoTrack(kStreams[0], kVideoTracks[0], 98);
+}
+
+
diff --git a/talk/app/webrtc/mediastreamtrack.h b/talk/app/webrtc/mediastreamtrack.h
new file mode 100644
index 0000000..6055e51
--- /dev/null
+++ b/talk/app/webrtc/mediastreamtrack.h
@@ -0,0 +1,81 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_MEDIASTREAMTRACK_H_
+#define TALK_APP_WEBRTC_MEDIASTREAMTRACK_H_
+
+#include <string>
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/notifier.h"
+
+namespace webrtc {
+
+// MediaStreamTrack supplies the functionality common to AudioTrackInterface
+// and VideoTrackInterface: an id, an enabled flag and a state, with change
+// notifications delivered via Notifier<T>.
+template <typename T>
+class MediaStreamTrack : public Notifier<T> {
+ public:
+  typedef typename T::TrackState TypedTrackState;
+
+  virtual std::string id() const { return id_; }
+  virtual MediaStreamTrackInterface::TrackState state() const {
+    return state_;
+  }
+  virtual bool enabled() const { return enabled_; }
+  // Returns true only when the enabled flag actually changed; observers are
+  // notified on a real change.
+  virtual bool set_enabled(bool enable) {
+    if (enable == enabled_) {
+      return false;
+    }
+    enabled_ = enable;
+    Notifier<T>::FireOnChanged();
+    return true;
+  }
+  // Always reports success; observers are only notified on a real change.
+  virtual bool set_state(MediaStreamTrackInterface::TrackState new_state) {
+    if (state_ != new_state) {
+      state_ = new_state;
+      Notifier<T>::FireOnChanged();
+    }
+    return true;
+  }
+
+ protected:
+  // Tracks start enabled and in the kInitializing state.
+  explicit MediaStreamTrack(const std::string& id)
+      : enabled_(true),
+        id_(id),
+        state_(MediaStreamTrackInterface::kInitializing) {
+  }
+
+ private:
+  bool enabled_;
+  std::string id_;
+  MediaStreamTrackInterface::TrackState state_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_MEDIASTREAMTRACK_H_
diff --git a/talk/app/webrtc/mediastreamtrackproxy.h b/talk/app/webrtc/mediastreamtrackproxy.h
new file mode 100644
index 0000000..954874b
--- /dev/null
+++ b/talk/app/webrtc/mediastreamtrackproxy.h
@@ -0,0 +1,73 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file includes proxy classes for tracks. The purpose is
+// to make sure tracks are only accessed from the signaling thread.
+
+#ifndef TALK_APP_WEBRTC_MEDIASTREAMTRACKPROXY_H_
+#define TALK_APP_WEBRTC_MEDIASTREAMTRACKPROXY_H_
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/proxy.h"
+
+namespace webrtc {
+
+// Proxy for AudioTrackInterface: the BEGIN_PROXY_MAP machinery (proxy.h)
+// forwards every listed call so the track is only accessed from the
+// signaling thread (see file comment above).
+BEGIN_PROXY_MAP(AudioTrack)
+  PROXY_CONSTMETHOD0(std::string, kind)
+  PROXY_CONSTMETHOD0(std::string, id)
+  PROXY_CONSTMETHOD0(TrackState, state)
+  PROXY_CONSTMETHOD0(bool, enabled)
+  PROXY_CONSTMETHOD0(AudioSourceInterface*, GetSource)
+  PROXY_METHOD0(cricket::AudioRenderer*, FrameInput)
+
+  PROXY_METHOD1(bool, set_enabled, bool)
+  PROXY_METHOD1(bool, set_state, TrackState)
+
+  PROXY_METHOD1(void, RegisterObserver, ObserverInterface*)
+  PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*)
+END_PROXY()
+
+// Proxy for VideoTrackInterface; same thread-confinement mechanism as the
+// AudioTrack proxy, plus renderer management.
+BEGIN_PROXY_MAP(VideoTrack)
+  PROXY_CONSTMETHOD0(std::string, kind)
+  PROXY_CONSTMETHOD0(std::string, id)
+  PROXY_CONSTMETHOD0(TrackState, state)
+  PROXY_CONSTMETHOD0(bool, enabled)
+  PROXY_METHOD1(bool, set_enabled, bool)
+  PROXY_METHOD1(bool, set_state, TrackState)
+
+  PROXY_METHOD1(void, AddRenderer, VideoRendererInterface*)
+  PROXY_METHOD1(void, RemoveRenderer, VideoRendererInterface*)
+  PROXY_METHOD0(cricket::VideoRenderer*, FrameInput)
+  PROXY_CONSTMETHOD0(VideoSourceInterface*, GetSource)
+
+  PROXY_METHOD1(void, RegisterObserver, ObserverInterface*)
+  PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*)
+END_PROXY()
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_MEDIASTREAMTRACKPROXY_H_
diff --git a/talk/app/webrtc/notifier.h b/talk/app/webrtc/notifier.h
new file mode 100644
index 0000000..eaa0063
--- /dev/null
+++ b/talk/app/webrtc/notifier.h
@@ -0,0 +1,77 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_NOTIFIER_H_
+#define TALK_APP_WEBRTC_NOTIFIER_H_
+
+#include <list>
+
+#include "talk/base/common.h"
+#include "talk/app/webrtc/mediastreaminterface.h"
+
+namespace webrtc {
+
+// Template mixin that adds observer registration and change notification
+// on top of the interface T.
+template <class T>
+class Notifier : public T {
+ public:
+  Notifier() {
+  }
+
+  virtual void RegisterObserver(ObserverInterface* observer) {
+    ASSERT(observer != NULL);
+    observers_.push_back(observer);
+  }
+
+  // Removes only the first registration of |observer|, if any.
+  virtual void UnregisterObserver(ObserverInterface* observer) {
+    std::list<ObserverInterface*>::iterator it = observers_.begin();
+    while (it != observers_.end()) {
+      if (*it == observer) {
+        observers_.erase(it);
+        return;
+      }
+      ++it;
+    }
+  }
+
+  void FireOnChanged() {
+    // Iterate over a snapshot so that an observer may unregister itself from
+    // within OnChanged() without invalidating the iteration.
+    std::list<ObserverInterface*> snapshot(observers_);
+    for (std::list<ObserverInterface*>::iterator it = snapshot.begin();
+         it != snapshot.end(); ++it) {
+      (*it)->OnChanged();
+    }
+  }
+
+ protected:
+  std::list<ObserverInterface*> observers_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_NOTIFIER_H_
diff --git a/talk/app/webrtc/objc/README b/talk/app/webrtc/objc/README
new file mode 100644
index 0000000..cea2aae
--- /dev/null
+++ b/talk/app/webrtc/objc/README
@@ -0,0 +1,45 @@
+This directory contains the ObjectiveC implementation of the
+webrtc::PeerConnection API. This can be built for Mac or iOS.
+
+Prerequisites:
+- Make sure gclient is checking out tools necessary to target iOS: your
+ .gclient file should contain a line like:
+ target_os = ['ios', 'mac']
+ Make sure to re-run gclient sync after adding this to download the tools.
+- Set up webrtc-related GYP variables:
+- For Mac:
+ export GYP_DEFINES="build_with_libjingle=1 build_with_chromium=0 OS=mac
+ target_arch=x64 libjingle_objc=1 libpeer_target_type=static_library
+ $GYP_DEFINES"
+- For iOS:
+ export GYP_DEFINES="build_with_libjingle=1 build_with_chromium=0 OS=ios
+ libjingle_enable_video=0 libjingle_objc=1 enable_video=0 $GYP_DEFINES"
+- Finally, run "gclient runhooks" to generate iOS or Mac targeting Xcode
+ projects.
+
+Example of building & using the app:
+
+cd <path/to/libjingle>/trunk/talk
+- Open libjingle.xcodeproj. Select iPhone or iPad simulator and build
+ Then switch to iOS device and build everything. This creates x86 and ARM
+ archives.
+cd examples/ios
+./makeLibs.sh
+- This will generate fat archives containing both targets and copy them to
+ ./libs.
+- This step must be rerun every time you run gclient sync or build the API
+ libraries.
+- Open AppRTCDemo.xcodeproj, select your device or simulator and run.
+- If you have any problems deploying for the first time, check the project
+ properties to ensure that the Bundle Identifier matches your phone
+ provisioning profile. Or use the simulator as it doesn't require a profile.
+
+- In desktop chrome, navigate to http://apprtc.appspot.com and note the r=<NNN>
+ room number in the resulting URL.
+
+- Enter that number into the text field on the phone.
+
+- Alternatively, you can background the app and launch Safari. In Safari, open
+ the url apprtc://apprtc.appspot.com/?r=<NNN> where <NNN> is the room name.
+  Another option is to put the link in an email and send it to yourself.
+ Clicking on it will launch AppRTCDemo and navigate to the room.
diff --git a/talk/app/webrtc/objc/RTCAudioTrack+Internal.h b/talk/app/webrtc/objc/RTCAudioTrack+Internal.h
new file mode 100644
index 0000000..17d2723
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCAudioTrack+Internal.h
@@ -0,0 +1,37 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCAudioTrack.h"
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+
+@interface RTCAudioTrack (Internal)
+
+// The native AudioTrackInterface backing this RTCAudioTrack.
+@property(nonatomic, assign, readonly)
+    talk_base::scoped_refptr<webrtc::AudioTrackInterface> audioTrack;
+
+@end
diff --git a/talk/app/webrtc/objc/RTCAudioTrack.mm b/talk/app/webrtc/objc/RTCAudioTrack.mm
new file mode 100644
index 0000000..8a56986
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCAudioTrack.mm
@@ -0,0 +1,45 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+// Header names must match the on-disk files exactly ("+Internal.h", capital I)
+// or the build breaks on case-sensitive file systems.
+#import "RTCAudioTrack+Internal.h"
+
+#import "RTCMediaStreamTrack+Internal.h"
+
+@implementation RTCAudioTrack
+@end
+
+@implementation RTCAudioTrack (Internal)
+
+// Returns the underlying native audio track by downcasting the
+// MediaStreamTrackInterface held by the base RTCMediaStreamTrack.
+- (talk_base::scoped_refptr<webrtc::AudioTrackInterface>)audioTrack {
+  return static_cast<webrtc::AudioTrackInterface *>(self.mediaTrack.get());
+}
+
+@end
diff --git a/talk/app/webrtc/objc/RTCEnumConverter.h b/talk/app/webrtc/objc/RTCEnumConverter.h
new file mode 100644
index 0000000..0e83719
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCEnumConverter.h
@@ -0,0 +1,54 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCTypes.h"
+
+#include "talk/app/webrtc/peerconnectioninterface.h"
+
+// Static helpers mapping between the Objective-C enums declared in RTCTypes.h
+// and the corresponding native webrtc:: enums.
+@interface RTCEnumConverter : NSObject
+
++ (RTCICEConnectionState)convertIceConnectionStateToObjC:
+    (webrtc::PeerConnectionInterface::IceConnectionState)nativeState;
+
++ (RTCICEGatheringState)convertIceGatheringStateToObjC:
+    (webrtc::PeerConnectionInterface::IceGatheringState)nativeState;
+
++ (RTCSignalingState)convertSignalingStateToObjC:
+    (webrtc::PeerConnectionInterface::SignalingState)nativeState;
+
++ (RTCSourceState)convertSourceStateToObjC:
+    (webrtc::MediaSourceInterface::SourceState)nativeState;
+
+// Track state converts in both directions (ObjC <-> native).
++ (webrtc::MediaStreamTrackInterface::TrackState)convertTrackStateToNative:
+    (RTCTrackState)state;
+
++ (RTCTrackState)convertTrackStateToObjC:
+    (webrtc::MediaStreamTrackInterface::TrackState)nativeState;
+
+@end
diff --git a/talk/app/webrtc/objc/RTCEnumConverter.mm b/talk/app/webrtc/objc/RTCEnumConverter.mm
new file mode 100644
index 0000000..7c81c8d
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCEnumConverter.mm
@@ -0,0 +1,126 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCEnumConverter.h"
+
+#include "talk/app/webrtc/peerconnectioninterface.h"
+
+// Each switch below enumerates every value of the source enum and deliberately
+// omits a default case, so the compiler can flag the mapping when a new enum
+// value is introduced.
+@implementation RTCEnumConverter
+
++ (RTCICEConnectionState)convertIceConnectionStateToObjC:
+    (webrtc::PeerConnectionInterface::IceConnectionState)nativeState {
+  switch (nativeState) {
+    case webrtc::PeerConnectionInterface::kIceConnectionNew:
+      return RTCICEConnectionNew;
+    case webrtc::PeerConnectionInterface::kIceConnectionChecking:
+      return RTCICEConnectionChecking;
+    case webrtc::PeerConnectionInterface::kIceConnectionConnected:
+      return RTCICEConnectionConnected;
+    case webrtc::PeerConnectionInterface::kIceConnectionCompleted:
+      return RTCICEConnectionCompleted;
+    case webrtc::PeerConnectionInterface::kIceConnectionFailed:
+      return RTCICEConnectionFailed;
+    case webrtc::PeerConnectionInterface::kIceConnectionDisconnected:
+      return RTCICEConnectionDisconnected;
+    case webrtc::PeerConnectionInterface::kIceConnectionClosed:
+      return RTCICEConnectionClosed;
+  }
+}
+
++ (RTCICEGatheringState)convertIceGatheringStateToObjC:
+    (webrtc::PeerConnectionInterface::IceGatheringState)nativeState {
+  switch (nativeState) {
+    case webrtc::PeerConnectionInterface::kIceGatheringNew:
+      return RTCICEGatheringNew;
+    case webrtc::PeerConnectionInterface::kIceGatheringGathering:
+      return RTCICEGatheringGathering;
+    case webrtc::PeerConnectionInterface::kIceGatheringComplete:
+      return RTCICEGatheringComplete;
+  }
+}
+
++ (RTCSignalingState)convertSignalingStateToObjC:
+    (webrtc::PeerConnectionInterface::SignalingState)nativeState {
+  switch (nativeState) {
+    case webrtc::PeerConnectionInterface::kStable:
+      return RTCSignalingStable;
+    case webrtc::PeerConnectionInterface::kHaveLocalOffer:
+      return RTCSignalingHaveLocalOffer;
+    case webrtc::PeerConnectionInterface::kHaveLocalPrAnswer:
+      return RTCSignalingHaveLocalPrAnswer;
+    case webrtc::PeerConnectionInterface::kHaveRemoteOffer:
+      return RTCSignalingHaveRemoteOffer;
+    case webrtc::PeerConnectionInterface::kHaveRemotePrAnswer:
+      return RTCSignalingHaveRemotePrAnswer;
+    case webrtc::PeerConnectionInterface::kClosed:
+      return RTCSignalingClosed;
+  }
+}
+
++ (RTCSourceState)convertSourceStateToObjC:
+    (webrtc::MediaSourceInterface::SourceState)nativeState {
+  switch (nativeState) {
+    case webrtc::MediaSourceInterface::kInitializing:
+      return RTCSourceStateInitializing;
+    case webrtc::MediaSourceInterface::kLive:
+      return RTCSourceStateLive;
+    case webrtc::MediaSourceInterface::kEnded:
+      return RTCSourceStateEnded;
+    case webrtc::MediaSourceInterface::kMuted:
+      return RTCSourceStateMuted;
+  }
+}
+
+// ObjC -> native direction for track state.
++ (webrtc::MediaStreamTrackInterface::TrackState)
+    convertTrackStateToNative:(RTCTrackState)state {
+  switch (state) {
+    case RTCTrackStateInitializing:
+      return webrtc::MediaStreamTrackInterface::kInitializing;
+    case RTCTrackStateLive:
+      return webrtc::MediaStreamTrackInterface::kLive;
+    case RTCTrackStateEnded:
+      return webrtc::MediaStreamTrackInterface::kEnded;
+    case RTCTrackStateFailed:
+      return webrtc::MediaStreamTrackInterface::kFailed;
+  }
+}
+
+// Native -> ObjC direction for track state.
++ (RTCTrackState)convertTrackStateToObjC:
+    (webrtc::MediaStreamTrackInterface::TrackState)nativeState {
+  switch (nativeState) {
+    case webrtc::MediaStreamTrackInterface::kInitializing:
+      return RTCTrackStateInitializing;
+    case webrtc::MediaStreamTrackInterface::kLive:
+      return RTCTrackStateLive;
+    case webrtc::MediaStreamTrackInterface::kEnded:
+      return RTCTrackStateEnded;
+    case webrtc::MediaStreamTrackInterface::kFailed:
+      return RTCTrackStateFailed;
+  }
+}
+
+@end
diff --git a/talk/app/webrtc/objc/RTCI420Frame.mm b/talk/app/webrtc/objc/RTCI420Frame.mm
new file mode 100644
index 0000000..df84fc1
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCI420Frame.mm
@@ -0,0 +1,34 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCI420Frame.h"
+
+@implementation RTCI420Frame
+
+// Intentionally empty for now: the class only reserves the API surface.
+// TODO(hughv): Should this just be a cricket::VideoFrame wrapper object?
+
+@end
diff --git a/talk/app/webrtc/objc/RTCIceCandidate+Internal.h b/talk/app/webrtc/objc/RTCIceCandidate+Internal.h
new file mode 100644
index 0000000..e4964d4
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCIceCandidate+Internal.h
@@ -0,0 +1,39 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCICECandidate.h"
+
+#include "talk/app/webrtc/peerconnectioninterface.h"
+
+@interface RTCICECandidate (Internal)
+
+// Native representation of this candidate. NOTE(review): the getter creates a
+// new IceCandidateInterface on every access (via webrtc::CreateIceCandidate),
+// so despite the "assign" attribute the caller takes ownership and must
+// delete the returned object — confirm callers do.
+@property(nonatomic, assign, readonly) const
+    webrtc::IceCandidateInterface *candidate;
+
+// Initializes from a native candidate; does not take ownership of |candidate|.
+- (id)initWithCandidate:(const webrtc::IceCandidateInterface *)candidate;
+
+@end
diff --git a/talk/app/webrtc/objc/RTCIceCandidate.mm b/talk/app/webrtc/objc/RTCIceCandidate.mm
new file mode 100644
index 0000000..63eac1d
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCIceCandidate.mm
@@ -0,0 +1,86 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+// Must match the on-disk name RTCIceCandidate+Internal.h exactly (capital I),
+// or the build breaks on case-sensitive file systems.
+#import "RTCIceCandidate+Internal.h"
+
+@implementation RTCICECandidate {
+  NSString *_sdpMid;
+  NSInteger _sdpMLineIndex;
+  NSString *_sdp;
+}
+
+// Designated initializer. sdpMid and sdp are required; returns nil (and
+// asserts in debug builds) if either is missing.
+- (id)initWithMid:(NSString *)sdpMid
+            index:(NSInteger)sdpMLineIndex
+              sdp:(NSString *)sdp {
+  if (!sdpMid || !sdp) {
+    NSAssert(NO, @"nil arguments not allowed");
+    return nil;
+  }
+  if ((self = [super init])) {
+    _sdpMid = [sdpMid copy];
+    _sdpMLineIndex = sdpMLineIndex;
+    _sdp = [sdp copy];
+  }
+  return self;
+}
+
+- (NSString *)description {
+  return [NSString stringWithFormat:@"%@:%ld:%@",
+                                    self.sdpMid,
+                                    (long)self.sdpMLineIndex,
+                                    self.sdp];
+}
+
+@end
+
+@implementation RTCICECandidate (Internal)
+
+// Wraps a native candidate; returns nil (and asserts in debug builds) if the
+// candidate cannot be serialized to SDP.
+- (id)initWithCandidate:(const webrtc::IceCandidateInterface *)candidate {
+  if ((self = [super init])) {
+    std::string sdp;
+    if (candidate->ToString(&sdp)) {
+      _sdpMid = @(candidate->sdp_mid().c_str());
+      _sdpMLineIndex = candidate->sdp_mline_index();
+      _sdp = @(sdp.c_str());
+    } else {
+      self = nil;
+      NSAssert(NO, @"ICECandidateInterface->ToString failed");
+    }
+  }
+  return self;
+}
+
+// NOTE(review): allocates a new IceCandidateInterface on every access; the
+// caller takes ownership and must delete it — confirm callers free it.
+- (const webrtc::IceCandidateInterface *)candidate {
+  return webrtc::CreateIceCandidate(
+      [self.sdpMid UTF8String], self.sdpMLineIndex, [self.sdp UTF8String]);
+}
+
+@end
diff --git a/talk/app/webrtc/objc/RTCIceServer+Internal.h b/talk/app/webrtc/objc/RTCIceServer+Internal.h
new file mode 100644
index 0000000..c074294
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCIceServer+Internal.h
@@ -0,0 +1,37 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCICEServer.h"
+
+#include "talk/app/webrtc/peerconnectioninterface.h"
+
+@interface RTCICEServer (Internal)
+
+// Native IceServer struct populated from this object's URI and password.
+@property(nonatomic, assign, readonly)
+    webrtc::PeerConnectionInterface::IceServer iceServer;
+
+@end
diff --git a/talk/app/webrtc/objc/RTCIceServer.mm b/talk/app/webrtc/objc/RTCIceServer.mm
new file mode 100644
index 0000000..cb32673
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCIceServer.mm
@@ -0,0 +1,65 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+// Must match the on-disk name RTCIceServer+Internal.h exactly (capital I),
+// or the build breaks on case-sensitive file systems.
+#import "RTCIceServer+Internal.h"
+
+@implementation RTCICEServer
+
+// Designated initializer. Both arguments are required; returns nil (and
+// asserts in debug builds) if either is missing.
+- (id)initWithURI:(NSURL *)URI password:(NSString *)password {
+  if (!URI || !password) {
+    NSAssert(NO, @"nil arguments not allowed");
+    self = nil;
+    return nil;
+  }
+  if ((self = [super init])) {
+    _URI = URI;
+    _password = [password copy];
+  }
+  return self;
+}
+
+- (NSString *)description {
+  return [NSString stringWithFormat:@"Server: [%@]\nPassword: [%@]",
+      [self.URI absoluteString], self.password];
+}
+
+@end
+
+@implementation RTCICEServer (Internal)
+
+// Builds a native IceServer struct from the ObjC URI/password on each call.
+- (webrtc::PeerConnectionInterface::IceServer)iceServer {
+  webrtc::PeerConnectionInterface::IceServer iceServer;
+  iceServer.uri = [[self.URI absoluteString] UTF8String];
+  iceServer.password = [self.password UTF8String];
+  return iceServer;
+}
+
+@end
diff --git a/talk/app/webrtc/objc/RTCMediaConstraints+Internal.h b/talk/app/webrtc/objc/RTCMediaConstraints+Internal.h
new file mode 100644
index 0000000..71a10c7
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCMediaConstraints+Internal.h
@@ -0,0 +1,40 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCMediaConstraints.h"
+
+#import "RTCMediaConstraintsNative.h"
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+
+@interface RTCMediaConstraints (Internal)
+
+// Native constraints object backed by this instance's storage.
+// Ownership is retained for the lifetime of this object.
+@property(nonatomic, assign, readonly) const
+    webrtc::RTCMediaConstraintsNative *constraints;
+
+@end
diff --git a/talk/app/webrtc/objc/RTCMediaConstraints.mm b/talk/app/webrtc/objc/RTCMediaConstraints.mm
new file mode 100644
index 0000000..fcb3b52
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCMediaConstraints.mm
@@ -0,0 +1,76 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+// Must match the on-disk name RTCMediaConstraints+Internal.h exactly
+// (capital I), or the build breaks on case-sensitive file systems.
+#import "RTCMediaConstraints+Internal.h"
+
+#import "RTCPair.h"
+
+#include "talk/base/scoped_ptr.h"
+
+// TODO(hughv): Add accessors for mandatory and optional constraints.
+// TODO(hughv): Add description.
+
+@implementation RTCMediaConstraints {
+  talk_base::scoped_ptr<webrtc::RTCMediaConstraintsNative> _constraints;
+  webrtc::MediaConstraintsInterface::Constraints _mandatory;
+  webrtc::MediaConstraintsInterface::Constraints _optional;
+}
+
+// Converts the RTCPair arrays to native constraint lists and builds the
+// native constraints object from them.
+- (id)initWithMandatoryConstraints:(NSArray *)mandatory
+               optionalConstraints:(NSArray *)optional {
+  if ((self = [super init])) {
+    _mandatory = [[self class] constraintsFromArray:mandatory];
+    _optional = [[self class] constraintsFromArray:optional];
+    _constraints.reset(
+        new webrtc::RTCMediaConstraintsNative(_mandatory, _optional));
+  }
+  return self;
+}
+
+// Maps an NSArray of RTCPair (key/value) into a native Constraints list.
++ (webrtc::MediaConstraintsInterface::Constraints)
+    constraintsFromArray:(NSArray *)array {
+  webrtc::MediaConstraintsInterface::Constraints constraints;
+  for (RTCPair *pair in array) {
+    constraints.push_back(webrtc::MediaConstraintsInterface::Constraint(
+        [pair.key UTF8String], [pair.value UTF8String]));
+  }
+  return constraints;
+}
+
+@end
+
+// Category name must match the declaration in RTCMediaConstraints+Internal.h
+// ("Internal", capital I).
+@implementation RTCMediaConstraints (Internal)
+
+- (const webrtc::RTCMediaConstraintsNative *)constraints {
+  return _constraints.get();
+}
+
+@end
diff --git a/talk/app/webrtc/objc/RTCMediaConstraintsNative.cc b/talk/app/webrtc/objc/RTCMediaConstraintsNative.cc
new file mode 100644
index 0000000..ed06d18
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCMediaConstraintsNative.cc
@@ -0,0 +1,51 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/objc/RTCMediaConstraintsNative.h"
+
+namespace webrtc {
+
+// Out-of-line definitions for RTCMediaConstraintsNative, the C++ adapter
+// that feeds ObjC-supplied constraints into MediaConstraintsInterface.
+
+RTCMediaConstraintsNative::~RTCMediaConstraintsNative() {}
+
+RTCMediaConstraintsNative::RTCMediaConstraintsNative() {}
+
+// Copies both constraint lists into member storage, so callers need not keep
+// their vectors alive after construction.
+RTCMediaConstraintsNative::RTCMediaConstraintsNative(
+    const MediaConstraintsInterface::Constraints& mandatory,
+    const MediaConstraintsInterface::Constraints& optional)
+    : mandatory_(mandatory), optional_(optional) {}
+
+// MediaConstraintsInterface override: returns the stored mandatory snapshot.
+const MediaConstraintsInterface::Constraints&
+RTCMediaConstraintsNative::GetMandatory() const {
+  return mandatory_;
+}
+
+// MediaConstraintsInterface override: returns the stored optional snapshot.
+const MediaConstraintsInterface::Constraints&
+RTCMediaConstraintsNative::GetOptional() const {
+  return optional_;
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/objc/RTCMediaConstraintsNative.h b/talk/app/webrtc/objc/RTCMediaConstraintsNative.h
new file mode 100644
index 0000000..a5cd266
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCMediaConstraintsNative.h
@@ -0,0 +1,50 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_OBJC_RTCMEDIACONSTRAINTSNATIVE_H_
+#define TALK_APP_WEBRTC_OBJC_RTCMEDIACONSTRAINTSNATIVE_H_
+
+#include "talk/app/webrtc/mediaconstraintsinterface.h"
+
+namespace webrtc {
+// Concrete MediaConstraintsInterface that snapshots mandatory/optional
+// constraint lists by value. Bridges ObjC-layer RTCMediaConstraints to the
+// native PeerConnection API.
+class RTCMediaConstraintsNative : public MediaConstraintsInterface {
+ public:
+  virtual ~RTCMediaConstraintsNative();
+  RTCMediaConstraintsNative();
+  // Copies both lists into member storage.
+  RTCMediaConstraintsNative(
+      const MediaConstraintsInterface::Constraints& mandatory,
+      const MediaConstraintsInterface::Constraints& optional);
+  // MediaConstraintsInterface overrides; return the stored snapshots.
+  virtual const Constraints& GetMandatory() const;
+  virtual const Constraints& GetOptional() const;
+
+ private:
+  MediaConstraintsInterface::Constraints mandatory_;
+  MediaConstraintsInterface::Constraints optional_;
+};
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_OBJC_RTCMEDIACONSTRAINTSNATIVE_H_
diff --git a/talk/app/webrtc/objc/RTCMediaSource+Internal.h b/talk/app/webrtc/objc/RTCMediaSource+Internal.h
new file mode 100644
index 0000000..98f8e9c
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCMediaSource+Internal.h
@@ -0,0 +1,40 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCMediaSource.h"
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+
+// Internal-only API: exposes the native MediaSourceInterface backing an
+// RTCMediaSource to sibling wrapper classes.
+@interface RTCMediaSource (Internal)
+
+@property(nonatomic, assign, readonly)
+    talk_base::scoped_refptr<webrtc::MediaSourceInterface> mediaSource;
+
+// |mediaSource| must be non-nil; the wrapper retains it via scoped_refptr.
+- (id)initWithMediaSource:
+    (talk_base::scoped_refptr<webrtc::MediaSourceInterface>)mediaSource;
+
+@end
diff --git a/talk/app/webrtc/objc/RTCMediaSource.mm b/talk/app/webrtc/objc/RTCMediaSource.mm
new file mode 100644
index 0000000..9331fd7
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCMediaSource.mm
@@ -0,0 +1,65 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+#import "RTCMediaSource+internal.h"
+
+#import "RTCEnumConverter.h"
+
+// ObjC wrapper around a native MediaSourceInterface (audio/video source).
+@implementation RTCMediaSource {
+  talk_base::scoped_refptr<webrtc::MediaSourceInterface> _mediaSource;
+}
+
+// Maps the native source state to the ObjC RTCSourceState enum.
+- (RTCSourceState)state {
+  return [RTCEnumConverter convertSourceStateToObjC:self.mediaSource->state()];
+}
+
+@end
+
+@implementation RTCMediaSource (Internal)
+
+// Wraps a native source; |mediaSource| must be non-nil (asserts and returns
+// nil otherwise).
+- (id)initWithMediaSource:
+    (talk_base::scoped_refptr<webrtc::MediaSourceInterface>)mediaSource {
+  if (!mediaSource) {
+    NSAssert(NO, @"nil arguments not allowed");
+    self = nil;
+    return nil;
+  }
+  if ((self = [super init])) {
+    // scoped_refptr assignment adds a native reference held for our lifetime.
+    _mediaSource = mediaSource;
+  }
+  return self;
+}
+
+// Exposes the retained native source to sibling wrapper classes.
+- (talk_base::scoped_refptr<webrtc::MediaSourceInterface>)mediaSource {
+  return _mediaSource;
+}
+
+@end
diff --git a/talk/app/webrtc/objc/RTCMediaStream+Internal.h b/talk/app/webrtc/objc/RTCMediaStream+Internal.h
new file mode 100644
index 0000000..2123c2d
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCMediaStream+Internal.h
@@ -0,0 +1,40 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCMediaStream.h"
+
+#include "talk/app/webrtc/mediastreamtrack.h"
+
+// Internal-only API: exposes the native MediaStreamInterface backing an
+// RTCMediaStream to sibling wrapper classes.
+@interface RTCMediaStream (Internal)
+
+@property(nonatomic, assign, readonly)
+    talk_base::scoped_refptr<webrtc::MediaStreamInterface> mediaStream;
+
+// |mediaStream| must be non-nil; the wrapper retains it via scoped_refptr.
+- (id)initWithMediaStream:
+    (talk_base::scoped_refptr<webrtc::MediaStreamInterface>)mediaStream;
+
+@end
diff --git a/talk/app/webrtc/objc/RTCMediaStream.mm b/talk/app/webrtc/objc/RTCMediaStream.mm
new file mode 100644
index 0000000..dd4aab6
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCMediaStream.mm
@@ -0,0 +1,145 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+#import "RTCMediaStream+internal.h"
+
+#import "RTCAudioTrack+internal.h"
+#import "RTCMediaStreamTrack+internal.h"
+#import "RTCVideoTrack+internal.h"
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+
+// ObjC wrapper around a native MediaStreamInterface. Keeps parallel
+// NSMutableArrays of ObjC track wrappers mirroring the native stream; every
+// mutation goes to the native stream first and is mirrored only on success.
+@implementation RTCMediaStream {
+  NSMutableArray *_audioTracks;
+  NSMutableArray *_videoTracks;
+  talk_base::scoped_refptr<webrtc::MediaStreamInterface> _mediaStream;
+}
+
+- (NSString *)description {
+  return [NSString stringWithFormat:@"[%@:A=%lu:V=%lu]",
+                                    [self label],
+                                    (unsigned long)[self.audioTracks count],
+                                    (unsigned long)[self.videoTracks count]];
+}
+
+// Snapshot copies so callers cannot mutate internal bookkeeping.
+- (NSArray *)audioTracks {
+  return [_audioTracks copy];
+}
+
+- (NSArray *)videoTracks {
+  return [_videoTracks copy];
+}
+
+- (NSString *)label {
+  return @(self.mediaStream->label().c_str());
+}
+
+// Returns YES iff the native stream accepted the track.
+- (BOOL)addAudioTrack:(RTCAudioTrack *)track {
+  if (self.mediaStream->AddTrack(track.audioTrack)) {
+    [_audioTracks addObject:track];
+    return YES;
+  }
+  return NO;
+}
+
+- (BOOL)addVideoTrack:(RTCVideoTrack *)track {
+  if (self.mediaStream->AddTrack(track.videoTrack)) {
+    [_videoTracks addObject:track];
+    return YES;
+  }
+  return NO;
+}
+
+// Returns YES iff |track| was known to this wrapper and removed natively.
+- (BOOL)removeAudioTrack:(RTCAudioTrack *)track {
+  NSUInteger index = [_audioTracks indexOfObjectIdenticalTo:track];
+  NSAssert(index != NSNotFound,
+           @"|removeAudioTrack| called on unexpected RTCAudioTrack");
+  if (index != NSNotFound && self.mediaStream->RemoveTrack(track.audioTrack)) {
+    [_audioTracks removeObjectAtIndex:index];
+    return YES;
+  }
+  return NO;
+}
+
+- (BOOL)removeVideoTrack:(RTCVideoTrack *)track {
+  NSUInteger index = [_videoTracks indexOfObjectIdenticalTo:track];
+  // Bug fix: assert message previously named |removeAudioTrack| (copy-paste).
+  NSAssert(index != NSNotFound,
+           @"|removeVideoTrack| called on unexpected RTCVideoTrack");
+  if (index != NSNotFound && self.mediaStream->RemoveTrack(track.videoTrack)) {
+    [_videoTracks removeObjectAtIndex:index];
+    return YES;
+  }
+  return NO;
+}
+
+@end
+
+@implementation RTCMediaStream (Internal)
+
+// Wraps a native stream and builds ObjC wrappers for its existing audio
+// tracks. |mediaStream| must be non-nil (asserts and returns nil otherwise).
+- (id)initWithMediaStream:
+    (talk_base::scoped_refptr<webrtc::MediaStreamInterface>)mediaStream {
+  if (!mediaStream) {
+    NSAssert(NO, @"nil arguments not allowed");
+    self = nil;
+    return nil;
+  }
+  if ((self = [super init])) {
+    webrtc::AudioTrackVector audio_tracks = mediaStream->GetAudioTracks();
+    webrtc::VideoTrackVector video_tracks = mediaStream->GetVideoTracks();
+
+    _audioTracks = [NSMutableArray arrayWithCapacity:audio_tracks.size()];
+    _videoTracks = [NSMutableArray arrayWithCapacity:video_tracks.size()];
+    _mediaStream = mediaStream;
+
+    for (size_t i = 0; i < audio_tracks.size(); ++i) {
+      talk_base::scoped_refptr<webrtc::AudioTrackInterface> track =
+          audio_tracks[i];
+      RTCAudioTrack *audioTrack =
+          [[RTCAudioTrack alloc] initWithMediaTrack:track];
+      [_audioTracks addObject:audioTrack];
+    }
+    // TODO(hughv): Add video.
+    // NOTE: pre-existing native video tracks are NOT wrapped yet, so
+    // |videoTracks| stays empty until the code below is enabled.
+//    for (size_t i = 0; i < video_tracks.size(); ++i) {
+//      talk_base::scoped_refptr<webrtc::VideoTrackInterface> track =
+//          video_tracks[i];
+//      RTCVideoTrack *videoTrack =
+//          [[RTCVideoTrack alloc] initWithMediaTrack:track];
+//      [_videoTracks addObject:videoTrack];
+//    }
+  }
+  return self;
+}
+
+// Exposes the retained native stream to sibling wrapper classes.
+- (talk_base::scoped_refptr<webrtc::MediaStreamInterface>)mediaStream {
+  return _mediaStream;
+}
+
+@end
diff --git a/talk/app/webrtc/objc/RTCMediaStreamTrack+Internal.h b/talk/app/webrtc/objc/RTCMediaStreamTrack+Internal.h
new file mode 100644
index 0000000..9a0cab3
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCMediaStreamTrack+Internal.h
@@ -0,0 +1,40 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCMediaStreamTrack.h"
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+
+// Internal-only API: exposes the native MediaStreamTrackInterface backing an
+// RTCMediaStreamTrack to sibling wrapper classes.
+@interface RTCMediaStreamTrack (Internal)
+
+@property(nonatomic, assign, readonly)
+    talk_base::scoped_refptr<webrtc::MediaStreamTrackInterface> mediaTrack;
+
+// |mediaTrack| must be non-nil; the wrapper retains it via scoped_refptr.
+- (id)initWithMediaTrack:
+    (talk_base::scoped_refptr<webrtc::MediaStreamTrackInterface>)mediaTrack;
+
+@end
diff --git a/talk/app/webrtc/objc/RTCMediaStreamTrack.mm b/talk/app/webrtc/objc/RTCMediaStreamTrack.mm
new file mode 100644
index 0000000..6c8f715
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCMediaStreamTrack.mm
@@ -0,0 +1,103 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+#import "RTCMediaStreamTrack+internal.h"
+#import "RTCEnumConverter.h"
+
+// Base ObjC wrapper for native media-stream tracks (audio/video subclasses).
+// Identity and hashing are based on the label, matching the C++ side.
+@implementation RTCMediaStreamTrack {
+  talk_base::scoped_refptr<webrtc::MediaStreamTrackInterface> _mediaTrack;
+}
+
+// NOTE(review): the manual -label getter below shadows this synthesized
+// accessor, so the synthesized ivar (assigned in the Internal initializer)
+// is never read — confirm whether @synthesize is still needed.
+@synthesize label;
+
+- (BOOL)isEqual:(id)other {
+  // Equality is purely based on the label just like the C++ implementation.
+  if (self == other) return YES;
+  // Symmetric class check: a subclass and its base never compare equal.
+  if (![other isKindOfClass:[self class]] ||
+      ![self isKindOfClass:[other class]]) {
+    return NO;
+  }
+  RTCMediaStreamTrack *otherMediaStream = (RTCMediaStreamTrack *)other;
+  return [self.label isEqual:otherMediaStream.label];
+}
+
+- (NSUInteger)hash {
+  return [self.label hash];
+}
+
+// Track kind string from the native track (e.g. "audio" / "video").
+- (NSString *)kind {
+  return @(self.mediaTrack->kind().c_str());
+}
+
+// Label is derived from the native track id on every call.
+- (NSString *)label {
+  return @(self.mediaTrack->id().c_str());
+}
+
+- (BOOL)isEnabled {
+  return self.mediaTrack->enabled();
+}
+
+// Returns whether the native track accepted the new enabled state.
+- (BOOL)setEnabled:(BOOL)enabled {
+  return self.mediaTrack->set_enabled(enabled);
+}
+
+- (RTCTrackState)state {
+  return [RTCEnumConverter convertTrackStateToObjC:self.mediaTrack->state()];
+}
+
+// Returns whether the native track accepted the state transition.
+- (BOOL)setState:(RTCTrackState)state {
+  return self.mediaTrack->set_state(
+      [RTCEnumConverter convertTrackStateToNative:state]);
+}
+
+@end
+
+@implementation RTCMediaStreamTrack (Internal)
+
+// Wraps a native track; |mediaTrack| must be non-nil (asserts and returns
+// nil otherwise).
+- (id)initWithMediaTrack:(
+    talk_base::scoped_refptr<webrtc::MediaStreamTrackInterface>)mediaTrack {
+  if (!mediaTrack) {
+    NSAssert(NO, @"nil arguments not allowed");
+    self = nil;
+    return nil;
+  }
+  if ((self = [super init])) {
+    _mediaTrack = mediaTrack;
+    // Caches the id in the synthesized |label| ivar; note the manual -label
+    // getter re-derives it from the native track instead of reading this.
+    label = @(mediaTrack->id().c_str());
+  }
+  return self;
+}
+
+// Exposes the retained native track to sibling wrapper classes.
+- (talk_base::scoped_refptr<webrtc::MediaStreamTrackInterface>)mediaTrack {
+  return _mediaTrack;
+}
+
+@end
diff --git a/talk/app/webrtc/objc/RTCPair.m b/talk/app/webrtc/objc/RTCPair.m
new file mode 100644
index 0000000..ee2ba1b
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCPair.m
@@ -0,0 +1,40 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCPair.h"
+
+// Simple immutable key/value string pair, used to express media constraints.
+@implementation RTCPair
+
+// Copies both strings so later mutation of the arguments has no effect.
+- (id)initWithKey:(NSString *)key value:(NSString *)value {
+  if ((self = [super init])) {
+    _key = [key copy];
+    _value = [value copy];
+  }
+  return self;
+}
+
+@end
diff --git a/talk/app/webrtc/objc/RTCPeerConnection+Internal.h b/talk/app/webrtc/objc/RTCPeerConnection+Internal.h
new file mode 100644
index 0000000..d1b4639
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCPeerConnection+Internal.h
@@ -0,0 +1,44 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCPeerConnection.h"
+
+#import "RTCPeerConnectionDelegate.h"
+#import "RTCPeerConnectionObserver.h"
+
+#include "talk/app/webrtc/peerconnectioninterface.h"
+
+// Internal-only API: exposes the native PeerConnectionInterface and the
+// initializer used by RTCPeerConnectionFactory.
+@interface RTCPeerConnection (Internal)
+
+@property(nonatomic, assign, readonly)
+    talk_base::scoped_refptr<webrtc::PeerConnectionInterface> peerConnection;
+
+// Takes ownership of |observer| (stored in a scoped_ptr). Both arguments
+// must be non-null.
+- (id)initWithPeerConnection:(
+    talk_base::scoped_refptr<webrtc::PeerConnectionInterface>)peerConnection
+                    observer:(webrtc::RTCPeerConnectionObserver *)observer;
+
+@end
diff --git a/talk/app/webrtc/objc/RTCPeerConnection.mm b/talk/app/webrtc/objc/RTCPeerConnection.mm
new file mode 100644
index 0000000..73dce36
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCPeerConnection.mm
@@ -0,0 +1,247 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+#import "RTCPeerConnection+internal.h"
+
+#import "RTCEnumConverter.h"
+#import "RTCICECandidate+internal.h"
+#import "RTCICEServer+internal.h"
+#import "RTCMediaConstraints+internal.h"
+#import "RTCMediaStream+internal.h"
+#import "RTCSessionDescription+internal.h"
+#import "RTCSessionDescriptonDelegate.h"
+#import "RTCSessionDescription.h"
+
+#include "talk/app/webrtc/jsep.h"
+
+// Error domain/code reported to the delegate when SDP creation or
+// application fails.
+NSString* const kRTCSessionDescriptionDelegateErrorDomain = @"RTCSDPError";
+int const kRTCSessionDescriptionDelegateErrorCode = -1;
+
+namespace webrtc {
+
+// Adapts native CreateSessionDescriptionObserver callbacks into calls on the
+// ObjC delegate (protocol name "RTCSessionDescriptonDelegate" is misspelled
+// upstream — kept for compatibility). Instances are ref-counted and created
+// per CreateOffer/CreateAnswer call; under ARC the ObjC members are retained
+// for the observer's lifetime.
+class RTCCreateSessionDescriptionObserver
+    : public CreateSessionDescriptionObserver {
+ public:
+  RTCCreateSessionDescriptionObserver(id<RTCSessionDescriptonDelegate> delegate,
+                                      RTCPeerConnection *peerConnection) {
+    _delegate = delegate;
+    _peerConnection = peerConnection;
+  }
+
+  // Wraps |desc| and forwards it to the delegate with a nil error.
+  virtual void OnSuccess(SessionDescriptionInterface *desc) OVERRIDE {
+    RTCSessionDescription *session =
+        [[RTCSessionDescription alloc] initWithSessionDescription:desc];
+    [_delegate peerConnection:_peerConnection
+        didCreateSessionDescription:session
+                              error:nil];
+  }
+
+  // Converts the native error string into an NSError for the delegate.
+  virtual void OnFailure(const std::string &error) OVERRIDE {
+    NSString *str = @(error.c_str());
+    NSError *err =
+        [NSError errorWithDomain:kRTCSessionDescriptionDelegateErrorDomain
+                            code:kRTCSessionDescriptionDelegateErrorCode
+                        userInfo:@{ @"error" : str }];
+    [_delegate peerConnection:_peerConnection
+        didCreateSessionDescription:nil
+                              error:err];
+  }
+
+ private:
+  id<RTCSessionDescriptonDelegate> _delegate;
+  RTCPeerConnection *_peerConnection;
+};
+
+// Same adaptation for SetLocal/SetRemoteDescription completion callbacks.
+class RTCSetSessionDescriptionObserver : public SetSessionDescriptionObserver {
+ public:
+  RTCSetSessionDescriptionObserver(id<RTCSessionDescriptonDelegate> delegate,
+                                   RTCPeerConnection *peerConnection) {
+    _delegate = delegate;
+    _peerConnection = peerConnection;
+  }
+
+  virtual void OnSuccess() OVERRIDE {
+    [_delegate peerConnection:_peerConnection
+        didSetSessionDescriptionWithError:nil];
+  }
+
+  virtual void OnFailure(const std::string &error) OVERRIDE {
+    NSString *str = @(error.c_str());
+    NSError *err =
+        [NSError errorWithDomain:kRTCSessionDescriptionDelegateErrorDomain
+                            code:kRTCSessionDescriptionDelegateErrorCode
+                        userInfo:@{ @"error" : str }];
+    [_delegate peerConnection:_peerConnection
+        didSetSessionDescriptionWithError:err];
+  }
+
+ private:
+  id<RTCSessionDescriptonDelegate> _delegate;
+  RTCPeerConnection *_peerConnection;
+};
+
+}  // namespace webrtc
+
+// ObjC wrapper around the native PeerConnectionInterface. Owns the C++
+// observer that forwards native callbacks to the ObjC delegate, and mirrors
+// locally-added streams in |_localStreams|.
+@implementation RTCPeerConnection {
+  NSMutableArray *_localStreams;
+  talk_base::scoped_ptr<webrtc::RTCPeerConnectionObserver> _observer;
+  talk_base::scoped_refptr<webrtc::PeerConnectionInterface> _peerConnection;
+}
+
+- (BOOL)addICECandidate:(RTCICECandidate *)candidate {
+  // |candidate.candidate| allocates a native candidate owned by this method.
+  // Bug fix: the original placed |delete| after |return|, so it never ran
+  // and the candidate leaked on every call; delete before returning instead
+  // (AddIceCandidate does not take ownership of its argument).
+  const webrtc::IceCandidateInterface *iceCandidate = candidate.candidate;
+  BOOL ret = self.peerConnection->AddIceCandidate(iceCandidate);
+  delete iceCandidate;
+  return ret;
+}
+
+// Adds |stream| to the native connection; mirrors it locally only on success.
+- (BOOL)addStream:(RTCMediaStream *)stream
+      constraints:(RTCMediaConstraints *)constraints {
+  BOOL ret = self.peerConnection->AddStream(stream.mediaStream,
+                                            constraints.constraints);
+  if (!ret) {
+    return NO;
+  }
+  [_localStreams addObject:stream];
+  return YES;
+}
+
+// Result is delivered asynchronously through |delegate|.
+- (void)createAnswerWithDelegate:(id<RTCSessionDescriptonDelegate>)delegate
+                     constraints:(RTCMediaConstraints *)constraints {
+  talk_base::scoped_refptr<webrtc::RTCCreateSessionDescriptionObserver>
+      observer(new talk_base::RefCountedObject<
+          webrtc::RTCCreateSessionDescriptionObserver>(delegate, self));
+  self.peerConnection->CreateAnswer(observer, constraints.constraints);
+}
+
+// Result is delivered asynchronously through |delegate|.
+- (void)createOfferWithDelegate:(id<RTCSessionDescriptonDelegate>)delegate
+                    constraints:(RTCMediaConstraints *)constraints {
+  talk_base::scoped_refptr<webrtc::RTCCreateSessionDescriptionObserver>
+      observer(new talk_base::RefCountedObject<
+          webrtc::RTCCreateSessionDescriptionObserver>(delegate, self));
+  self.peerConnection->CreateOffer(observer, constraints.constraints);
+}
+
+- (void)removeStream:(RTCMediaStream *)stream {
+  self.peerConnection->RemoveStream(stream.mediaStream);
+  [_localStreams removeObject:stream];
+}
+
+// Completion is reported asynchronously through |delegate|.
+- (void)
+    setLocalDescriptionWithDelegate:(id<RTCSessionDescriptonDelegate>)delegate
+                 sessionDescription:(RTCSessionDescription *)sdp {
+  talk_base::scoped_refptr<webrtc::RTCSetSessionDescriptionObserver> observer(
+      new talk_base::RefCountedObject<webrtc::RTCSetSessionDescriptionObserver>(
+          delegate, self));
+  self.peerConnection->SetLocalDescription(observer, sdp.sessionDescription);
+}
+
+// Completion is reported asynchronously through |delegate|.
+- (void)
+    setRemoteDescriptionWithDelegate:(id<RTCSessionDescriptonDelegate>)delegate
+                  sessionDescription:(RTCSessionDescription *)sdp {
+  talk_base::scoped_refptr<webrtc::RTCSetSessionDescriptionObserver> observer(
+      new talk_base::RefCountedObject<webrtc::RTCSetSessionDescriptionObserver>(
+          delegate, self));
+  self.peerConnection->SetRemoteDescription(observer, sdp.sessionDescription);
+}
+
+// |servers| is an NSArray of RTCICEServer. Returns whether the native
+// connection accepted the updated ICE configuration.
+- (BOOL)updateICEServers:(NSArray *)servers
+             constraints:(RTCMediaConstraints *)constraints {
+  webrtc::PeerConnectionInterface::IceServers iceServers;
+  for (RTCICEServer *server in servers) {
+    iceServers.push_back(server.iceServer);
+  }
+  return self.peerConnection->UpdateIce(iceServers, constraints.constraints);
+}
+
+// Returns nil until a local description has been set.
+- (RTCSessionDescription *)localDescription {
+  const webrtc::SessionDescriptionInterface *sdi =
+      self.peerConnection->local_description();
+  return sdi ?
+      [[RTCSessionDescription alloc] initWithSessionDescription:sdi] :
+      nil;
+}
+
+// Snapshot copy; callers cannot mutate internal bookkeeping.
+- (NSArray *)localStreams {
+  return [_localStreams copy];
+}
+
+// Returns nil until a remote description has been set.
+- (RTCSessionDescription *)remoteDescription {
+  const webrtc::SessionDescriptionInterface *sdi =
+      self.peerConnection->remote_description();
+  return sdi ?
+      [[RTCSessionDescription alloc] initWithSessionDescription:sdi] :
+      nil;
+}
+
+- (RTCICEConnectionState)iceConnectionState {
+  return [RTCEnumConverter convertIceConnectionStateToObjC:
+      self.peerConnection->ice_connection_state()];
+}
+
+- (RTCICEGatheringState)iceGatheringState {
+  return [RTCEnumConverter convertIceGatheringStateToObjC:
+      self.peerConnection->ice_gathering_state()];
+}
+
+- (RTCSignalingState)signalingState {
+  return [RTCEnumConverter
+      convertSignalingStateToObjC:self.peerConnection->signaling_state()];
+}
+
+- (void)close {
+  self.peerConnection->Close();
+}
+
+@end
+
+@implementation RTCPeerConnection (Internal)
+
+// Both arguments must be non-null (asserts and returns nil otherwise).
+// Takes ownership of |observer| so it outlives native callbacks and is
+// destroyed together with this wrapper.
+- (id)initWithPeerConnection:(
+    talk_base::scoped_refptr<webrtc::PeerConnectionInterface>)peerConnection
+                    observer:(webrtc::RTCPeerConnectionObserver *)observer {
+  if (!peerConnection || !observer) {
+    NSAssert(NO, @"nil arguments not allowed");
+    self = nil;
+    return nil;
+  }
+  if ((self = [super init])) {
+    _peerConnection = peerConnection;
+    _localStreams = [[NSMutableArray alloc] init];
+    _observer.reset(observer);
+  }
+  return self;
+}
+
+// Exposes the retained native connection to sibling wrapper classes.
+- (talk_base::scoped_refptr<webrtc::PeerConnectionInterface>)peerConnection {
+  return _peerConnection;
+}
+
+@end
diff --git a/talk/app/webrtc/objc/RTCPeerConnectionFactory.mm b/talk/app/webrtc/objc/RTCPeerConnectionFactory.mm
new file mode 100644
index 0000000..b12af9d
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCPeerConnectionFactory.mm
@@ -0,0 +1,127 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+#import "RTCPeerConnectionFactory.h"
+
+#include <vector>
+
+#import "RTCAudioTrack+internal.h"
+#import "RTCICEServer+internal.h"
+#import "RTCMediaConstraints+internal.h"
+#import "RTCMediaSource+internal.h"
+#import "RTCMediaStream+internal.h"
+#import "RTCMediaStreamTrack+internal.h"
+#import "RTCPeerConnection+internal.h"
+#import "RTCPeerConnectionDelegate.h"
+#import "RTCPeerConnectionObserver.h"
+#import "RTCVideoCapturer+internal.h"
+#import "RTCVideoSource+internal.h"
+#import "RTCVideoTrack+internal.h"
+
+#include "talk/app/webrtc/audiotrack.h"
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/peerconnectionfactory.h"
+#include "talk/app/webrtc/peerconnectioninterface.h"
+#include "talk/app/webrtc/videosourceinterface.h"
+#include "talk/app/webrtc/videotrack.h"
+#include "talk/base/logging.h"
+
+@interface RTCPeerConnectionFactory ()
+
+@property(nonatomic, assign) talk_base::scoped_refptr<
+ webrtc::PeerConnectionFactoryInterface> nativeFactory;
+
+@end
+
+@implementation RTCPeerConnectionFactory
+
+// Creates the native factory backing every object this class vends.
+// Asserts (debug builds only) if native creation fails.
+- (id)init {
+ if ((self = [super init])) {
+ _nativeFactory = webrtc::CreatePeerConnectionFactory();
+ NSAssert(_nativeFactory, @"Failed to initialize PeerConnectionFactory!");
+ // Uncomment to get sensitive logs emitted (to stderr or logcat).
+ // talk_base::LogMessage::LogToDebug(talk_base::LS_SENSITIVE);
+ }
+ return self;
+}
+
+// Builds a peer connection from |servers| and |constraints|, adapting
+// |delegate| through a heap-allocated C++ observer whose ownership passes
+// to the returned RTCPeerConnection.
+// NOTE(review): if the RTCPeerConnection initializer returns nil (e.g.
+// native CreatePeerConnection failed), |observer| is never freed -- confirm.
+- (RTCPeerConnection *)
+ peerConnectionWithICEServers:(NSArray *)servers
+ constraints:(RTCMediaConstraints *)constraints
+ delegate:(id<RTCPeerConnectionDelegate>)delegate {
+ webrtc::PeerConnectionInterface::IceServers iceServers;
+ for (RTCICEServer *server in servers) {
+ iceServers.push_back(server.iceServer);
+ }
+ webrtc::RTCPeerConnectionObserver *observer =
+ new webrtc::RTCPeerConnectionObserver(delegate);
+ talk_base::scoped_refptr<webrtc::PeerConnectionInterface> peerConnection =
+ self.nativeFactory->CreatePeerConnection(
+ iceServers, constraints.constraints, observer);
+ RTCPeerConnection *pc =
+ [[RTCPeerConnection alloc] initWithPeerConnection:peerConnection
+ observer:observer];
+ observer->SetPeerConnection(pc);
+ return pc;
+}
+
+// Creates a local media stream with the given label, wrapped for ObjC.
+- (RTCMediaStream *)mediaStreamWithLabel:(NSString *)label {
+ talk_base::scoped_refptr<webrtc::MediaStreamInterface> nativeMediaStream =
+ self.nativeFactory->CreateLocalMediaStream([label UTF8String]);
+ return [[RTCMediaStream alloc] initWithMediaStream:nativeMediaStream];
+}
+
+// Creates a video source fed by |capturer|; returns nil without a capturer.
+- (RTCVideoSource *)videoSourceWithCapturer:(RTCVideoCapturer *)capturer
+ constraints:(RTCMediaConstraints *)constraints {
+ if (!capturer) {
+ return nil;
+ }
+ talk_base::scoped_refptr<webrtc::VideoSourceInterface> source =
+ self.nativeFactory->CreateVideoSource(capturer.capturer.get(),
+ constraints.constraints);
+ return [[RTCVideoSource alloc] initWithMediaSource:source];
+}
+
+// Creates a video track with id |videoId| drawing from |source|.
+- (RTCVideoTrack *)videoTrackWithID:(NSString *)videoId
+ source:(RTCVideoSource *)source {
+ talk_base::scoped_refptr<webrtc::VideoTrackInterface> track =
+ self.nativeFactory->CreateVideoTrack([videoId UTF8String],
+ source.videoSource);
+ return [[RTCVideoTrack alloc] initWithMediaTrack:track];
+}
+
+// Creates an audio track with id |audioId| using the default audio source
+// (NULL source argument).
+- (RTCAudioTrack *)audioTrackWithID:(NSString *)audioId {
+ talk_base::scoped_refptr<webrtc::AudioTrackInterface> track =
+ self.nativeFactory->CreateAudioTrack([audioId UTF8String], NULL);
+ return [[RTCAudioTrack alloc] initWithMediaTrack:track];
+}
+
+@end
diff --git a/talk/app/webrtc/objc/RTCPeerConnectionObserver.h b/talk/app/webrtc/objc/RTCPeerConnectionObserver.h
new file mode 100644
index 0000000..c7d1ef8
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCPeerConnectionObserver.h
@@ -0,0 +1,79 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/peerconnectioninterface.h"
+
+#import "RTCPeerConnection.h"
+#import "RTCPeerConnectionDelegate.h"
+
+// These objects are created by RTCPeerConnectionFactory to wrap an
+// id<RTCPeerConnectionDelegate> and call methods on that interface.
+
+namespace webrtc {
+
+// Adapter that receives native PeerConnectionObserver callbacks and forwards
+// them to an id<RTCPeerConnectionDelegate>, converting arguments to their
+// ObjC wrapper types along the way.
+class RTCPeerConnectionObserver : public PeerConnectionObserver {
+
+ public:
+ explicit RTCPeerConnectionObserver(id<RTCPeerConnectionDelegate> delegate);
+
+ void SetPeerConnection(RTCPeerConnection *peerConnection);
+
+ virtual void OnError() OVERRIDE;
+
+ // Triggered when the SignalingState changed.
+ virtual void OnSignalingChange(
+ PeerConnectionInterface::SignalingState new_state) OVERRIDE;
+
+ // Triggered when media is received on a new stream from remote peer.
+ virtual void OnAddStream(MediaStreamInterface* stream) OVERRIDE;
+
+ // Triggered when a remote peer close a stream.
+ virtual void OnRemoveStream(MediaStreamInterface* stream) OVERRIDE;
+
+ // Triggered when a remote peer open a data channel.
+ virtual void OnDataChannel(DataChannelInterface* data_channel) OVERRIDE;
+
+ // Triggered when renegotation is needed, for example the ICE has restarted.
+ virtual void OnRenegotiationNeeded() OVERRIDE;
+
+ // Called any time the ICEConnectionState changes
+ virtual void OnIceConnectionChange(
+ PeerConnectionInterface::IceConnectionState new_state) OVERRIDE;
+
+ // Called any time the ICEGatheringState changes
+ virtual void OnIceGatheringChange(
+ PeerConnectionInterface::IceGatheringState new_state) OVERRIDE;
+
+ // New Ice candidate have been found.
+ virtual void OnIceCandidate(const IceCandidateInterface* candidate) OVERRIDE;
+
+ private:
+ // NOTE(review): under ARC, ObjC pointers held in a C++ class are strong
+ // references; since the RTCPeerConnection owns this observer, the
+ // _peerConnection member may form a retain cycle -- confirm.
+ id<RTCPeerConnectionDelegate> _delegate;
+ RTCPeerConnection *_peerConnection;
+};
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/objc/RTCPeerConnectionObserver.mm b/talk/app/webrtc/objc/RTCPeerConnectionObserver.mm
new file mode 100644
index 0000000..e102bb9
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCPeerConnectionObserver.mm
@@ -0,0 +1,103 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+#import "RTCPeerConnectionObserver.h"
+
+#import "RTCICECandidate+internal.h"
+#import "RTCMediaStream+internal.h"
+#import "RTCEnumConverter.h"
+
+namespace webrtc {
+
+RTCPeerConnectionObserver::RTCPeerConnectionObserver(
+ id<RTCPeerConnectionDelegate> delegate) {
+ _delegate = delegate;
+}
+
+// Called by RTCPeerConnectionFactory once the wrapping ObjC object exists,
+// so later callbacks can identify which connection they belong to.
+void RTCPeerConnectionObserver::SetPeerConnection(
+ RTCPeerConnection *peerConnection) {
+ _peerConnection = peerConnection;
+}
+
+void RTCPeerConnectionObserver::OnError() {
+ [_delegate peerConnectionOnError:_peerConnection];
+}
+
+// Forwards the signaling state, converted to the ObjC enum.
+void RTCPeerConnectionObserver::OnSignalingChange(
+ PeerConnectionInterface::SignalingState new_state) {
+ [_delegate peerConnection:_peerConnection
+ signalingStateChanged:
+ [RTCEnumConverter convertSignalingStateToObjC:new_state]];
+}
+
+// Wraps the newly added remote stream before handing it to the delegate.
+void RTCPeerConnectionObserver::OnAddStream(MediaStreamInterface* stream) {
+ RTCMediaStream* mediaStream =
+ [[RTCMediaStream alloc] initWithMediaStream:stream];
+ [_delegate peerConnection:_peerConnection addedStream:mediaStream];
+}
+
+// NOTE(review): a fresh wrapper is created here rather than reusing the one
+// passed to the delegate in OnAddStream, so delegates comparing wrappers by
+// pointer identity will not match -- confirm this is intended.
+void RTCPeerConnectionObserver::OnRemoveStream(MediaStreamInterface* stream) {
+ RTCMediaStream* mediaStream =
+ [[RTCMediaStream alloc] initWithMediaStream:stream];
+ [_delegate peerConnection:_peerConnection removedStream:mediaStream];
+}
+
+// Data channels are not yet surfaced to the ObjC API.
+void RTCPeerConnectionObserver::OnDataChannel(
+ DataChannelInterface* data_channel) {
+ // TODO(hughv): Implement for future version.
+}
+
+void RTCPeerConnectionObserver::OnRenegotiationNeeded() {
+ [_delegate peerConnectionOnRenegotiationNeeded:_peerConnection];
+}
+
+// Forwards the ICE connection state, converted to the ObjC enum.
+void RTCPeerConnectionObserver::OnIceConnectionChange(
+ PeerConnectionInterface::IceConnectionState new_state) {
+ [_delegate peerConnection:_peerConnection
+ iceConnectionChanged:
+ [RTCEnumConverter convertIceConnectionStateToObjC:new_state]];
+}
+
+// Forwards the ICE gathering state, converted to the ObjC enum.
+void RTCPeerConnectionObserver::OnIceGatheringChange(
+ PeerConnectionInterface::IceGatheringState new_state) {
+ [_delegate peerConnection:_peerConnection
+ iceGatheringChanged:
+ [RTCEnumConverter convertIceGatheringStateToObjC:new_state]];
+}
+
+// Wraps each newly discovered ICE candidate for the delegate.
+void RTCPeerConnectionObserver::OnIceCandidate(
+ const IceCandidateInterface* candidate) {
+ RTCICECandidate* iceCandidate =
+ [[RTCICECandidate alloc] initWithCandidate:candidate];
+ [_delegate peerConnection:_peerConnection gotICECandidate:iceCandidate];
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/objc/RTCSessionDescription+Internal.h b/talk/app/webrtc/objc/RTCSessionDescription+Internal.h
new file mode 100644
index 0000000..261a176
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCSessionDescription+Internal.h
@@ -0,0 +1,41 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCSessionDescription.h"
+
+#include "talk/app/webrtc/jsep.h"
+#include "talk/app/webrtc/webrtcsession.h"
+
+@interface RTCSessionDescription (Internal)
+
+// Deserializes this object into a newly created native description.
+// Caller assumes ownership of this object!
+- (webrtc::SessionDescriptionInterface *)sessionDescription;
+
+// Copies the type and serialized SDP out of |sessionDescription|; does not
+// take ownership of the argument.
+- (id)initWithSessionDescription:
+ (const webrtc::SessionDescriptionInterface*)sessionDescription;
+
+@end
diff --git a/talk/app/webrtc/objc/RTCSessionDescription.mm b/talk/app/webrtc/objc/RTCSessionDescription.mm
new file mode 100644
index 0000000..4bd9b14
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCSessionDescription.mm
@@ -0,0 +1,81 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+#import "RTCSessionDescription+internal.h"
+
+@implementation RTCSessionDescription {
+ // Backing storage for the SDP string and its type.
+ NSString *_description;
+ NSString *_type;
+}
+
+// Public initializer; both arguments are required.
+- (id)initWithType:(NSString *)type sdp:(NSString *)sdp {
+ if (!type || !sdp) {
+ NSAssert(NO, @"nil arguments not allowed");
+ return nil;
+ }
+ if ((self = [super init])) {
+ _description = sdp;
+ _type = type;
+ }
+ return self;
+}
+
+@end
+
+@implementation RTCSessionDescription (Internal)
+
+// Copies the type and serialized SDP out of a native description; fails
+// (nil, with a debug assert) if serialization fails.
+- (id)initWithSessionDescription:
+ (const webrtc::SessionDescriptionInterface *)sessionDescription {
+ if (!sessionDescription) {
+ NSAssert(NO, @"nil arguments not allowed");
+ self = nil;
+ return nil;
+ }
+ if ((self = [super init])) {
+ const std::string &type = sessionDescription->type();
+ std::string sdp;
+ if (!sessionDescription->ToString(&sdp)) {
+ NSAssert(NO, @"Invalid SessionDescriptionInterface.");
+ self = nil;
+ } else {
+ _description = @(sdp.c_str());
+ _type = @(type.c_str());
+ }
+ }
+ return self;
+}
+
+// Deserializes back into a newly created native object; the caller assumes
+// ownership (see the category header).
+// NOTE(review): this relies on a |description| property overriding
+// NSObject's -description to return the SDP string -- confirm declaration.
+- (webrtc::SessionDescriptionInterface *)sessionDescription {
+ return webrtc::CreateSessionDescription(
+ [self.type UTF8String], [self.description UTF8String], NULL);
+}
+
+@end
diff --git a/talk/app/webrtc/objc/RTCVideoCapturer+Internal.h b/talk/app/webrtc/objc/RTCVideoCapturer+Internal.h
new file mode 100644
index 0000000..d0d685b
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCVideoCapturer+Internal.h
@@ -0,0 +1,38 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCVideoCapturer.h"
+
+#include "talk/app/webrtc/videosourceinterface.h"
+
+@interface RTCVideoCapturer (Internal)
+
+// Borrowed view of the owned native capturer (see the TODO in the .mm about
+// eventually releasing ownership through this accessor).
+@property(nonatomic, assign, readonly) const talk_base::scoped_ptr<cricket::VideoCapturer> &capturer;
+
+// Takes ownership of |capturer|.
+- (id)initWithCapturer:(cricket::VideoCapturer*)capturer;
+
+@end
diff --git a/talk/app/webrtc/objc/RTCVideoCapturer.mm b/talk/app/webrtc/objc/RTCVideoCapturer.mm
new file mode 100644
index 0000000..f7282c5
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCVideoCapturer.mm
@@ -0,0 +1,76 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+#import "RTCVideoCapturer+internal.h"
+
+#include "talk/media/base/videocapturer.h"
+#include "talk/media/devices/devicemanager.h"
+
+@implementation RTCVideoCapturer {
+ talk_base::scoped_ptr<cricket::VideoCapturer>_capturer;
+}
+
+// Looks up |deviceName| via the device manager and wraps the resulting
+// native capturer. Returns nil when the device cannot be found.
++ (RTCVideoCapturer *)capturerWithDeviceName:(NSString *)deviceName {
+ const std::string &device_name = std::string([deviceName UTF8String]);
+ talk_base::scoped_ptr<cricket::DeviceManagerInterface> device_manager(
+ cricket::DeviceManagerFactory::Create());
+ bool initialized = device_manager->Init();
+ NSAssert(initialized, @"DeviceManager::Init() failed");
+ cricket::Device device;
+ if (!device_manager->GetVideoCaptureDevice(device_name, &device)) {
+ LOG(LS_ERROR) << "GetVideoCaptureDevice failed";
+ // Use nil (not 0) for an Objective-C object pointer return.
+ return nil;
+ }
+ talk_base::scoped_ptr<cricket::VideoCapturer> capturer(
+ device_manager->CreateVideoCapturer(device));
+ RTCVideoCapturer *rtcCapturer =
+ [[RTCVideoCapturer alloc] initWithCapturer:capturer.release()];
+ return rtcCapturer;
+}
+
+@end
+
+@implementation RTCVideoCapturer (Internal)
+
+// Takes ownership of |capturer|, held in the scoped_ptr ivar.
+- (id)initWithCapturer:(cricket::VideoCapturer *)capturer {
+ if ((self = [super init])) {
+ _capturer.reset(capturer);
+ }
+ return self;
+}
+
+// TODO(hughv): When capturer is implemented, this needs to return
+// _capturer.release() instead. For now, this isn't used.
+- (const talk_base::scoped_ptr<cricket::VideoCapturer> &)capturer {
+ return _capturer;
+}
+
+@end
diff --git a/talk/app/webrtc/objc/RTCVideoRenderer+Internal.h b/talk/app/webrtc/objc/RTCVideoRenderer+Internal.h
new file mode 100644
index 0000000..8854ed7
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCVideoRenderer+Internal.h
@@ -0,0 +1,40 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCVideoRenderer.h"
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+
+@interface RTCVideoRenderer (Internal)
+
+// Non-owning accessor for the native renderer (currently unimplemented in
+// the .mm and always NULL).
+// TODO(hughv): Use smart pointer.
+@property(nonatomic, assign, readonly)
+ webrtc::VideoRendererInterface *videoRenderer;
+
+- (id)initWithVideoRenderer:(webrtc::VideoRendererInterface *)videoRenderer;
+
+@end
diff --git a/talk/app/webrtc/objc/RTCVideoRenderer.mm b/talk/app/webrtc/objc/RTCVideoRenderer.mm
new file mode 100644
index 0000000..3d3b10e
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCVideoRenderer.mm
@@ -0,0 +1,72 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+#import "RTCVideoRenderer+internal.h"
+
+#if TARGET_OS_IPHONE
+#import <UIKit/UIKit.h>
+#endif
+
+#import "RTCI420Frame.h"
+#import "RTCVideoRendererDelegate.h"
+
+@implementation RTCVideoRenderer
+
+// Placeholder: GUI renderer creation is not implemented yet; always nil.
++ (RTCVideoRenderer *)videoRenderGUIWithFrame:(CGRect)frame {
+ // TODO (hughv): Implement.
+ return nil;
+}
+
+// Stores the delegate; native renderer creation is still pending.
+- (id)initWithDelegate:(id<RTCVideoRendererDelegate>)delegate {
+ if ((self = [super init])) {
+ _delegate = delegate;
+ // TODO (hughv): Create video renderer.
+ }
+ return self;
+}
+
+@end
+
+@implementation RTCVideoRenderer (Internal)
+
+// Placeholder wrapper: the native renderer argument is ignored for now.
+- (id)initWithVideoRenderer:(webrtc::VideoRendererInterface *)videoRenderer {
+ if ((self = [super init])) {
+ // TODO (hughv): Implement.
+ }
+ return self;
+}
+
+// Always NULL until the renderer is implemented.
+- (webrtc::VideoRendererInterface *)videoRenderer {
+ // TODO (hughv): Implement.
+ return NULL;
+}
+
+@end
diff --git a/talk/app/webrtc/objc/RTCVideoSource+Internal.h b/talk/app/webrtc/objc/RTCVideoSource+Internal.h
new file mode 100644
index 0000000..1d3c4c9
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCVideoSource+Internal.h
@@ -0,0 +1,37 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCVideoSource.h"
+
+#include "talk/app/webrtc/videosourceinterface.h"
+
+@interface RTCVideoSource (Internal)
+
+// Typed view of the inherited media source, downcast to the video source
+// interface.
+@property(nonatomic, assign, readonly)
+ talk_base::scoped_refptr<webrtc::VideoSourceInterface>videoSource;
+
+@end
diff --git a/talk/app/webrtc/objc/RTCVideoSource.mm b/talk/app/webrtc/objc/RTCVideoSource.mm
new file mode 100644
index 0000000..c28fa9b
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCVideoSource.mm
@@ -0,0 +1,44 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+#import "RTCVideoSource+internal.h"
+#import "RTCMediaSource+internal.h"
+
+// All behavior is inherited from the RTCMediaSource base class; this
+// subclass only adds the typed accessor in its Internal category.
+@implementation RTCVideoSource
+@end
+
+@implementation RTCVideoSource (Internal)
+
+// Downcasts the stored media source to the video source interface.
+- (talk_base::scoped_refptr<webrtc::VideoSourceInterface>)videoSource {
+ return static_cast<webrtc::VideoSourceInterface *>(self.mediaSource.get());
+}
+
+@end
diff --git a/talk/app/webrtc/objc/RTCVideoTrack+Internal.h b/talk/app/webrtc/objc/RTCVideoTrack+Internal.h
new file mode 100644
index 0000000..b5da54b
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCVideoTrack+Internal.h
@@ -0,0 +1,40 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCVideoTrack.h"
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/peerconnectioninterface.h"
+
+@class RTCVideoRenderer;
+
+@interface RTCVideoTrack (Internal)
+
+@property(nonatomic, assign, readonly)
+ talk_base::scoped_refptr<webrtc::VideoTrackInterface> videoTrack;
+
+@end
diff --git a/talk/app/webrtc/objc/RTCVideoTrack.mm b/talk/app/webrtc/objc/RTCVideoTrack.mm
new file mode 100644
index 0000000..88f7226
--- /dev/null
+++ b/talk/app/webrtc/objc/RTCVideoTrack.mm
@@ -0,0 +1,77 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+#import "RTCVideoTrack+internal.h"
+
+#import "RTCMediaStreamTrack+internal.h"
+#import "RTCVideoRenderer+internal.h"
+
+@implementation RTCVideoTrack {
+ NSMutableArray *_rendererArray;
+}
+
+- (id)initWithMediaTrack:(
+ talk_base::scoped_refptr<webrtc::MediaStreamTrackInterface>)mediaTrack {
+ if (self = [super initWithMediaTrack:mediaTrack]) {
+ _rendererArray = [NSMutableArray array];
+ }
+ return self;
+}
+
+- (void)addRenderer:(RTCVideoRenderer *)renderer {
+ NSAssert1(![self.renderers containsObject:renderer],
+ @"renderers already contains object [%@]",
+ [renderer description]);
+ [_rendererArray addObject:renderer];
+ self.videoTrack->AddRenderer(renderer.videoRenderer);
+}
+
+- (void)removeRenderer:(RTCVideoRenderer *)renderer {
+ NSUInteger index = [self.renderers indexOfObjectIdenticalTo:renderer];
+ if (index != NSNotFound) {
+ [_rendererArray removeObjectAtIndex:index];
+ self.videoTrack->RemoveRenderer(renderer.videoRenderer);
+ }
+}
+
+- (NSArray *)renderers {
+ return [_rendererArray copy];
+}
+
+@end
+
+@implementation RTCVideoTrack (Internal)
+
+- (talk_base::scoped_refptr<webrtc::VideoTrackInterface>)videoTrack {
+ return static_cast<webrtc::VideoTrackInterface *>(self.mediaTrack.get());
+}
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCAudioSource.h b/talk/app/webrtc/objc/public/RTCAudioSource.h
new file mode 100644
index 0000000..e357620
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCAudioSource.h
@@ -0,0 +1,40 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCMediaSource.h"
+
+// RTCAudioSource is an ObjectiveC wrapper for AudioSourceInterface. It is
+// used as the source for one or more RTCAudioTrack objects.
+@interface RTCAudioSource : RTCMediaSource
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// Disallow init and don't add to documentation
+- (id)init __attribute__(
+ (unavailable("init is not a supported initializer for this class.")));
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCAudioTrack.h b/talk/app/webrtc/objc/public/RTCAudioTrack.h
new file mode 100644
index 0000000..e6aae13
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCAudioTrack.h
@@ -0,0 +1,39 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCMediaStreamTrack.h"
+
+// RTCAudioTrack is an ObjectiveC wrapper for AudioTrackInterface.
+@interface RTCAudioTrack : RTCMediaStreamTrack
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// Disallow init and don't add to documentation
+- (id)init __attribute__(
+ (unavailable("init is not a supported initializer for this class.")));
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCI420Frame.h b/talk/app/webrtc/objc/public/RTCI420Frame.h
new file mode 100644
index 0000000..bf58085
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCI420Frame.h
@@ -0,0 +1,36 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+// RTCI420Frame is an ObjectiveC version of cricket::VideoFrame.
+@interface RTCI420Frame : NSObject
+
+// TODO(hughv): Implement this when iOS VP8 is ready.
+
+@end
+
diff --git a/talk/app/webrtc/objc/public/RTCIceCandidate.h b/talk/app/webrtc/objc/public/RTCIceCandidate.h
new file mode 100644
index 0000000..f3f2c16
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCIceCandidate.h
@@ -0,0 +1,56 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+// RTCICECandidate contains an instance of ICECandidateInterface.
+@interface RTCICECandidate : NSObject
+
+// If present, this contains the identifier of the "media stream
+// identification" as defined in [RFC 3388] for the m-line this candidate is
+// associated with.
+@property(nonatomic, copy, readonly) NSString *sdpMid;
+
+// This indicates the index (starting at zero) of the m-line in the SDP this
+// candidate is associated with.
+@property(nonatomic, assign, readonly) NSInteger sdpMLineIndex;
+
+// Creates an SDP-ized form of this candidate.
+@property(nonatomic, copy, readonly) NSString *sdp;
+
+// Creates an ICECandidateInterface based on SDP string.
+- (id)initWithMid:(NSString *)sdpMid
+ index:(NSInteger)sdpMLineIndex
+ sdp:(NSString *)sdp;
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// Disallow init and don't add to documentation
+- (id)init __attribute__(
+ (unavailable("init is not a supported initializer for this class.")));
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCIceServer.h b/talk/app/webrtc/objc/public/RTCIceServer.h
new file mode 100644
index 0000000..01ad9b5
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCIceServer.h
@@ -0,0 +1,48 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+// RTCICEServer allows for the creation of ICEServer structs.
+@interface RTCICEServer : NSObject
+
+// The server URI.
+@property(nonatomic, strong, readonly) NSURL *URI;
+
+// The server password.
+@property(nonatomic, copy, readonly) NSString *password;
+
+// Initializer for RTCICEServer taking uri and password.
+- (id)initWithURI:(NSString *)URI password:(NSString *)password;
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// Disallow init and don't add to documentation
+- (id)init __attribute__(
+ (unavailable("init is not a supported initializer for this class.")));
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCMediaConstraints.h b/talk/app/webrtc/objc/public/RTCMediaConstraints.h
new file mode 100644
index 0000000..89d2c3b
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCMediaConstraints.h
@@ -0,0 +1,39 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+// RTCMediaConstraints contains the media constraints to be used in
+// RTCPeerConnection and RTCMediaStream.
+@interface RTCMediaConstraints : NSObject
+
+// Initializer for RTCMediaConstraints. The parameters mandatory and optional
+// contain RTCPair objects with key/value for each constraint.
+- (id)initWithMandatoryConstraints:(NSArray *)mandatory
+ optionalConstraints:(NSArray *)optional;
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCMediaSource.h b/talk/app/webrtc/objc/public/RTCMediaSource.h
new file mode 100644
index 0000000..be3ad32
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCMediaSource.h
@@ -0,0 +1,44 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCTypes.h"
+
+// RTCMediaSource is an ObjectiveC wrapper for MediaSourceInterface
+@interface RTCMediaSource : NSObject
+
+// The current state of the RTCMediaSource.
+@property (nonatomic, assign, readonly)RTCSourceState state;
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// Disallow init and don't add to documentation
+- (id)init __attribute__(
+ (unavailable("init is not a supported initializer for this class.")));
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCMediaStream.h b/talk/app/webrtc/objc/public/RTCMediaStream.h
new file mode 100644
index 0000000..cd10321
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCMediaStream.h
@@ -0,0 +1,51 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+@class RTCAudioTrack;
+@class RTCVideoTrack;
+
+// RTCMediaStream is an ObjectiveC wrapper for MediaStreamInterface.
+@interface RTCMediaStream : NSObject
+
+@property(nonatomic, strong, readonly) NSArray *audioTracks;
+@property(nonatomic, strong, readonly) NSArray *videoTracks;
+@property(nonatomic, strong, readonly) NSString *label;
+
+- (BOOL)addAudioTrack:(RTCAudioTrack *)track;
+- (BOOL)addVideoTrack:(RTCVideoTrack *)track;
+- (BOOL)removeAudioTrack:(RTCAudioTrack *)track;
+- (BOOL)removeVideoTrack:(RTCVideoTrack *)track;
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// Disallow init and don't add to documentation
+- (id)init __attribute__(
+ (unavailable("init is not a supported initializer for this class.")));
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCMediaStreamTrack.h b/talk/app/webrtc/objc/public/RTCMediaStreamTrack.h
new file mode 100644
index 0000000..f8f9369
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCMediaStreamTrack.h
@@ -0,0 +1,51 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCTypes.h"
+
+// RTCMediaStreamTrack implements the interface common to RTCAudioTrack and
+// RTCVideoTrack. Do not create an instance of this class, rather create one
+// of the derived classes.
+@interface RTCMediaStreamTrack : NSObject
+
+@property(nonatomic, assign, readonly) NSString *kind;
+@property(nonatomic, assign, readonly) NSString *label;
+
+- (BOOL)isEnabled;
+- (BOOL)setEnabled:(BOOL)enabled;
+- (RTCTrackState)state;
+- (BOOL)setState:(RTCTrackState)state;
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// Disallow init and don't add to documentation
+- (id)init __attribute__(
+ (unavailable("init is not a supported initializer for this class.")));
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCPair.h b/talk/app/webrtc/objc/public/RTCPair.h
new file mode 100644
index 0000000..bb57e02
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCPair.h
@@ -0,0 +1,45 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+// A class to hold a key and value.
+@interface RTCPair : NSObject
+
+@property(nonatomic, strong, readonly) NSString *key;
+@property(nonatomic, strong, readonly) NSString *value;
+
+// Initialize a RTCPair object with a key and value.
+- (id)initWithKey:(NSString *)key value:(NSString *)value;
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// Disallow init and don't add to documentation
+- (id)init __attribute__(
+ (unavailable("init is not a supported initializer for this class.")));
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCPeerConnection.h b/talk/app/webrtc/objc/public/RTCPeerConnection.h
new file mode 100644
index 0000000..c66bac8
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCPeerConnection.h
@@ -0,0 +1,110 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCPeerConnectionDelegate.h"
+
+@class RTCICECandidate;
+@class RTCICEServers;
+@class RTCMediaConstraints;
+@class RTCMediaStream;
+@class RTCSessionDescription;
+@protocol RTCSessionDescriptonDelegate;
+
+// RTCPeerConnection is an ObjectiveC friendly wrapper around a PeerConnection
+// object. See the documentation in talk/app/webrtc/peerconnectioninterface.h.
+// or http://www.webrtc.org/reference/native-apis, which in turn is inspired by
+// the JS APIs: http://dev.w3.org/2011/webrtc/editor/webrtc.html and
+// http://www.w3.org/TR/mediacapture-streams/
+@interface RTCPeerConnection : NSObject
+
+// Accessor methods to active local streams.
+@property(nonatomic, strong, readonly) NSArray *localStreams;
+
+// The local description.
+@property(nonatomic, assign, readonly) RTCSessionDescription *localDescription;
+
+// The remote description.
+@property(nonatomic, assign, readonly) RTCSessionDescription *remoteDescription;
+
+// The current signaling state.
+@property(nonatomic, assign, readonly) RTCSignalingState signalingState;
+@property(nonatomic, assign, readonly) RTCICEConnectionState iceConnectionState;
+@property(nonatomic, assign, readonly) RTCICEGatheringState iceGatheringState;
+
+// Add a new MediaStream to be sent on this PeerConnection.
+// Note that a SessionDescription negotiation is needed before the
+// remote peer can receive the stream.
+- (BOOL)addStream:(RTCMediaStream *)stream
+ constraints:(RTCMediaConstraints *)constraints;
+
+// Remove a MediaStream from this PeerConnection.
+// Note that a SessionDescription negotiation is needed before the
+// remote peer is notified.
+- (void)removeStream:(RTCMediaStream *)stream;
+
+// Create a new offer.
+// Success or failure will be reported via RTCSessionDescriptonDelegate.
+- (void)createOfferWithDelegate:(id<RTCSessionDescriptonDelegate>)delegate
+ constraints:(RTCMediaConstraints *)constraints;
+
+// Create an answer to an offer.
+// Success or failure will be reported via RTCSessionDescriptonDelegate.
+- (void)createAnswerWithDelegate:(id<RTCSessionDescriptonDelegate>)delegate
+ constraints:(RTCMediaConstraints *)constraints;
+
+// Sets the local session description.
+// Success or failure will be reported via RTCSessionDescriptonDelegate.
+- (void)
+ setLocalDescriptionWithDelegate:(id<RTCSessionDescriptonDelegate>)delegate
+ sessionDescription:(RTCSessionDescription *)sdp;
+
+// Sets the remote session description.
+// Success or failure will be reported via RTCSessionDescriptonDelegate.
+- (void)
+ setRemoteDescriptionWithDelegate:(id<RTCSessionDescriptonDelegate>)delegate
+ sessionDescription:(RTCSessionDescription *)sdp;
+
+// Restarts or updates the ICE Agent process of gathering local candidates
+// and pinging remote candidates.
+- (BOOL)updateICEServers:(NSArray *)servers
+ constraints:(RTCMediaConstraints *)constraints;
+
+// Provides a remote candidate to the ICE Agent.
+- (BOOL)addICECandidate:(RTCICECandidate *)candidate;
+
+// Terminates all media and closes the transport.
+- (void)close;
+
+// TODO(hughv): Implement GetStats.
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// Disallow init and don't add to documentation
+- (id)init __attribute__(
+ (unavailable("init is not a supported initializer for this class.")));
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCPeerConnectionDelegate.h b/talk/app/webrtc/objc/public/RTCPeerConnectionDelegate.h
new file mode 100644
index 0000000..b3bb881
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCPeerConnectionDelegate.h
@@ -0,0 +1,70 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCTypes.h"
+
+@class RTCICECandidate;
+@class RTCMediaStream;
+@class RTCPeerConnection;
+
+// RTCPeerConnectionDelegate is a protocol for an object that must be
+// implemented to get messages from PeerConnection.
+@protocol RTCPeerConnectionDelegate<NSObject>
+
+// Triggered when there is an error.
+- (void)peerConnectionOnError:(RTCPeerConnection *)peerConnection;
+
+// Triggered when the SignalingState changed.
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+ signalingStateChanged:(RTCSignalingState)stateChanged;
+
+// Triggered when media is received on a new stream from remote peer.
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+ addedStream:(RTCMediaStream *)stream;
+
+// Triggered when a remote peer closes a stream.
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+ removedStream:(RTCMediaStream *)stream;
+
+// Triggered when renegotiation is needed, for example the ICE has restarted.
+- (void)peerConnectionOnRenegotiationNeeded:(RTCPeerConnection *)peerConnection;
+
+// Called any time the ICEConnectionState changes.
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+ iceConnectionChanged:(RTCICEConnectionState)newState;
+
+// Called any time the ICEGatheringState changes.
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+ iceGatheringChanged:(RTCICEGatheringState)newState;
+
+// New ICE candidates have been found.
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+ gotICECandidate:(RTCICECandidate *)candidate;
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCPeerConnectionFactory.h b/talk/app/webrtc/objc/public/RTCPeerConnectionFactory.h
new file mode 100644
index 0000000..0f48299
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCPeerConnectionFactory.h
@@ -0,0 +1,67 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+@class RTCAudioTrack;
+@class RTCMediaConstraints;
+@class RTCMediaStream;
+@class RTCPeerConnection;
+@class RTCVideoCapturer;
+@class RTCVideoSource;
+@class RTCVideoTrack;
+@protocol RTCPeerConnectionDelegate;
+
+// RTCPeerConnectionFactory is an ObjectiveC wrapper for PeerConnectionFactory.
+// It is the main entry point to the PeerConnection API for clients.
+@interface RTCPeerConnectionFactory : NSObject
+
+// Create an RTCPeerConnection object. RTCPeerConnectionFactory will create
+// required libjingle threads, socket and network manager factory classes for
+// networking.
+- (RTCPeerConnection *)
+ peerConnectionWithICEServers:(NSArray *)servers
+ constraints:(RTCMediaConstraints *)constraints
+ delegate:(id<RTCPeerConnectionDelegate>)delegate;
+
+// Create an RTCMediaStream named |label|.
+- (RTCMediaStream *)mediaStreamWithLabel:(NSString *)label;
+
+// Creates a RTCVideoSource. The new source takes ownership of |capturer|.
+// |constraints| decides video resolution and frame rate but can be NULL.
+- (RTCVideoSource *)videoSourceWithCapturer:(RTCVideoCapturer *)capturer
+ constraints:(RTCMediaConstraints *)constraints;
+
+// Creates a new local VideoTrack. The same |source| can be used in several
+// tracks.
+- (RTCVideoTrack *)videoTrackWithID:(NSString *)videoId
+ source:(RTCVideoSource *)source;
+
+// Creates a new AudioTrack.
+- (RTCAudioTrack *)audioTrackWithID:(NSString *)audioId;
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCSessionDescription.h b/talk/app/webrtc/objc/public/RTCSessionDescription.h
new file mode 100644
index 0000000..ffe8fbe
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCSessionDescription.h
@@ -0,0 +1,50 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+// Description of an RFC 4566 Session.
+// RTCSessionDescription is an ObjectiveC wrapper for
+// SessionDescriptionInterface.
+@interface RTCSessionDescription : NSObject
+
+// The SDP description.
+@property(nonatomic, copy, readonly) NSString *description;
+
+// The session type.
+@property(nonatomic, copy, readonly) NSString *type;
+
+- (id)initWithType:(NSString *)type sdp:(NSString *)sdp;
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// Disallow init and don't add to documentation
+- (id)init __attribute__(
+ (unavailable("init is not a supported initializer for this class.")));
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+@end
+
diff --git a/talk/app/webrtc/objc/public/RTCSessionDescriptonDelegate.h b/talk/app/webrtc/objc/public/RTCSessionDescriptonDelegate.h
new file mode 100644
index 0000000..409aaee
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCSessionDescriptonDelegate.h
@@ -0,0 +1,49 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+@class RTCPeerConnection;
+@class RTCSessionDescription;
+
+extern NSString* const kRTCSessionDescriptionDelegateErrorDomain;
+extern int const kRTCSessionDescriptionDelegateErrorCode;
+
+// RTCSessionDescriptonDelegate is a protocol for listening to callback messages
+// when RTCSessionDescriptions are created or set.
+@protocol RTCSessionDescriptonDelegate<NSObject>
+
+// Called when creating a session.
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+ didCreateSessionDescription:(RTCSessionDescription *)sdp
+ error:(NSError *)error;
+
+// Called when setting a local or remote description.
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+ didSetSessionDescriptionWithError:(NSError *)error;
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCTypes.h b/talk/app/webrtc/objc/public/RTCTypes.h
new file mode 100644
index 0000000..8ff8bf4
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCTypes.h
@@ -0,0 +1,72 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Enums that are common to the ObjectiveC version of the PeerConnection API.
+
+// RTCICEConnectionState corresponds to the states in webrtc::ICEConnectionState.
+typedef enum {
+ RTCICEConnectionNew,
+ RTCICEConnectionChecking,
+ RTCICEConnectionConnected,
+ RTCICEConnectionCompleted,
+ RTCICEConnectionFailed,
+ RTCICEConnectionDisconnected,
+ RTCICEConnectionClosed,
+} RTCICEConnectionState;
+
+// RTCICEGatheringState corresponds to the states in webrtc::ICEGatheringState.
+typedef enum {
+ RTCICEGatheringNew,
+ RTCICEGatheringGathering,
+ RTCICEGatheringComplete,
+} RTCICEGatheringState;
+
+// RTCSignalingState corresponds to the states in webrtc::SignalingState.
+typedef enum {
+ RTCSignalingStable,
+ RTCSignalingHaveLocalOffer,
+ RTCSignalingHaveLocalPrAnswer,
+ RTCSignalingHaveRemoteOffer,
+ RTCSignalingHaveRemotePrAnswer,
+ RTCSignalingClosed,
+} RTCSignalingState;
+
+// RTCSourceState corresponds to the states in webrtc::SourceState.
+typedef enum {
+ RTCSourceStateInitializing,
+ RTCSourceStateLive,
+ RTCSourceStateEnded,
+ RTCSourceStateMuted,
+} RTCSourceState;
+
+// RTCTrackState corresponds to the states in webrtc::TrackState.
+typedef enum {
+ RTCTrackStateInitializing,
+ RTCTrackStateLive,
+ RTCTrackStateEnded,
+ RTCTrackStateFailed,
+} RTCTrackState;
diff --git a/talk/app/webrtc/objc/public/RTCVideoCapturer.h b/talk/app/webrtc/objc/public/RTCVideoCapturer.h
new file mode 100644
index 0000000..7321d57
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCVideoCapturer.h
@@ -0,0 +1,42 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+// RTCVideoCapturer is an ObjectiveC wrapper for VideoCapturerInterface.
+@interface RTCVideoCapturer : NSObject
+
+// Create a new video capturer using the specified device.
++ (RTCVideoCapturer *)capturerWithDeviceName:(NSString *)deviceName;
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// Disallow init and don't add to documentation
+- (id)init __attribute__(
+ (unavailable("init is not a supported initializer for this class.")));
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCVideoRenderer.h b/talk/app/webrtc/objc/public/RTCVideoRenderer.h
new file mode 100644
index 0000000..cc7ba71
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCVideoRenderer.h
@@ -0,0 +1,52 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+@protocol RTCVideoRendererDelegate;
+struct CGRect;
+
+// Interface for rendering VideoFrames from a VideoTrack.
+@interface RTCVideoRenderer : NSObject
+
+@property(nonatomic, strong) id<RTCVideoRendererDelegate> delegate;
+
+// A convenience method to create a renderer and window and render frames into
+// that window.
++ (RTCVideoRenderer *)videoRenderGUIWithFrame:(CGRect)frame;
+
+// Initialize the renderer. Requires a delegate which does the actual drawing
+// of frames.
+- (id)initWithDelegate:(id<RTCVideoRendererDelegate>)delegate;
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// Disallow init and don't add to documentation
+- (id)init __attribute__(
+ (unavailable("init is not a supported initializer for this class.")));
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCVideoRendererDelegate.h b/talk/app/webrtc/objc/public/RTCVideoRendererDelegate.h
new file mode 100644
index 0000000..af72bde
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCVideoRendererDelegate.h
@@ -0,0 +1,44 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+@class RTCI420Frame;
+@class RTCVideoRenderer;
+
+// RTCVideoRendererDelegate is a protocol for an object that must be
+// implemented to get messages when rendering.
+@protocol RTCVideoRendererDelegate<NSObject>
+
+// The size of the frame.
+- (void)videoRenderer:(RTCVideoRenderer *)videoRenderer setSize:(CGSize)size;
+
+// The frame to be displayed.
+- (void)videoRenderer:(RTCVideoRenderer *)videoRenderer
+ renderFrame:(RTCI420Frame *)frame;
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCVideoSource.h b/talk/app/webrtc/objc/public/RTCVideoSource.h
new file mode 100644
index 0000000..8de8068
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCVideoSource.h
@@ -0,0 +1,39 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCMediaSource.h"
+
+// RTCVideoSource is an ObjectiveC wrapper for VideoSourceInterface.
+@interface RTCVideoSource : RTCMediaSource
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// Disallow init and don't add to documentation
+- (id)init __attribute__(
+ (unavailable("init is not a supported initializer for this class.")));
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+@end
diff --git a/talk/app/webrtc/objc/public/RTCVideoTrack.h b/talk/app/webrtc/objc/public/RTCVideoTrack.h
new file mode 100644
index 0000000..291c923
--- /dev/null
+++ b/talk/app/webrtc/objc/public/RTCVideoTrack.h
@@ -0,0 +1,50 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "RTCMediaStreamTrack.h"
+
+@class RTCVideoRenderer;
+
+// RTCVideoTrack is an ObjectiveC wrapper for VideoTrackInterface.
+@interface RTCVideoTrack : RTCMediaStreamTrack
+
+// The currently registered renderers.
+@property(nonatomic, strong, readonly) NSArray *renderers;
+
+// Register a renderer that will render all frames received on this track.
+- (void)addRenderer:(RTCVideoRenderer *)renderer;
+
+// Deregister a renderer.
+- (void)removeRenderer:(RTCVideoRenderer *)renderer;
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// Disallow init and don't add to documentation
+- (id)init __attribute__(
+ (unavailable("init is not a supported initializer for this class.")));
+#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+@end
diff --git a/talk/app/webrtc/objctests/Info.plist b/talk/app/webrtc/objctests/Info.plist
new file mode 100644
index 0000000..0b1583e
--- /dev/null
+++ b/talk/app/webrtc/objctests/Info.plist
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>en</string>
+ <key>CFBundleDisplayName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIdentifier</key>
+ <string>com.Google.${PRODUCT_NAME:rfc1034identifier}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1.0</string>
+ <key>LSRequiresIPhoneOS</key>
+ <true/>
+ <key>UIRequiredDeviceCapabilities</key>
+ <array>
+ <string>armv7</string>
+ </array>
+ <key>UISupportedInterfaceOrientations</key>
+ <array>
+ <string>UIInterfaceOrientationPortrait</string>
+ <string>UIInterfaceOrientationLandscapeLeft</string>
+ <string>UIInterfaceOrientationLandscapeRight</string>
+ </array>
+</dict>
+</plist>
diff --git a/talk/app/webrtc/objctests/RTCPeerConnectionSyncObserver.h b/talk/app/webrtc/objctests/RTCPeerConnectionSyncObserver.h
new file mode 100644
index 0000000..db97816
--- /dev/null
+++ b/talk/app/webrtc/objctests/RTCPeerConnectionSyncObserver.h
@@ -0,0 +1,53 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCPeerConnectionDelegate.h"
+
+// Observer of PeerConnection events, used by RTCPeerConnectionTest to check
+// expectations.
+@interface RTCPeerConnectionSyncObserver : NSObject<RTCPeerConnectionDelegate>
+// TODO(hughv): Add support for RTCVideoRendererDelegate when Video is enabled.
+
+// Transfer received ICE candidates to the caller.
+- (NSArray*)releaseReceivedICECandidates;
+
+// Register expectations for events that this observer should see before it can
+// be considered satisfied (see below).
+- (void)expectError;
+- (void)expectSignalingChange:(RTCSignalingState)state;
+- (void)expectAddStream:(NSString *)label;
+- (void)expectRemoveStream:(NSString *)label;
+- (void)expectICECandidates:(int)count;
+- (void)expectICEConnectionChange:(RTCICEConnectionState)state;
+- (void)expectICEGatheringChange:(RTCICEGatheringState)state;
+
+// Wait until all registered expectations above have been observed.
+- (void)waitForAllExpectationsToBeSatisfied;
+
+@end
diff --git a/talk/app/webrtc/objctests/RTCPeerConnectionSyncObserver.m b/talk/app/webrtc/objctests/RTCPeerConnectionSyncObserver.m
new file mode 100644
index 0000000..0f33bac
--- /dev/null
+++ b/talk/app/webrtc/objctests/RTCPeerConnectionSyncObserver.m
@@ -0,0 +1,190 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+#import "RTCPeerConnectionSyncObserver.h"
+
+#import "RTCMediaStream.h"
+
+// Records incoming RTCPeerConnectionDelegate callbacks and checks them
+// against expectations registered via the expect* methods. Valued
+// expectations (states, stream labels) are matched in FIFO order.
+@implementation RTCPeerConnectionSyncObserver {
+  int _expectedErrors;
+  NSMutableArray *_expectedSignalingChanges;
+  NSMutableArray *_expectedAddStreamLabels;
+  NSMutableArray *_expectedRemoveStreamLabels;
+  int _expectedICECandidates;
+  NSMutableArray *_receivedICECandidates;
+  NSMutableArray *_expectedICEConnectionChanges;
+  NSMutableArray *_expectedICEGatheringChanges;
+}
+
+- (id)init {
+  self = [super init];
+  if (self) {
+    _expectedSignalingChanges = [NSMutableArray array];
+    _expectedAddStreamLabels = [NSMutableArray array];
+    _expectedRemoveStreamLabels = [NSMutableArray array];
+    _receivedICECandidates = [NSMutableArray array];
+    _expectedICEConnectionChanges = [NSMutableArray array];
+    _expectedICEGatheringChanges = [NSMutableArray array];
+  }
+  return self;
+}
+
+// Pop and return the head of |array| as an int. Asserts if |array| is empty.
+- (int)popFirstElementAsInt:(NSMutableArray *)array {
+  NSAssert([array count] > 0, @"Empty array");
+  NSNumber *boxedState = [array objectAtIndex:0];
+  [array removeObjectAtIndex:0];
+  return [boxedState intValue];
+}
+
+// Pop and return the head of |array| as an NSString. Asserts if |array| is
+// empty.
+- (NSString *)popFirstElementAsNSString:(NSMutableArray *)array {
+  NSAssert([array count] > 0, @"Empty expectation array");
+  NSString *string = [array objectAtIndex:0];
+  [array removeObjectAtIndex:0];
+  return string;
+}
+
+// YES once every registered expectation has been consumed by a callback.
+- (BOOL)areAllExpectationsSatisfied {
+  return _expectedICECandidates <= 0 &&  // See comment in gotICECandidate.
+      _expectedErrors == 0 &&
+      [_expectedSignalingChanges count] == 0 &&
+      [_expectedICEConnectionChanges count] == 0 &&
+      [_expectedICEGatheringChanges count] == 0 &&
+      [_expectedAddStreamLabels count] == 0 &&
+      [_expectedRemoveStreamLabels count] == 0;
+  // TODO(hughv): Test video state here too.
+}
+
+- (NSArray *)releaseReceivedICECandidates {
+  // Hand the accumulated candidates to the caller and start a fresh list.
+  NSArray* ret = _receivedICECandidates;
+  _receivedICECandidates = [NSMutableArray array];
+  return ret;
+}
+
+- (void)expectError {
+  ++_expectedErrors;
+}
+
+- (void)expectSignalingChange:(RTCSignalingState)state {
+  [_expectedSignalingChanges addObject:@((int)state)];
+}
+
+- (void)expectAddStream:(NSString *)label {
+  [_expectedAddStreamLabels addObject:label];
+}
+
+- (void)expectRemoveStream:(NSString *)label {
+  [_expectedRemoveStreamLabels addObject:label];
+}
+
+- (void)expectICECandidates:(int)count {
+  _expectedICECandidates += count;
+}
+
+- (void)expectICEConnectionChange:(RTCICEConnectionState)state {
+  [_expectedICEConnectionChanges addObject:@((int)state)];
+}
+
+- (void)expectICEGatheringChange:(RTCICEGatheringState)state {
+  [_expectedICEGatheringChanges addObject:@((int)state)];
+}
+
+- (void)waitForAllExpectationsToBeSatisfied {
+  // TODO (fischman): Revisit.  Keeping in sync with the Java version, but
+  // polling is not optimal.
+  // https://code.google.com/p/libjingle/source/browse/trunk/talk/app/webrtc/javatests/src/org/webrtc/PeerConnectionTest.java?line=212#212
+  while (![self areAllExpectationsSatisfied]) {
+    [[NSRunLoop currentRunLoop]
+        runUntilDate:[NSDate dateWithTimeIntervalSinceNow:1]];
+  }
+}
+
+#pragma mark - RTCPeerConnectionDelegate methods
+
+- (void)peerConnectionOnError:(RTCPeerConnection *)peerConnection {
+  NSLog(@"RTCPeerConnectionDelegate::onError");
+  NSAssert(--_expectedErrors >= 0, @"Unexpected error");
+}
+
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+    signalingStateChanged:(RTCSignalingState)stateChanged {
+  int expectedState = [self popFirstElementAsInt:_expectedSignalingChanges];
+  NSString *message = [NSString stringWithFormat: @"RTCPeerConnectionDelegate::"
+      @"onSignalingStateChange [%d] expected[%d]", stateChanged, expectedState];
+  // Pass |message| through an explicit format specifier: NSAssert treats its
+  // second argument as a format string, so a variable must not be used
+  // directly (format-string vulnerability / -Wformat-security).
+  NSAssert(expectedState == (int) stateChanged, @"%@", message);
+}
+
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+           addedStream:(RTCMediaStream *)stream {
+  NSString *expectedLabel =
+      [self popFirstElementAsNSString:_expectedAddStreamLabels];
+  NSAssert([expectedLabel isEqual:stream.label], @"Stream not expected");
+}
+
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+         removedStream:(RTCMediaStream *)stream {
+  NSString *expectedLabel =
+      [self popFirstElementAsNSString:_expectedRemoveStreamLabels];
+  NSAssert([expectedLabel isEqual:stream.label], @"Stream not expected");
+}
+
+- (void)peerConnectionOnRenegotiationNeeded:
+    (RTCPeerConnection *)peerConnection {
+}
+
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+       gotICECandidate:(RTCICECandidate *)candidate {
+  --_expectedICECandidates;
+  // We don't assert expectedICECandidates >= 0 because it's hard to know
+  // how many to expect, in general.  We only use expectICECandidates to
+  // assert a minimal count.
+  [_receivedICECandidates addObject:candidate];
+}
+
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+    iceGatheringChanged:(RTCICEGatheringState)newState {
+  // It's fine to get a variable number of GATHERING messages before
+  // COMPLETE fires (depending on how long the test runs) so we don't assert
+  // any particular count.
+  if (newState == RTCICEGatheringGathering) {
+    return;
+  }
+  int expectedState = [self popFirstElementAsInt:_expectedICEGatheringChanges];
+  NSAssert(expectedState == (int)newState, @"Unexpected ICE gathering state");
+}
+
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+    iceConnectionChanged:(RTCICEConnectionState)newState {
+  int expectedState = [self popFirstElementAsInt:_expectedICEConnectionChanges];
+  NSAssert(expectedState == (int)newState, @"Unexpected ICE connection state");
+}
+
+@end
diff --git a/talk/app/webrtc/objctests/RTCPeerConnectionTest.mm b/talk/app/webrtc/objctests/RTCPeerConnectionTest.mm
new file mode 100644
index 0000000..826409f5
--- /dev/null
+++ b/talk/app/webrtc/objctests/RTCPeerConnectionTest.mm
@@ -0,0 +1,235 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCICEServer.h"
+#import "RTCMediaConstraints.h"
+#import "RTCMediaStream.h"
+#import "RTCPeerConnection.h"
+#import "RTCPeerConnectionFactory.h"
+#import "RTCPeerConnectionSyncObserver.h"
+#import "RTCSessionDescription.h"
+#import "RTCSessionDescriptionSyncObserver.h"
+#import "RTCVideoRenderer.h"
+#import "RTCVideoTrack.h"
+
+#include "talk/base/gunit.h"
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+// Driver object for a full offer/answer PeerConnection session test; invoked
+// from the gtest TEST() at the bottom of this file.
+@interface RTCPeerConnectionTest : NSObject
+
+// Returns whether the two sessions are of the same type.
++ (BOOL)isSession:(RTCSessionDescription *)session1
+    ofSameTypeAsSession:(RTCSessionDescription *)session2;
+
+// Create and add tracks to pc, with the given source, label, and IDs.
+// Returns the locally-created media stream (already added to |pc|).
+- (RTCMediaStream *)
+    addTracksToPeerConnection:(RTCPeerConnection *)pc
+                  withFactory:(RTCPeerConnectionFactory *)factory
+                  videoSource:(RTCVideoSource *)videoSource
+                  streamLabel:(NSString *)streamLabel
+                 videoTrackID:(NSString *)videoTrackID
+                 audioTrackID:(NSString *)audioTrackID;
+
+// Run a complete offer/answer exchange between two local peer connections.
+- (void)testCompleteSession;
+
+@end
+
+@implementation RTCPeerConnectionTest
+
+// Two sessions "match" when their SDP type strings (offer/answer) are equal.
++ (BOOL)isSession:(RTCSessionDescription *)session1
+    ofSameTypeAsSession:(RTCSessionDescription *)session2 {
+  return [session1.type isEqual:session2.type];
+}
+
+- (RTCMediaStream *)
+    addTracksToPeerConnection:(RTCPeerConnection *)pc
+                  withFactory:(RTCPeerConnectionFactory *)factory
+                  videoSource:(RTCVideoSource *)videoSource
+                  streamLabel:(NSString *)streamLabel
+                 videoTrackID:(NSString *)videoTrackID
+                 audioTrackID:(NSString *)audioTrackID {
+  RTCMediaStream *localMediaStream = [factory mediaStreamWithLabel:streamLabel];
+  RTCVideoTrack *videoTrack =
+      [factory videoTrackWithID:videoTrackID source:videoSource];
+  RTCVideoRenderer *videoRenderer =
+      [[RTCVideoRenderer alloc] initWithDelegate:nil];
+  [videoTrack addRenderer:videoRenderer];
+  [localMediaStream addVideoTrack:videoTrack];
+  // Test that removal/re-add works.
+  [localMediaStream removeVideoTrack:videoTrack];
+  [localMediaStream addVideoTrack:videoTrack];
+  RTCAudioTrack *audioTrack = [factory audioTrackWithID:audioTrackID];
+  [localMediaStream addAudioTrack:audioTrack];
+  RTCMediaConstraints *constraints = [[RTCMediaConstraints alloc] init];
+  [pc addStream:localMediaStream constraints:constraints];
+  return localMediaStream;
+}
+
+// Walks both peers through a full session: create offer, exchange SDP,
+// create answer, exchange ICE candidates, then wait for all registered
+// expectations on both observers. The ordering of the steps below mirrors
+// the JSEP offer/answer state machine; do not reorder.
+- (void)testCompleteSession {
+  RTCPeerConnectionFactory *factory = [[RTCPeerConnectionFactory alloc] init];
+  NSString *stunURL = @"stun:stun.l.google.com:19302";
+  RTCICEServer *stunServer =
+      [[RTCICEServer alloc] initWithURI:[NSURL URLWithString:stunURL]
+                               password:@""];
+  NSArray *iceServers = @[stunServer];
+
+  // Set up the two peer connections, each with its own synchronous observer.
+  RTCMediaConstraints *constraints = [[RTCMediaConstraints alloc] init];
+  RTCPeerConnectionSyncObserver *offeringExpectations =
+      [[RTCPeerConnectionSyncObserver alloc] init];
+  RTCPeerConnection *pcOffer =
+      [factory peerConnectionWithICEServers:iceServers
+                                constraints:constraints
+                                   delegate:offeringExpectations];
+
+  RTCPeerConnectionSyncObserver *answeringExpectations =
+      [[RTCPeerConnectionSyncObserver alloc] init];
+  RTCPeerConnection *pcAnswer =
+      [factory peerConnectionWithICEServers:iceServers
+                                constraints:constraints
+                                   delegate:answeringExpectations];
+
+  // TODO(hughv): Create video capturer
+  RTCVideoCapturer *capturer = nil;
+  RTCVideoSource *videoSource =
+      [factory videoSourceWithCapturer:capturer constraints:constraints];
+
+  // Here and below, "oLMS" refers to offerer's local media stream, and "aLMS"
+  // refers to the answerer's local media stream, with suffixes of "a0" and "v0"
+  // for audio and video tracks, resp.  These mirror chrome historical naming.
+  RTCMediaStream *oLMSUnused =
+      [self addTracksToPeerConnection:pcOffer
+                          withFactory:factory
+                          videoSource:videoSource
+                          streamLabel:@"oLMS"
+                         videoTrackID:@"oLMSv0"
+                         audioTrackID:@"oLMSa0"];
+  // Phase 1: offerer creates the offer SDP.
+  RTCSessionDescriptionSyncObserver *sdpObserver =
+      [[RTCSessionDescriptionSyncObserver alloc] init];
+  [pcOffer createOfferWithDelegate:sdpObserver constraints:constraints];
+  [sdpObserver wait];
+  EXPECT_TRUE(sdpObserver.success);
+  RTCSessionDescription *offerSDP = sdpObserver.sessionDescription;
+  EXPECT_EQ([@"offer" compare:offerSDP.type options:NSCaseInsensitiveSearch],
+            NSOrderedSame);
+  EXPECT_GT([offerSDP.description length], 0);
+
+  // Phase 2: answerer receives the offer as its remote description.
+  sdpObserver = [[RTCSessionDescriptionSyncObserver alloc] init];
+  [answeringExpectations
+      expectSignalingChange:RTCSignalingHaveRemoteOffer];
+  [answeringExpectations expectAddStream:@"oLMS"];
+  [pcAnswer setRemoteDescriptionWithDelegate:sdpObserver
+                          sessionDescription:offerSDP];
+  [sdpObserver wait];
+
+  RTCMediaStream *aLMSUnused =
+      [self addTracksToPeerConnection:pcAnswer
+                          withFactory:factory
+                          videoSource:videoSource
+                          streamLabel:@"aLMS"
+                         videoTrackID:@"aLMSv0"
+                         audioTrackID:@"aLMSa0"];
+
+  // Phase 3: answerer creates the answer SDP.
+  sdpObserver = [[RTCSessionDescriptionSyncObserver alloc] init];
+  [pcAnswer createAnswerWithDelegate:sdpObserver constraints:constraints];
+  [sdpObserver wait];
+  EXPECT_TRUE(sdpObserver.success);
+  RTCSessionDescription *answerSDP = sdpObserver.sessionDescription;
+  EXPECT_EQ([@"answer" compare:answerSDP.type options:NSCaseInsensitiveSearch],
+            NSOrderedSame);
+  EXPECT_GT([answerSDP.description length], 0);
+
+  [offeringExpectations expectICECandidates:2];
+  [answeringExpectations expectICECandidates:2];
+
+  // Phase 4: both sides set their local descriptions.
+  sdpObserver = [[RTCSessionDescriptionSyncObserver alloc] init];
+  [answeringExpectations expectSignalingChange:RTCSignalingStable];
+  [pcAnswer setLocalDescriptionWithDelegate:sdpObserver
+                         sessionDescription:answerSDP];
+  [sdpObserver wait];
+  EXPECT_TRUE(sdpObserver.sessionDescription == NULL);
+
+  sdpObserver = [[RTCSessionDescriptionSyncObserver alloc] init];
+  [offeringExpectations expectSignalingChange:RTCSignalingHaveLocalOffer];
+  [pcOffer setLocalDescriptionWithDelegate:sdpObserver
+                        sessionDescription:offerSDP];
+  [sdpObserver wait];
+  EXPECT_TRUE(sdpObserver.sessionDescription == NULL);
+
+  [offeringExpectations expectICEConnectionChange:RTCICEConnectionChecking];
+  [offeringExpectations expectICEConnectionChange:RTCICEConnectionConnected];
+  [answeringExpectations expectICEConnectionChange:RTCICEConnectionChecking];
+  [answeringExpectations expectICEConnectionChange:RTCICEConnectionConnected];
+
+  [offeringExpectations expectICEGatheringChange:RTCICEGatheringComplete];
+  [answeringExpectations expectICEGatheringChange:RTCICEGatheringComplete];
+
+  // Phase 5: offerer receives the answer as its remote description.
+  sdpObserver = [[RTCSessionDescriptionSyncObserver alloc] init];
+  [offeringExpectations expectSignalingChange:RTCSignalingStable];
+  [offeringExpectations expectAddStream:@"aLMS"];
+  [pcOffer setRemoteDescriptionWithDelegate:sdpObserver
+                         sessionDescription:answerSDP];
+  [sdpObserver wait];
+  EXPECT_TRUE(sdpObserver.sessionDescription == NULL);
+
+  EXPECT_TRUE([offerSDP.type isEqual:pcOffer.localDescription.type]);
+  EXPECT_TRUE([answerSDP.type isEqual:pcOffer.remoteDescription.type]);
+  EXPECT_TRUE([offerSDP.type isEqual:pcAnswer.remoteDescription.type]);
+  EXPECT_TRUE([answerSDP.type isEqual:pcAnswer.localDescription.type]);
+
+  // Phase 6: trickle each side's gathered candidates to the other side.
+  for (RTCICECandidate *candidate in
+       offeringExpectations.releaseReceivedICECandidates) {
+    [pcAnswer addICECandidate:candidate];
+  }
+  for (RTCICECandidate *candidate in
+       answeringExpectations.releaseReceivedICECandidates) {
+    [pcOffer addICECandidate:candidate];
+  }
+
+  [offeringExpectations waitForAllExpectationsToBeSatisfied];
+  [answeringExpectations waitForAllExpectationsToBeSatisfied];
+
+  // Let the audio feedback run for 10s to allow human testing and to ensure
+  // things stabilize.  TODO(fischman): replace seconds with # of video frames,
+  // when we have video flowing.
+  [[NSRunLoop currentRunLoop]
+      runUntilDate:[NSDate dateWithTimeIntervalSinceNow:10]];
+
+  // TODO(hughv): Implement orderly shutdown.
+}
+
+@end
+
+
+// gtest entry point: drives the Objective-C test object above so the session
+// test runs under the regular gunit/gtest harness (see mac/main.mm).
+TEST(RTCPeerConnectionTest, SessionTest) {
+  RTCPeerConnectionTest *pcTest = [[RTCPeerConnectionTest alloc] init];
+  [pcTest testCompleteSession];
+}
diff --git a/talk/app/webrtc/objctests/RTCSessionDescriptionSyncObserver.h b/talk/app/webrtc/objctests/RTCSessionDescriptionSyncObserver.h
new file mode 100644
index 0000000..18d7902
--- /dev/null
+++ b/talk/app/webrtc/objctests/RTCSessionDescriptionSyncObserver.h
@@ -0,0 +1,49 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCSessionDescriptonDelegate.h"
+
+@class RTCSessionDescription;
+
+// Observer of SDP-related events, used by RTCPeerConnectionTest to check
+// expectations.
+// Observer of SDP-related events, used by RTCPeerConnectionTest to check
+// expectations. The properties below are populated by the delegate callbacks
+// in the implementation; -wait blocks the calling thread until one fires.
+@interface RTCSessionDescriptionSyncObserver : NSObject<
+    RTCSessionDescriptonDelegate>
+
+// Error string. May be nil.
+@property(atomic, copy) NSString *error;
+// Created session description. May be nil.
+@property(atomic, strong) RTCSessionDescription *sessionDescription;
+// Whether an SDP-related callback reported success.
+@property(atomic, assign) BOOL success;
+
+// Wait for an SDP-related callback to fire.
+- (void)wait;
+
+@end
diff --git a/talk/app/webrtc/objctests/RTCSessionDescriptionSyncObserver.m b/talk/app/webrtc/objctests/RTCSessionDescriptionSyncObserver.m
new file mode 100644
index 0000000..c04c1c3
--- /dev/null
+++ b/talk/app/webrtc/objctests/RTCSessionDescriptionSyncObserver.m
@@ -0,0 +1,97 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+#import "RTCSessionDescriptionSyncObserver.h"
+
+#import "RTCSessionDescription.h"
+
+// Private class extension holding the synchronization state.
+@interface RTCSessionDescriptionSyncObserver()
+
+// Condition variable used to wait for, and signal arrival of, an SDP-related
+// callback.
+@property(nonatomic, strong) NSCondition *condition;
+// Whether an SDP-related callback has fired; cleared before wait returns.
+@property(atomic, assign) BOOL signaled;
+
+@end
+
+@implementation RTCSessionDescriptionSyncObserver
+
+- (id)init {
+  if ((self = [super init])) {
+    if (!(_condition = [[NSCondition alloc] init]))
+      self = nil;
+  }
+  return self;
+}
+
+// Mark the callback as having fired and wake any waiter. Callers must hold
+// self.condition (both delegate methods below do).
+- (void)signal {
+  self.signaled = YES;
+  [self.condition signal];
+}
+
+// Block until an SDP callback fires, then clear the flag for reuse.
+- (void)wait {
+  [self.condition lock];
+  // Re-check the predicate in a loop: NSCondition (like POSIX condition
+  // variables) permits spurious wakeups, so a single `if` check could let
+  // -wait return before -signal was ever called.
+  while (!self.signaled)
+    [self.condition wait];
+  self.signaled = NO;
+  [self.condition unlock];
+}
+
+#pragma mark - RTCSessionDescriptonDelegate methods
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+    didCreateSessionDescription:(RTCSessionDescription *)sdp
+                          error:(NSError *)error {
+  [self.condition lock];
+  if (error) {
+    self.success = NO;
+    self.error = error.description;
+  } else {
+    self.success = YES;
+    self.sessionDescription = sdp;
+  }
+  [self signal];
+  [self.condition unlock];
+}
+
+- (void)peerConnection:(RTCPeerConnection *)peerConnection
+    didSetSessionDescriptionWithError:(NSError *)error {
+  [self.condition lock];
+  if (error) {
+    self.success = NO;
+    self.error = error.description;
+  } else {
+    self.success = YES;
+  }
+  [self signal];
+  [self.condition unlock];
+}
+
+@end
diff --git a/talk/app/webrtc/objctests/mac/main.mm b/talk/app/webrtc/objctests/mac/main.mm
new file mode 100644
index 0000000..3fb24f3
--- /dev/null
+++ b/talk/app/webrtc/objctests/mac/main.mm
@@ -0,0 +1,33 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/base/gunit.h"
+
+// Test-binary entry point: run all gtest tests registered via TEST().
+int main(int argc, char *argv[]) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/talk/app/webrtc/peerconnection.cc b/talk/app/webrtc/peerconnection.cc
new file mode 100644
index 0000000..6d3417a
--- /dev/null
+++ b/talk/app/webrtc/peerconnection.cc
@@ -0,0 +1,755 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/peerconnection.h"
+
+#include <vector>
+
+#include "talk/app/webrtc/dtmfsender.h"
+#include "talk/app/webrtc/jsepicecandidate.h"
+#include "talk/app/webrtc/jsepsessiondescription.h"
+#include "talk/app/webrtc/mediastreamhandler.h"
+#include "talk/app/webrtc/streamcollection.h"
+#include "talk/base/logging.h"
+#include "talk/base/stringencode.h"
+#include "talk/session/media/channelmanager.h"
+
+namespace {
+
+using webrtc::PeerConnectionInterface;
+
+// The min number of tokens in the ice uri (scheme + host).
+static const size_t kMinIceUriTokens = 2;
+// The min number of tokens that must be present in a TURN host uri,
+// e.g. user@turn.example.org
+static const size_t kTurnHostTokensNum = 2;
+// Number of tokens that must be present when a TURN uri has a transport param.
+static const size_t kTurnTransportTokensNum = 2;
+// The default stun port.
+static const int kDefaultPort = 3478;
+static const char kTransport[] = "transport";
+static const char kDefaultTransportType[] = "udp";
+
+// NOTE: Must be in the same order as the ServiceType enum.
+static const char* kValidIceServiceTypes[] = {
+    "stun", "stuns", "turn", "turns", "invalid" };
+
+enum ServiceType {
+  STUN,     // Indicates a STUN server.
+  STUNS,    // Indicates a STUN server used with a TLS session.
+  TURN,     // Indicates a TURN server
+  TURNS,    // Indicates a TURN server used with a TLS session.
+  INVALID,  // Unknown.
+};
+
+// Message IDs used when posting work/results between threads via
+// talk_base::MessageData subclasses (see structs below).
+enum {
+  MSG_CREATE_SESSIONDESCRIPTION_SUCCESS = 0,
+  MSG_CREATE_SESSIONDESCRIPTION_FAILED,
+  MSG_SET_SESSIONDESCRIPTION_SUCCESS,
+  MSG_SET_SESSIONDESCRIPTION_FAILED,
+  MSG_GETSTATS,
+  MSG_ICECONNECTIONCHANGE,
+  MSG_ICEGATHERINGCHANGE,
+  MSG_ICECANDIDATE,
+  MSG_ICECOMPLETE,
+};
+
+// Message payload carrying an ICE candidate; takes ownership of |candidate|
+// (held in a scoped_ptr).
+struct CandidateMsg : public talk_base::MessageData {
+  explicit CandidateMsg(const webrtc::JsepIceCandidate* candidate)
+      : candidate(candidate) {
+  }
+  talk_base::scoped_ptr<const webrtc::JsepIceCandidate> candidate;
+};
+
+// Message payload for CreateOffer/CreateAnswer results: holds a ref to the
+// observer to notify, the error string (on failure), and owns the created
+// description (on success).
+struct CreateSessionDescriptionMsg : public talk_base::MessageData {
+  explicit CreateSessionDescriptionMsg(
+      webrtc::CreateSessionDescriptionObserver* observer)
+      : observer(observer) {
+  }
+
+  talk_base::scoped_refptr<webrtc::CreateSessionDescriptionObserver> observer;
+  std::string error;
+  talk_base::scoped_ptr<webrtc::SessionDescriptionInterface> description;
+};
+
+// Message payload for SetLocal/SetRemoteDescription results: holds a ref to
+// the observer to notify and the error string (empty on success).
+struct SetSessionDescriptionMsg : public talk_base::MessageData {
+  explicit SetSessionDescriptionMsg(
+      webrtc::SetSessionDescriptionObserver* observer)
+      : observer(observer) {
+  }
+
+  talk_base::scoped_refptr<webrtc::SetSessionDescriptionObserver> observer;
+  std::string error;
+};
+
+// Message payload for a GetStats request: |reports| is filled in before the
+// observer is notified.
+struct GetStatsMsg : public talk_base::MessageData {
+  explicit GetStatsMsg(webrtc::StatsObserver* observer)
+      : observer(observer) {
+  }
+  webrtc::StatsReports reports;
+  talk_base::scoped_refptr<webrtc::StatsObserver> observer;
+};
+
+// Shorthand for the port-allocator configuration types used below.
+typedef webrtc::PortAllocatorFactoryInterface::StunConfiguration
+    StunConfiguration;
+typedef webrtc::PortAllocatorFactoryInterface::TurnConfiguration
+    TurnConfiguration;
+
+// Parses the ICE server URIs in |configuration| into STUN and TURN
+// configurations appended to |stun_config| / |turn_config|. Malformed or
+// unknown entries are skipped with a warning; returns false only for
+// recognized-but-unsupported configurations (e.g. "turns").
+bool ParseIceServers(const PeerConnectionInterface::IceServers& configuration,
+                     std::vector<StunConfiguration>* stun_config,
+                     std::vector<TurnConfiguration>* turn_config) {
+  // draft-nandakumar-rtcweb-stun-uri-01
+  // stunURI       = scheme ":" stun-host [ ":" stun-port ]
+  // scheme        = "stun" / "stuns"
+  // stun-host     = IP-literal / IPv4address / reg-name
+  // stun-port     = *DIGIT
+
+  // draft-petithuguenin-behave-turn-uris-01
+  // turnURI       = scheme ":" turn-host [ ":" turn-port ]
+  //                 [ "?transport=" transport ]
+  // scheme        = "turn" / "turns"
+  // transport     = "udp" / "tcp" / transport-ext
+  // transport-ext = 1*unreserved
+  // turn-host     = IP-literal / IPv4address / reg-name
+  // turn-port     = *DIGIT
+
+  // TODO(ronghuawu): Handle IPV6 address
+  for (size_t i = 0; i < configuration.size(); ++i) {
+    webrtc::PeerConnectionInterface::IceServer server = configuration[i];
+    if (server.uri.empty()) {
+      LOG(WARNING) << "Empty uri.";
+      continue;
+    }
+    std::vector<std::string> tokens;
+    std::string turn_transport_type = kDefaultTransportType;
+    talk_base::tokenize(server.uri, '?', &tokens);
+    std::string uri_without_transport = tokens[0];
+    // Let's look into transport= param, if it exists.
+    if (tokens.size() == kTurnTransportTokensNum) {  // ?transport= is present.
+      std::string uri_transport_param = tokens[1];
+      talk_base::tokenize(uri_transport_param, '=', &tokens);
+      // Guard against a malformed "?transport" with no "=value": tokenizing
+      // by '=' may yield a single token, in which case tokens[1] would be an
+      // out-of-bounds access.
+      if (tokens.size() == kTurnTransportTokensNum && tokens[0] == kTransport) {
+        turn_transport_type = tokens[1];
+      }
+    }
+
+    tokens.clear();
+    talk_base::tokenize(uri_without_transport, ':', &tokens);
+    if (tokens.size() < kMinIceUriTokens) {
+      LOG(WARNING) << "Invalid uri: " << server.uri;
+      continue;
+    }
+    ServiceType service_type = INVALID;
+    const std::string& type = tokens[0];
+    // Distinct index name so the outer loop variable |i| is not shadowed.
+    for (size_t j = 0; j < ARRAY_SIZE(kValidIceServiceTypes); ++j) {
+      if (type.compare(kValidIceServiceTypes[j]) == 0) {
+        service_type = static_cast<ServiceType>(j);
+        break;
+      }
+    }
+    if (service_type == INVALID) {
+      LOG(WARNING) << "Invalid service type: " << type;
+      continue;
+    }
+    std::string address = tokens[1];
+    int port = kDefaultPort;
+    if (tokens.size() > kMinIceUriTokens) {
+      if (!talk_base::FromString(tokens[2], &port)) {
+        LOG(LS_WARNING) << "Failed to parse port string: " << tokens[2];
+        continue;
+      }
+
+      if (port <= 0 || port > 0xffff) {
+        LOG(WARNING) << "Invalid port: " << port;
+        continue;
+      }
+    }
+
+    switch (service_type) {
+      case STUN:
+      case STUNS:
+        stun_config->push_back(StunConfiguration(address, port));
+        break;
+      case TURN: {
+        if (server.username.empty()) {
+          // Turn url example from the spec |url:"turn:user@turn.example.org"|.
+          std::vector<std::string> turn_tokens;
+          talk_base::tokenize(address, '@', &turn_tokens);
+          if (turn_tokens.size() == kTurnHostTokensNum) {
+            server.username = talk_base::s_url_decode(turn_tokens[0]);
+            address = turn_tokens[1];
+          }
+        }
+        turn_config->push_back(TurnConfiguration(address, port,
+                                                 server.username,
+                                                 server.password,
+                                                 turn_transport_type));
+        // STUN functionality is part of TURN.
+        stun_config->push_back(StunConfiguration(address, port));
+        break;
+      }
+      case TURNS:
+      case INVALID:
+      default:
+        LOG(WARNING) << "Configuration not supported: " << server.uri;
+        return false;
+    }
+  }
+  return true;
+}
+
+// Check if we can send |new_stream| on a PeerConnection.
+// Currently only one audio track but multiple video tracks are supported
+// per PeerConnection. Returns false for NULL arguments, duplicate stream
+// labels, or a second audio track.
+bool CanAddLocalMediaStream(webrtc::StreamCollectionInterface* current_streams,
+                            webrtc::MediaStreamInterface* new_stream) {
+  if (!new_stream || !current_streams)
+    return false;
+  // Reject a stream whose label is already registered.
+  if (current_streams->find(new_stream->label()) != NULL) {
+    LOG(LS_ERROR) << "MediaStream with label " << new_stream->label()
+                  << " is already added.";
+    return false;
+  }
+
+  // Scan the already-added streams for an audio track; stop at the first hit.
+  bool audio_track_exist = false;
+  for (size_t j = 0; j < current_streams->count() && !audio_track_exist; ++j) {
+    audio_track_exist = current_streams->at(j)->GetAudioTracks().size() > 0;
+  }
+  if (audio_track_exist && (new_stream->GetAudioTracks().size() > 0)) {
+    // Note the trailing space: the message is split across two literals and
+    // previously rendered as "supportedper".
+    LOG(LS_ERROR) << "AddStream - Currently only one audio track is supported "
+                  << "per PeerConnection.";
+    return false;
+  }
+  return true;
+}
+
+} // namespace
+
+namespace webrtc {
+
+// Constructs a PeerConnection in its initial state: stable signaling,
+// new ICE states, and no observer yet (set later in DoInitialize).
+// |factory| is kept alive via the scoped_refptr member |factory_|.
+PeerConnection::PeerConnection(PeerConnectionFactory* factory)
+    : factory_(factory),
+      observer_(NULL),
+      signaling_state_(kStable),
+      ice_state_(kIceNew),
+      ice_connection_state_(kIceConnectionNew),
+      ice_gathering_state_(kIceGatheringNew) {
+}
+
+// Tears down signaling and stream handlers if they were created (they are
+// NULL when Initialize was never run); scoped_ptr members free the rest.
+PeerConnection::~PeerConnection() {
+  if (mediastream_signaling_)
+    mediastream_signaling_->TearDown();
+  if (stream_handler_container_)
+    stream_handler_container_->TearDown();
+}
+
+// Parses the ICE server configuration into STUN/TURN server lists and hands
+// the result to DoInitialize(). Returns false if any server uri is invalid
+// or if DoInitialize() fails.
+bool PeerConnection::Initialize(
+    const PeerConnectionInterface::IceServers& configuration,
+    const MediaConstraintsInterface* constraints,
+    webrtc::PortAllocatorFactoryInterface* allocator_factory,
+    PeerConnectionObserver* observer) {
+  std::vector<PortAllocatorFactoryInterface::StunConfiguration> stun_servers;
+  std::vector<PortAllocatorFactoryInterface::TurnConfiguration> turn_servers;
+  if (!ParseIceServers(configuration, &stun_servers, &turn_servers))
+    return false;
+
+  return DoInitialize(stun_servers, turn_servers, constraints,
+                      allocator_factory, observer);
+}
+
+// Performs the actual initialization: creates the port allocator, the
+// MediaStreamSignaling instance, the WebRtcSession and the stream handler
+// container, then initializes the session and hooks up ICE/state callbacks.
+// Returns false if |observer| is NULL or session initialization fails.
+// The setup order matters: the session consumes the allocator and the
+// signaling object, and the handler container consumes the session.
+bool PeerConnection::DoInitialize(
+    const StunConfigurations& stun_config,
+    const TurnConfigurations& turn_config,
+    const MediaConstraintsInterface* constraints,
+    webrtc::PortAllocatorFactoryInterface* allocator_factory,
+    PeerConnectionObserver* observer) {
+  ASSERT(observer != NULL);
+  if (!observer)
+    return false;
+  observer_ = observer;
+  port_allocator_.reset(
+      allocator_factory->CreatePortAllocator(stun_config, turn_config));
+  // To handle both internal and externally created port allocator, we will
+  // enable BUNDLE here. Also enabling TURN and disable legacy relay service.
+  port_allocator_->set_flags(cricket::PORTALLOCATOR_ENABLE_BUNDLE |
+                             cricket::PORTALLOCATOR_ENABLE_SHARED_UFRAG |
+                             cricket::PORTALLOCATOR_ENABLE_SHARED_SOCKET);
+  // No step delay is used while allocating ports.
+  port_allocator_->set_step_delay(cricket::kMinimumStepDelay);
+
+  // |this| receives the MediaStreamSignalingObserver callbacks.
+  mediastream_signaling_.reset(new MediaStreamSignaling(
+      factory_->signaling_thread(), this));
+
+  session_.reset(new WebRtcSession(factory_->channel_manager(),
+                                   factory_->signaling_thread(),
+                                   factory_->worker_thread(),
+                                   port_allocator_.get(),
+                                   mediastream_signaling_.get()));
+  stream_handler_container_.reset(new MediaStreamHandlerContainer(
+      session_.get(), session_.get()));
+  stats_.set_session(session_.get());
+
+  // Initialize the WebRtcSession. It creates transport channels etc.
+  if (!session_->Initialize(constraints))
+    return false;
+
+
+  // Register PeerConnection as receiver of local ice candidates.
+  // All the callbacks will be posted to the application from PeerConnection.
+  session_->RegisterIceObserver(this);
+  session_->SignalState.connect(this, &PeerConnection::OnSessionStateChange);
+  return true;
+}
+
+// Returns the collection of streams currently added for sending.
+talk_base::scoped_refptr<StreamCollectionInterface>
+PeerConnection::local_streams() {
+  return mediastream_signaling_->local_streams();
+}
+
+// Returns the collection of streams received from the remote peer.
+talk_base::scoped_refptr<StreamCollectionInterface>
+PeerConnection::remote_streams() {
+  return mediastream_signaling_->remote_streams();
+}
+
+// Adds |local_stream| to the set of streams to be sent, starts stats
+// collection for it and triggers renegotiation. Fails once closed, when the
+// stream cannot be added (duplicate label / second audio track), or when the
+// signaling layer rejects it. |constraints| is currently unused.
+bool PeerConnection::AddStream(MediaStreamInterface* local_stream,
+                               const MediaConstraintsInterface* constraints) {
+  if (IsClosed() ||
+      !CanAddLocalMediaStream(mediastream_signaling_->local_streams(),
+                              local_stream)) {
+    return false;
+  }
+
+  // TODO(perkj): Implement support for MediaConstraints in AddStream.
+  if (!mediastream_signaling_->AddLocalStream(local_stream))
+    return false;
+
+  stats_.AddStream(local_stream);
+  observer_->OnRenegotiationNeeded();
+  return true;
+}
+
+// Removes |local_stream| from the set of sent streams and triggers
+// renegotiation. No-op after the connection is closed.
+void PeerConnection::RemoveStream(MediaStreamInterface* local_stream) {
+  if (IsClosed()) {
+    return;
+  }
+  mediastream_signaling_->RemoveLocalStream(local_stream);
+  observer_->OnRenegotiationNeeded();
+}
+
+// Creates a DTMF sender for |track|, which must be a non-NULL local audio
+// track. Returns NULL on failure; on success returns a proxy that marshals
+// calls to the signaling thread.
+talk_base::scoped_refptr<DtmfSenderInterface> PeerConnection::CreateDtmfSender(
+    AudioTrackInterface* track) {
+  if (!track) {
+    LOG(LS_ERROR) << "CreateDtmfSender - track is NULL.";
+    return NULL;
+  }
+  if (!mediastream_signaling_->local_streams()->FindAudioTrack(track->id())) {
+    LOG(LS_ERROR) << "CreateDtmfSender is called with a non local audio track.";
+    return NULL;
+  }
+
+  talk_base::scoped_refptr<DtmfSenderInterface> dtmf_sender(
+      DtmfSender::Create(track, signaling_thread(), session_.get()));
+  if (!dtmf_sender.get()) {
+    LOG(LS_ERROR) << "CreateDtmfSender failed on DtmfSender::Create.";
+    return NULL;
+  }
+  return DtmfSenderProxy::Create(signaling_thread(), dtmf_sender.get());
+}
+
+// Gathers stats for |track| (NULL means session-wide) and delivers them
+// asynchronously to |observer| via a message on the signaling thread.
+// Returns false if the observer is NULL or the stats lookup fails.
+bool PeerConnection::GetStats(StatsObserver* observer,
+                              MediaStreamTrackInterface* track) {
+  if (!VERIFY(observer != NULL)) {
+    LOG(LS_ERROR) << "GetStats - observer is NULL.";
+    return false;
+  }
+
+  stats_.UpdateStats();
+  talk_base::scoped_ptr<GetStatsMsg> stats_msg(new GetStatsMsg(observer));
+  if (!stats_.GetStats(track, &stats_msg->reports))
+    return false;
+
+  // Post() takes ownership of the released message.
+  signaling_thread()->Post(this, MSG_GETSTATS, stats_msg.release());
+  return true;
+}
+
+// Returns the current signaling state (updated by ChangeSignalingState).
+PeerConnectionInterface::SignalingState PeerConnection::signaling_state() {
+  return signaling_state_;
+}
+
+// Legacy accessor; see the TODO on |ice_state_| in the header.
+PeerConnectionInterface::IceState PeerConnection::ice_state() {
+  return ice_state_;
+}
+
+// Returns the last ICE connection state reported via OnIceConnectionChange.
+PeerConnectionInterface::IceConnectionState
+PeerConnection::ice_connection_state() {
+  return ice_connection_state_;
+}
+
+// Returns the last ICE gathering state reported via OnIceGatheringChange.
+PeerConnectionInterface::IceGatheringState
+PeerConnection::ice_gathering_state() {
+  return ice_gathering_state_;
+}
+
+// Creates a data channel labeled |label|. Returns NULL if the session
+// refuses to create one; otherwise triggers renegotiation and returns a
+// proxy that marshals calls to the signaling thread.
+talk_base::scoped_refptr<DataChannelInterface>
+PeerConnection::CreateDataChannel(
+    const std::string& label,
+    const DataChannelInit* config) {
+  talk_base::scoped_refptr<DataChannelInterface> data_channel(
+      session_->CreateDataChannel(label, config));
+  if (!data_channel.get())
+    return NULL;
+
+  observer_->OnRenegotiationNeeded();
+  return DataChannelProxy::Create(signaling_thread(), data_channel.get());
+}
+
+void PeerConnection::CreateOffer(CreateSessionDescriptionObserver* observer,
+ const MediaConstraintsInterface* constraints) {
+ if (!VERIFY(observer != NULL)) {
+ LOG(LS_ERROR) << "CreateOffer - observer is NULL.";
+ return;
+ }
+ CreateSessionDescriptionMsg* msg = new CreateSessionDescriptionMsg(observer);
+ msg->description.reset(
+ session_->CreateOffer(constraints));
+
+ if (!msg->description) {
+ msg->error = "CreateOffer failed.";
+ signaling_thread()->Post(this, MSG_CREATE_SESSIONDESCRIPTION_FAILED, msg);
+ return;
+ }
+
+ signaling_thread()->Post(this, MSG_CREATE_SESSIONDESCRIPTION_SUCCESS, msg);
+}
+
+// Creates an SDP answer to the current remote offer. The result — or the
+// error string "CreateAnswer failed." — is delivered to |observer|
+// asynchronously via a message posted to the signaling thread.
+void PeerConnection::CreateAnswer(
+    CreateSessionDescriptionObserver* observer,
+    const MediaConstraintsInterface* constraints) {
+  if (!VERIFY(observer != NULL)) {
+    LOG(LS_ERROR) << "CreateAnswer - observer is NULL.";
+    return;
+  }
+
+  CreateSessionDescriptionMsg* msg = new CreateSessionDescriptionMsg(observer);
+  msg->description.reset(session_->CreateAnswer(constraints));
+  if (msg->description) {
+    signaling_thread()->Post(this, MSG_CREATE_SESSIONDESCRIPTION_SUCCESS, msg);
+  } else {
+    msg->error = "CreateAnswer failed.";
+    signaling_thread()->Post(this, MSG_CREATE_SESSIONDESCRIPTION_FAILED, msg);
+  }
+}
+
+// Applies |desc| as the local session description. Success or failure is
+// reported to |observer| asynchronously on the signaling thread.
+void PeerConnection::SetLocalDescription(
+    SetSessionDescriptionObserver* observer,
+    SessionDescriptionInterface* desc) {
+  if (!VERIFY(observer != NULL)) {
+    LOG(LS_ERROR) << "SetLocalDescription - observer is NULL.";
+    return;
+  }
+  if (!desc) {
+    PostSetSessionDescriptionFailure(observer, "SessionDescription is NULL.");
+    return;
+  }
+
+  // Update stats here so that we have the most recent stats for tracks and
+  // streams that might be removed by updating the session description.
+  stats_.UpdateStats();
+
+  std::string error;
+  if (!session_->SetLocalDescription(desc, &error)) {
+    PostSetSessionDescriptionFailure(observer, error);
+    return;
+  }
+  signaling_thread()->Post(this, MSG_SET_SESSIONDESCRIPTION_SUCCESS,
+                           new SetSessionDescriptionMsg(observer));
+}
+
+// Applies |desc| as the remote session description. Success or failure is
+// reported to |observer| asynchronously on the signaling thread.
+void PeerConnection::SetRemoteDescription(
+    SetSessionDescriptionObserver* observer,
+    SessionDescriptionInterface* desc) {
+  if (!VERIFY(observer != NULL)) {
+    LOG(LS_ERROR) << "SetRemoteDescription - observer is NULL.";
+    return;
+  }
+  if (!desc) {
+    PostSetSessionDescriptionFailure(observer, "SessionDescription is NULL.");
+    return;
+  }
+
+  // Update stats here so that we have the most recent stats for tracks and
+  // streams that might be removed by updating the session description.
+  stats_.UpdateStats();
+
+  std::string error;
+  if (!session_->SetRemoteDescription(desc, &error)) {
+    PostSetSessionDescriptionFailure(observer, error);
+    return;
+  }
+  signaling_thread()->Post(this, MSG_SET_SESSIONDESCRIPTION_SUCCESS,
+                           new SetSessionDescriptionMsg(observer));
+}
+
+// Helper: posts MSG_SET_SESSIONDESCRIPTION_FAILED carrying |error| to the
+// signaling thread so |observer|->OnFailure runs asynchronously.
+void PeerConnection::PostSetSessionDescriptionFailure(
+    SetSessionDescriptionObserver* observer,
+    const std::string& error) {
+  SetSessionDescriptionMsg* msg = new SetSessionDescriptionMsg(observer);
+  msg->error = error;
+  signaling_thread()->Post(this, MSG_SET_SESSIONDESCRIPTION_FAILED, msg);
+}
+
+// Not implemented; always logs an error and returns false.
+bool PeerConnection::UpdateIce(const IceServers& configuration,
+                               const MediaConstraintsInterface* constraints) {
+  // TODO(ronghuawu): Implement UpdateIce.
+  LOG(LS_ERROR) << "UpdateIce is not implemented.";
+  return false;
+}
+
+// Hands a remote ICE candidate to the session for processing.
+bool PeerConnection::AddIceCandidate(
+    const IceCandidateInterface* ice_candidate) {
+  return session_->ProcessIceMessage(ice_candidate);
+}
+
+// Returns the session's current local description.
+const SessionDescriptionInterface* PeerConnection::local_description() const {
+  return session_->local_description();
+}
+
+// Returns the session's current remote description.
+const SessionDescriptionInterface* PeerConnection::remote_description() const {
+  return session_->remote_description();
+}
+
+// Closes the connection by terminating the session. Stats are refreshed
+// first so the final values survive channel teardown. The transition to
+// kClosed presumably arrives via OnSessionStateChange — confirm.
+void PeerConnection::Close() {
+  // Update stats here so that we have the most recent stats for tracks and
+  // streams before the channels are closed.
+  stats_.UpdateStats();
+
+  session_->Terminate();
+}
+
+// Maps cricket::BaseSession states to PeerConnection signaling states and
+// notifies the observer. States with no JSEP equivalent are ignored.
+void PeerConnection::OnSessionStateChange(cricket::BaseSession* /*session*/,
+                                          cricket::BaseSession::State state) {
+  switch (state) {
+    case cricket::BaseSession::STATE_INIT:
+      ChangeSignalingState(PeerConnectionInterface::kStable);
+      // The |break| was missing here: STATE_INIT fell through and reported
+      // kHaveLocalOffer immediately after kStable.
+      break;
+    case cricket::BaseSession::STATE_SENTINITIATE:
+      ChangeSignalingState(PeerConnectionInterface::kHaveLocalOffer);
+      break;
+    case cricket::BaseSession::STATE_SENTPRACCEPT:
+      ChangeSignalingState(PeerConnectionInterface::kHaveLocalPrAnswer);
+      break;
+    case cricket::BaseSession::STATE_RECEIVEDINITIATE:
+      ChangeSignalingState(PeerConnectionInterface::kHaveRemoteOffer);
+      break;
+    case cricket::BaseSession::STATE_RECEIVEDPRACCEPT:
+      ChangeSignalingState(PeerConnectionInterface::kHaveRemotePrAnswer);
+      break;
+    case cricket::BaseSession::STATE_SENTACCEPT:
+    case cricket::BaseSession::STATE_RECEIVEDACCEPT:
+      ChangeSignalingState(PeerConnectionInterface::kStable);
+      break;
+    case cricket::BaseSession::STATE_RECEIVEDTERMINATE:
+      ChangeSignalingState(PeerConnectionInterface::kClosed);
+      break;
+    default:
+      break;
+  }
+}
+
+// Implements talk_base::MessageHandler. Runs on the signaling thread and
+// dispatches messages posted by the methods above to the registered
+// observers. Each heap-allocated *Msg payload is owned here and deleted
+// after its observer callback returns; the ICE state-change messages carry
+// no payload and read the current member state instead.
+void PeerConnection::OnMessage(talk_base::Message* msg) {
+  switch (msg->message_id) {
+    case MSG_CREATE_SESSIONDESCRIPTION_SUCCESS: {
+      CreateSessionDescriptionMsg* param =
+          static_cast<CreateSessionDescriptionMsg*>(msg->pdata);
+      // Ownership of the description transfers to the observer.
+      param->observer->OnSuccess(param->description.release());
+      delete param;
+      break;
+    }
+    case MSG_CREATE_SESSIONDESCRIPTION_FAILED: {
+      CreateSessionDescriptionMsg* param =
+          static_cast<CreateSessionDescriptionMsg*>(msg->pdata);
+      param->observer->OnFailure(param->error);
+      delete param;
+      break;
+    }
+    case MSG_SET_SESSIONDESCRIPTION_SUCCESS: {
+      SetSessionDescriptionMsg* param =
+          static_cast<SetSessionDescriptionMsg*>(msg->pdata);
+      param->observer->OnSuccess();
+      delete param;
+      break;
+    }
+    case MSG_SET_SESSIONDESCRIPTION_FAILED: {
+      SetSessionDescriptionMsg* param =
+          static_cast<SetSessionDescriptionMsg*>(msg->pdata);
+      param->observer->OnFailure(param->error);
+      delete param;
+      break;
+    }
+    case MSG_GETSTATS: {
+      GetStatsMsg* param = static_cast<GetStatsMsg*>(msg->pdata);
+      param->observer->OnComplete(param->reports);
+      delete param;
+      break;
+    }
+    case MSG_ICECONNECTIONCHANGE: {
+      observer_->OnIceConnectionChange(ice_connection_state_);
+      break;
+    }
+    case MSG_ICEGATHERINGCHANGE: {
+      observer_->OnIceGatheringChange(ice_gathering_state_);
+      break;
+    }
+    case MSG_ICECANDIDATE: {
+      CandidateMsg* data = static_cast<CandidateMsg*>(msg->pdata);
+      observer_->OnIceCandidate(data->candidate.get());
+      delete data;
+      break;
+    }
+    case MSG_ICECOMPLETE: {
+      observer_->OnIceComplete();
+      break;
+    }
+    default:
+      // Every posted message id must be handled above.
+      ASSERT(false && "Not implemented");
+      break;
+  }
+}
+
+// MediaStreamSignalingObserver: a remote stream appeared; start stats
+// collection for it and notify the application.
+void PeerConnection::OnAddRemoteStream(MediaStreamInterface* stream) {
+  stats_.AddStream(stream);
+  observer_->OnAddStream(stream);
+}
+
+// MediaStreamSignalingObserver: a remote stream went away; drop its
+// handlers and notify the application.
+void PeerConnection::OnRemoveRemoteStream(MediaStreamInterface* stream) {
+  stream_handler_container_->RemoveRemoteStream(stream);
+  observer_->OnRemoveStream(stream);
+}
+
+// MediaStreamSignalingObserver: a remote data channel was announced; hand
+// the application a proxy that marshals calls to the signaling thread.
+void PeerConnection::OnAddDataChannel(DataChannelInterface* data_channel) {
+  observer_->OnDataChannel(DataChannelProxy::Create(signaling_thread(),
+                                                    data_channel));
+}
+
+// MediaStreamSignalingObserver: wire up rendering for a new remote audio
+// track with the given |ssrc|.
+void PeerConnection::OnAddRemoteAudioTrack(MediaStreamInterface* stream,
+                                           AudioTrackInterface* audio_track,
+                                           uint32 ssrc) {
+  stream_handler_container_->AddRemoteAudioTrack(stream, audio_track, ssrc);
+}
+
+// MediaStreamSignalingObserver: wire up rendering for a new remote video
+// track with the given |ssrc|.
+void PeerConnection::OnAddRemoteVideoTrack(MediaStreamInterface* stream,
+                                           VideoTrackInterface* video_track,
+                                           uint32 ssrc) {
+  stream_handler_container_->AddRemoteVideoTrack(stream, video_track, ssrc);
+}
+
+// MediaStreamSignalingObserver: tear down handling of a removed remote
+// audio track.
+void PeerConnection::OnRemoveRemoteAudioTrack(
+    MediaStreamInterface* stream,
+    AudioTrackInterface* audio_track) {
+  stream_handler_container_->RemoveRemoteTrack(stream, audio_track);
+}
+
+// MediaStreamSignalingObserver: tear down handling of a removed remote
+// video track.
+void PeerConnection::OnRemoveRemoteVideoTrack(
+    MediaStreamInterface* stream,
+    VideoTrackInterface* video_track) {
+  stream_handler_container_->RemoveRemoteTrack(stream, video_track);
+}
+// MediaStreamSignalingObserver: start sending a local audio track on |ssrc|.
+void PeerConnection::OnAddLocalAudioTrack(MediaStreamInterface* stream,
+                                          AudioTrackInterface* audio_track,
+                                          uint32 ssrc) {
+  stream_handler_container_->AddLocalAudioTrack(stream, audio_track, ssrc);
+}
+// MediaStreamSignalingObserver: start sending a local video track on |ssrc|.
+void PeerConnection::OnAddLocalVideoTrack(MediaStreamInterface* stream,
+                                          VideoTrackInterface* video_track,
+                                          uint32 ssrc) {
+  stream_handler_container_->AddLocalVideoTrack(stream, video_track, ssrc);
+}
+
+// MediaStreamSignalingObserver: stop sending a removed local audio track.
+void PeerConnection::OnRemoveLocalAudioTrack(MediaStreamInterface* stream,
+                                             AudioTrackInterface* audio_track) {
+  stream_handler_container_->RemoveLocalTrack(stream, audio_track);
+}
+
+// MediaStreamSignalingObserver: stop sending a removed local video track.
+void PeerConnection::OnRemoveLocalVideoTrack(MediaStreamInterface* stream,
+                                             VideoTrackInterface* video_track) {
+  stream_handler_container_->RemoveLocalTrack(stream, video_track);
+}
+
+// MediaStreamSignalingObserver: drop all handlers for a removed local stream.
+void PeerConnection::OnRemoveLocalStream(MediaStreamInterface* stream) {
+  stream_handler_container_->RemoveLocalStream(stream);
+}
+
+// IceObserver: records the new ICE connection state and posts a message so
+// the observer callback runs on the signaling thread.
+// NOTE(review): unlike OnIceGatheringChange below, there is no IsClosed()
+// early-out here — confirm whether that asymmetry is intentional.
+void PeerConnection::OnIceConnectionChange(
+    PeerConnectionInterface::IceConnectionState new_state) {
+  ice_connection_state_ = new_state;
+  signaling_thread()->Post(this, MSG_ICECONNECTIONCHANGE);
+}
+
+// IceObserver: records the new ICE gathering state and posts a message so
+// the observer callback runs on the signaling thread. Ignored once closed.
+void PeerConnection::OnIceGatheringChange(
+    PeerConnectionInterface::IceGatheringState new_state) {
+  if (IsClosed()) {
+    return;
+  }
+  ice_gathering_state_ = new_state;
+  signaling_thread()->Post(this, MSG_ICEGATHERINGCHANGE);
+}
+
+// IceObserver: forwards a new local ICE candidate to the application. The
+// candidate is copied (a NULL input stays NULL) and delivered via a message
+// on the signaling thread; the posted CandidateMsg owns the copy.
+void PeerConnection::OnIceCandidate(const IceCandidateInterface* candidate) {
+  JsepIceCandidate* candidate_copy = NULL;
+  if (candidate != NULL) {
+    // TODO(ronghuawu): Make IceCandidateInterface reference counted instead
+    // of making a copy.
+    candidate_copy = new JsepIceCandidate(candidate->sdp_mid(),
+                                          candidate->sdp_mline_index(),
+                                          candidate->candidate());
+  }
+  // The Post takes the ownership of the |candidate_copy|.
+  signaling_thread()->Post(this, MSG_ICECANDIDATE,
+                           new CandidateMsg(candidate_copy));
+}
+
+// IceObserver: posts MSG_ICECOMPLETE so the observer's OnIceComplete runs
+// on the signaling thread.
+void PeerConnection::OnIceComplete() {
+  signaling_thread()->Post(this, MSG_ICECOMPLETE);
+}
+
+// Updates |signaling_state_| and fires the observer callbacks synchronously.
+// On kClosed, the ICE connection state is forced to closed and the gathering
+// state to complete (with their callbacks) before the signaling-state
+// callbacks run.
+void PeerConnection::ChangeSignalingState(
+    PeerConnectionInterface::SignalingState signaling_state) {
+  signaling_state_ = signaling_state;
+  if (signaling_state == kClosed) {
+    ice_connection_state_ = kIceConnectionClosed;
+    observer_->OnIceConnectionChange(ice_connection_state_);
+    if (ice_gathering_state_ != kIceGatheringComplete) {
+      ice_gathering_state_ = kIceGatheringComplete;
+      observer_->OnIceGatheringChange(ice_gathering_state_);
+    }
+  }
+  // Both the new-style and the legacy state-change callbacks are fired.
+  observer_->OnSignalingChange(signaling_state_);
+  observer_->OnStateChange(PeerConnectionObserver::kSignalingState);
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/peerconnection.h b/talk/app/webrtc/peerconnection.h
new file mode 100644
index 0000000..28aa9d8
--- /dev/null
+++ b/talk/app/webrtc/peerconnection.h
@@ -0,0 +1,192 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_PEERCONNECTION_H_
+#define TALK_APP_WEBRTC_PEERCONNECTION_H_
+
+#include <string>
+
+#include "talk/app/webrtc/mediastreamsignaling.h"
+#include "talk/app/webrtc/peerconnectioninterface.h"
+#include "talk/app/webrtc/peerconnectionfactory.h"
+#include "talk/app/webrtc/statscollector.h"
+#include "talk/app/webrtc/streamcollection.h"
+#include "talk/app/webrtc/webrtcsession.h"
+#include "talk/base/scoped_ptr.h"
+
+namespace webrtc {
+class MediaStreamHandlerContainer;
+
+// Convenience aliases for the STUN/TURN server lists consumed by
+// PeerConnection::DoInitialize and the PortAllocatorFactory.
+typedef std::vector<PortAllocatorFactoryInterface::StunConfiguration>
+    StunConfigurations;
+typedef std::vector<PortAllocatorFactoryInterface::TurnConfiguration>
+    TurnConfigurations;
+
+// PeerConnectionImpl implements the PeerConnection interface.
+// It uses MediaStreamSignaling and WebRtcSession to implement
+// the PeerConnection functionality.
+class PeerConnection : public PeerConnectionInterface,
+                       public MediaStreamSignalingObserver,
+                       public IceObserver,
+                       public talk_base::MessageHandler,
+                       public sigslot::has_slots<> {
+ public:
+  explicit PeerConnection(PeerConnectionFactory* factory);
+
+  // Parses |configuration| into STUN/TURN server lists and performs the
+  // session setup. Must succeed before any other method is used.
+  bool Initialize(const PeerConnectionInterface::IceServers& configuration,
+                  const MediaConstraintsInterface* constraints,
+                  webrtc::PortAllocatorFactoryInterface* allocator_factory,
+                  PeerConnectionObserver* observer);
+  virtual talk_base::scoped_refptr<StreamCollectionInterface> local_streams();
+  virtual talk_base::scoped_refptr<StreamCollectionInterface> remote_streams();
+  virtual bool AddStream(MediaStreamInterface* local_stream,
+                         const MediaConstraintsInterface* constraints);
+  virtual void RemoveStream(MediaStreamInterface* local_stream);
+
+  virtual talk_base::scoped_refptr<DtmfSenderInterface> CreateDtmfSender(
+      AudioTrackInterface* track);
+
+  virtual talk_base::scoped_refptr<DataChannelInterface> CreateDataChannel(
+      const std::string& label,
+      const DataChannelInit* config);
+  virtual bool GetStats(StatsObserver* observer,
+                        webrtc::MediaStreamTrackInterface* track);
+
+  virtual SignalingState signaling_state();
+
+  // TODO(bemasc): Remove ice_state() when callers are removed.
+  virtual IceState ice_state();
+  virtual IceConnectionState ice_connection_state();
+  virtual IceGatheringState ice_gathering_state();
+
+  virtual const SessionDescriptionInterface* local_description() const;
+  virtual const SessionDescriptionInterface* remote_description() const;
+
+  // JSEP01
+  virtual void CreateOffer(CreateSessionDescriptionObserver* observer,
+                           const MediaConstraintsInterface* constraints);
+  virtual void CreateAnswer(CreateSessionDescriptionObserver* observer,
+                            const MediaConstraintsInterface* constraints);
+  virtual void SetLocalDescription(SetSessionDescriptionObserver* observer,
+                                   SessionDescriptionInterface* desc);
+  virtual void SetRemoteDescription(SetSessionDescriptionObserver* observer,
+                                    SessionDescriptionInterface* desc);
+  virtual bool UpdateIce(const IceServers& configuration,
+                         const MediaConstraintsInterface* constraints);
+  virtual bool AddIceCandidate(const IceCandidateInterface* candidate);
+
+  virtual void Close();
+
+ protected:
+  // Protected: presumably instances are managed via scoped_refptr and must
+  // not be deleted directly by callers — confirm against the factory.
+  virtual ~PeerConnection();
+
+ private:
+  // Implements MessageHandler.
+  virtual void OnMessage(talk_base::Message* msg);
+
+  // Implements MediaStreamSignalingObserver.
+  virtual void OnAddRemoteStream(MediaStreamInterface* stream) OVERRIDE;
+  virtual void OnRemoveRemoteStream(MediaStreamInterface* stream) OVERRIDE;
+  virtual void OnAddDataChannel(DataChannelInterface* data_channel) OVERRIDE;
+  virtual void OnAddRemoteAudioTrack(MediaStreamInterface* stream,
+                                     AudioTrackInterface* audio_track,
+                                     uint32 ssrc) OVERRIDE;
+  virtual void OnAddRemoteVideoTrack(MediaStreamInterface* stream,
+                                     VideoTrackInterface* video_track,
+                                     uint32 ssrc) OVERRIDE;
+  virtual void OnRemoveRemoteAudioTrack(
+      MediaStreamInterface* stream,
+      AudioTrackInterface* audio_track) OVERRIDE;
+  virtual void OnRemoveRemoteVideoTrack(
+      MediaStreamInterface* stream,
+      VideoTrackInterface* video_track) OVERRIDE;
+  virtual void OnAddLocalAudioTrack(MediaStreamInterface* stream,
+                                    AudioTrackInterface* audio_track,
+                                    uint32 ssrc) OVERRIDE;
+  virtual void OnAddLocalVideoTrack(MediaStreamInterface* stream,
+                                    VideoTrackInterface* video_track,
+                                    uint32 ssrc) OVERRIDE;
+  virtual void OnRemoveLocalAudioTrack(
+      MediaStreamInterface* stream,
+      AudioTrackInterface* audio_track) OVERRIDE;
+  virtual void OnRemoveLocalVideoTrack(
+      MediaStreamInterface* stream,
+      VideoTrackInterface* video_track) OVERRIDE;
+  virtual void OnRemoveLocalStream(MediaStreamInterface* stream);
+
+  // Implements IceObserver
+  virtual void OnIceConnectionChange(IceConnectionState new_state);
+  virtual void OnIceGatheringChange(IceGatheringState new_state);
+  virtual void OnIceCandidate(const IceCandidateInterface* candidate);
+  virtual void OnIceComplete();
+
+  // Signals from WebRtcSession.
+  void OnSessionStateChange(cricket::BaseSession* session,
+                            cricket::BaseSession::State state);
+  void ChangeSignalingState(SignalingState signaling_state);
+
+  bool DoInitialize(const StunConfigurations& stun_config,
+                    const TurnConfigurations& turn_config,
+                    const MediaConstraintsInterface* constraints,
+                    webrtc::PortAllocatorFactoryInterface* allocator_factory,
+                    PeerConnectionObserver* observer);
+
+  talk_base::Thread* signaling_thread() const {
+    return factory_->signaling_thread();
+  }
+
+  void PostSetSessionDescriptionFailure(SetSessionDescriptionObserver* observer,
+                                        const std::string& error);
+
+  bool IsClosed() const {
+    return signaling_state_ == PeerConnectionInterface::kClosed;
+  }
+
+  // Storing the factory as a scoped reference pointer ensures that the memory
+  // in the PeerConnectionFactoryImpl remains available as long as the
+  // PeerConnection is running. It is passed to PeerConnection as a raw pointer.
+  // However, since the reference counting is done in the
+  // PeerConnectionFactoryInteface all instances created using the raw pointer
+  // will refer to the same reference count.
+  talk_base::scoped_refptr<PeerConnectionFactory> factory_;
+  PeerConnectionObserver* observer_;
+  SignalingState signaling_state_;
+  // TODO(bemasc): Remove ice_state_.
+  IceState ice_state_;
+  IceConnectionState ice_connection_state_;
+  IceGatheringState ice_gathering_state_;
+
+  talk_base::scoped_ptr<cricket::PortAllocator> port_allocator_;
+  talk_base::scoped_ptr<WebRtcSession> session_;
+  talk_base::scoped_ptr<MediaStreamSignaling> mediastream_signaling_;
+  talk_base::scoped_ptr<MediaStreamHandlerContainer> stream_handler_container_;
+  StatsCollector stats_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_PEERCONNECTION_H_
diff --git a/talk/app/webrtc/peerconnection_unittest.cc b/talk/app/webrtc/peerconnection_unittest.cc
new file mode 100644
index 0000000..96a9c1c
--- /dev/null
+++ b/talk/app/webrtc/peerconnection_unittest.cc
@@ -0,0 +1,1374 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <list>
+#include <map>
+#include <vector>
+
+#include "talk/app/webrtc/dtmfsender.h"
+#include "talk/app/webrtc/fakeportallocatorfactory.h"
+#include "talk/app/webrtc/localaudiosource.h"
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/peerconnectionfactory.h"
+#include "talk/app/webrtc/peerconnectioninterface.h"
+#include "talk/app/webrtc/test/fakeaudiocapturemodule.h"
+#include "talk/app/webrtc/test/fakeconstraints.h"
+#include "talk/app/webrtc/test/fakevideotrackrenderer.h"
+#include "talk/app/webrtc/test/fakeperiodicvideocapturer.h"
+#include "talk/app/webrtc/test/mockpeerconnectionobservers.h"
+#include "talk/app/webrtc/videosourceinterface.h"
+#include "talk/base/gunit.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/ssladapter.h"
+#include "talk/base/sslstreamadapter.h"
+#include "talk/base/thread.h"
+#include "talk/media/webrtc/fakewebrtcvideoengine.h"
+#include "talk/p2p/base/constants.h"
+#include "talk/p2p/base/sessiondescription.h"
+#include "talk/session/media/mediasession.h"
+
// Skips the remainder of the current test body when the feature-detection
// predicate |feature|() returns false (e.g. a capability compiled out of
// this build). Must be used directly inside a TEST_F body since it
// expands to a bare |return|.
#define MAYBE_SKIP_TEST(feature) \
  if (!(feature())) { \
    LOG(LS_INFO) << "Feature disabled... skipping"; \
    return; \
  }
+
+using cricket::ContentInfo;
+using cricket::FakeWebRtcVideoDecoder;
+using cricket::FakeWebRtcVideoDecoderFactory;
+using cricket::FakeWebRtcVideoEncoder;
+using cricket::FakeWebRtcVideoEncoderFactory;
+using cricket::MediaContentDescription;
+using webrtc::DataBuffer;
+using webrtc::DataChannelInterface;
+using webrtc::DtmfSender;
+using webrtc::DtmfSenderInterface;
+using webrtc::DtmfSenderObserverInterface;
+using webrtc::FakeConstraints;
+using webrtc::MediaConstraintsInterface;
+using webrtc::MediaStreamTrackInterface;
+using webrtc::MockCreateSessionDescriptionObserver;
+using webrtc::MockDataChannelObserver;
+using webrtc::MockSetSessionDescriptionObserver;
+using webrtc::MockStatsObserver;
+using webrtc::SessionDescriptionInterface;
+using webrtc::StreamCollectionInterface;
+
// Upper bounds (in milliseconds) for the EXPECT_*_WAIT polling macros used
// throughout these tests.
static const int kMaxWaitMs = 1000;
static const int kMaxWaitForStatsMs = 3000;
static const int kMaxWaitForFramesMs = 5000;
// Number of audio/video frames that must be received before a call is
// considered to be working.
static const int kEndAudioFrameCount = 3;
static const int kEndVideoFrameCount = 3;

// Base strings used to build per-client stream/track/channel labels.
static const char kStreamLabelBase[] = "stream_label";
static const char kVideoTrackLabelBase[] = "video_track";
static const char kAudioTrackLabelBase[] = "audio_track";
static const char kDataChannelLabel[] = "data_channel";
+
// Removes every SDP line that begins with |line_start| from |sdp|,
// including the line's trailing "\r\n".
//
// Fixes the unterminated-line edge case: the original computed
// |end_ssrc - ssrc_pos + strlen(kSdpLineEnd)| even when find() returned
// npos, which wraps in unsigned arithmetic — for a match at position 1 the
// erase count became 0 and the loop never terminated; for a match at
// position 0 only one character was erased. A match with no line ending is
// now erased to the end of the string.
static void RemoveLinesFromSdp(const std::string& line_start,
                               std::string* sdp) {
  const char kSdpLineEnd[] = "\r\n";
  size_t ssrc_pos = 0;
  while ((ssrc_pos = sdp->find(line_start, ssrc_pos)) !=
      std::string::npos) {
    size_t end_ssrc = sdp->find(kSdpLineEnd, ssrc_pos);
    if (end_ssrc == std::string::npos) {
      // Matched line is not "\r\n"-terminated; drop everything to the end.
      sdp->erase(ssrc_pos);
      break;
    }
    sdp->erase(ssrc_pos, end_ssrc - ssrc_pos + strlen(kSdpLineEnd));
  }
}
+
// Empty marker base for the message-receiver interfaces that test clients
// implement to exchange signaling with each other. Only constructible by
// derived classes.
class SignalingMessageReceiver {
 protected:
  SignalingMessageReceiver() {}
  virtual ~SignalingMessageReceiver() {}
};
+
// Signaling interface for JSEP test clients: the remote peer delivers SDP
// offers/answers and trickled ICE candidates through these callbacks.
class JsepMessageReceiver : public SignalingMessageReceiver {
 public:
  // |msg| is a non-const reference on purpose: the receiver may rewrite
  // the SDP before applying it (see JsepTestClient::FilterIncomingSdpMessage).
  virtual void ReceiveSdpMessage(const std::string& type,
                                 std::string& msg) = 0;
  // Delivers one trickled ICE candidate (|msg| is the candidate SDP line).
  virtual void ReceiveIceMessage(const std::string& sdp_mid,
                                 int sdp_mline_index,
                                 const std::string& msg) = 0;

 protected:
  JsepMessageReceiver() {}
  virtual ~JsepMessageReceiver() {}
};
+
// Base class for one endpoint of a two-party test call. It owns the
// PeerConnection, its factory and the fake audio/video capture, render and
// codec machinery used to count frames, and it implements
// PeerConnectionObserver so tests can assert on state transitions.
// MessageReceiver is the signaling interface used to exchange SDP and ICE
// messages with the remote endpoint.
template <typename MessageReceiver>
class PeerConnectionTestClientBase
    : public webrtc::PeerConnectionObserver,
      public MessageReceiver {
 public:
  ~PeerConnectionTestClientBase() {
    // Renderers are owned by this class (allocated in OnAddStream).
    while (!fake_video_renderers_.empty()) {
      RenderMap::iterator it = fake_video_renderers_.begin();
      delete it->second;
      fake_video_renderers_.erase(it);
    }
  }

  virtual void Negotiate() = 0;

  virtual void Negotiate(bool audio, bool video) = 0;

  virtual void SetVideoConstraints(
      const webrtc::FakeConstraints& video_constraint) {
    video_constraints_ = video_constraint;
  }

  // Adds a local stream with an audio and/or video track. A track is only
  // added when this client can also receive that media type.
  void AddMediaStream(bool audio, bool video) {
    std::string label = kStreamLabelBase +
        talk_base::ToString<int>(peer_connection_->local_streams()->count());
    talk_base::scoped_refptr<webrtc::MediaStreamInterface> stream =
        peer_connection_factory_->CreateLocalMediaStream(label);

    if (audio && can_receive_audio()) {
      FakeConstraints constraints;
      // Disable highpass filter so that we can get all the test audio frames.
      constraints.AddMandatory(
          MediaConstraintsInterface::kHighpassFilter, false);
      talk_base::scoped_refptr<webrtc::LocalAudioSource> source =
          webrtc::LocalAudioSource::Create(&constraints);
      // TODO(perkj): Test audio source when it is implemented. Currently audio
      // always use the default input.
      talk_base::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
          peer_connection_factory_->CreateAudioTrack(kAudioTrackLabelBase,
                                                     source));
      stream->AddTrack(audio_track);
    }
    if (video && can_receive_video()) {
      stream->AddTrack(CreateLocalVideoTrack(label));
    }

    EXPECT_TRUE(peer_connection_->AddStream(stream, NULL));
  }

  size_t NumberOfLocalMediaStreams() {
    return peer_connection_->local_streams()->count();
  }

  bool SessionActive() {
    return peer_connection_->signaling_state() ==
        webrtc::PeerConnectionInterface::kStable;
  }

  void set_signaling_message_receiver(
      MessageReceiver* signaling_message_receiver) {
    signaling_message_receiver_ = signaling_message_receiver;
  }

  void EnableVideoDecoderFactory() {
    video_decoder_factory_enabled_ = true;
    fake_video_decoder_factory_->AddSupportedVideoCodecType(
        webrtc::kVideoCodecVP8);
  }

  // True once at least |number_of_frames| audio frames have been received.
  bool AudioFramesReceivedCheck(int number_of_frames) const {
    return number_of_frames <= fake_audio_capture_module_->frames_received();
  }

  // True once every decoder (external-decoder mode) or renderer has received
  // at least |number_of_frames| video frames. With no decoders/renderers
  // present only a request for <= 0 frames can be satisfied.
  bool VideoFramesReceivedCheck(int number_of_frames) {
    if (video_decoder_factory_enabled_) {
      const std::vector<FakeWebRtcVideoDecoder*>& decoders
          = fake_video_decoder_factory_->decoders();
      if (decoders.empty()) {
        return number_of_frames <= 0;
      }

      for (std::vector<FakeWebRtcVideoDecoder*>::const_iterator
               it = decoders.begin(); it != decoders.end(); ++it) {
        if (number_of_frames > (*it)->GetNumFramesReceived()) {
          return false;
        }
      }
      return true;
    } else {
      if (fake_video_renderers_.empty()) {
        return number_of_frames <= 0;
      }

      for (RenderMap::const_iterator it = fake_video_renderers_.begin();
           it != fake_video_renderers_.end(); ++it) {
        if (number_of_frames > it->second->num_rendered_frames()) {
          return false;
        }
      }
      return true;
    }
  }
  // Verify the CreateDtmfSender interface
  void VerifyDtmf() {
    talk_base::scoped_ptr<DummyDtmfObserver> observer(new DummyDtmfObserver());
    talk_base::scoped_refptr<DtmfSenderInterface> dtmf_sender;

    // We can't create a DTMF sender with an invalid audio track or a non local
    // track.
    EXPECT_TRUE(peer_connection_->CreateDtmfSender(NULL) == NULL);
    talk_base::scoped_refptr<webrtc::AudioTrackInterface> non_localtrack(
        peer_connection_factory_->CreateAudioTrack("dummy_track",
                                                   NULL));
    EXPECT_TRUE(peer_connection_->CreateDtmfSender(non_localtrack) == NULL);

    // We should be able to create a DTMF sender from a local track.
    webrtc::AudioTrackInterface* localtrack =
        peer_connection_->local_streams()->at(0)->GetAudioTracks()[0];
    dtmf_sender = peer_connection_->CreateDtmfSender(localtrack);
    EXPECT_TRUE(dtmf_sender.get() != NULL);
    dtmf_sender->RegisterObserver(observer.get());

    // Test the DtmfSender object just created.
    EXPECT_TRUE(dtmf_sender->CanInsertDtmf());
    EXPECT_TRUE(dtmf_sender->InsertDtmf("1a", 100, 50));

    // We don't need to verify that the DTMF tones are actually sent out because
    // that is already covered by the tests of the lower level components.

    EXPECT_TRUE_WAIT(observer->completed(), kMaxWaitMs);
    std::vector<std::string> tones;
    tones.push_back("1");
    tones.push_back("a");
    tones.push_back("");
    observer->Verify(tones);

    dtmf_sender->UnregisterObserver();
  }

  // Verifies that the SessionDescription have rejected the appropriate media
  // content.
  void VerifyRejectedMediaInSessionDescription() {
    ASSERT_TRUE(peer_connection_->remote_description() != NULL);
    ASSERT_TRUE(peer_connection_->local_description() != NULL);
    const cricket::SessionDescription* remote_desc =
        peer_connection_->remote_description()->description();
    const cricket::SessionDescription* local_desc =
        peer_connection_->local_description()->description();

    const ContentInfo* remote_audio_content = GetFirstAudioContent(remote_desc);
    if (remote_audio_content) {
      const ContentInfo* audio_content =
          GetFirstAudioContent(local_desc);
      EXPECT_EQ(can_receive_audio(), !audio_content->rejected);
    }

    const ContentInfo* remote_video_content = GetFirstVideoContent(remote_desc);
    if (remote_video_content) {
      const ContentInfo* video_content =
          GetFirstVideoContent(local_desc);
      EXPECT_EQ(can_receive_video(), !video_content->rejected);
    }
  }

  void SetExpectIceRestart(bool expect_restart) {
    expect_ice_restart_ = expect_restart;
  }

  bool ExpectIceRestart() const { return expect_ice_restart_; }

  // Checks the ice ufrag/pwd of every accepted content in the local
  // description: they must differ from the cached pair after an ICE restart
  // and match it otherwise. The first pair seen per content index is cached.
  void VerifyLocalIceUfragAndPassword() {
    ASSERT_TRUE(peer_connection_->local_description() != NULL);
    const cricket::SessionDescription* desc =
        peer_connection_->local_description()->description();
    const cricket::ContentInfos& contents = desc->contents();

    for (size_t index = 0; index < contents.size(); ++index) {
      if (contents[index].rejected)
        continue;
      const cricket::TransportDescription* transport_desc =
          desc->GetTransportDescriptionByName(contents[index].name);

      std::map<int, IceUfragPwdPair>::const_iterator ufragpair_it =
          ice_ufrag_pwd_.find(index);
      if (ufragpair_it == ice_ufrag_pwd_.end()) {
        ASSERT_FALSE(ExpectIceRestart());
        ice_ufrag_pwd_[index] = IceUfragPwdPair(transport_desc->ice_ufrag,
                                                transport_desc->ice_pwd);
      } else if (ExpectIceRestart()) {
        const IceUfragPwdPair& ufrag_pwd = ufragpair_it->second;
        EXPECT_NE(ufrag_pwd.first, transport_desc->ice_ufrag);
        EXPECT_NE(ufrag_pwd.second, transport_desc->ice_pwd);
      } else {
        const IceUfragPwdPair& ufrag_pwd = ufragpair_it->second;
        EXPECT_EQ(ufrag_pwd.first, transport_desc->ice_ufrag);
        EXPECT_EQ(ufrag_pwd.second, transport_desc->ice_pwd);
      }
    }
  }

  int GetAudioOutputLevelStats(webrtc::MediaStreamTrackInterface* track) {
    talk_base::scoped_refptr<MockStatsObserver>
        observer(new talk_base::RefCountedObject<MockStatsObserver>());
    EXPECT_TRUE(peer_connection_->GetStats(observer, track));
    EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
    return observer->AudioOutputLevel();
  }

  int GetAudioInputLevelStats() {
    talk_base::scoped_refptr<MockStatsObserver>
        observer(new talk_base::RefCountedObject<MockStatsObserver>());
    EXPECT_TRUE(peer_connection_->GetStats(observer, NULL));
    EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
    return observer->AudioInputLevel();
  }

  int GetBytesReceivedStats(webrtc::MediaStreamTrackInterface* track) {
    talk_base::scoped_refptr<MockStatsObserver>
        observer(new talk_base::RefCountedObject<MockStatsObserver>());
    EXPECT_TRUE(peer_connection_->GetStats(observer, track));
    EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
    return observer->BytesReceived();
  }

  int GetBytesSentStats(webrtc::MediaStreamTrackInterface* track) {
    talk_base::scoped_refptr<MockStatsObserver>
        observer(new talk_base::RefCountedObject<MockStatsObserver>());
    EXPECT_TRUE(peer_connection_->GetStats(observer, track));
    EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
    return observer->BytesSent();
  }

  // Dimensions of the first renderer's last frame. Flags a test failure and
  // returns 1 when no renderer exists (keeps callers from dividing by zero).
  int rendered_width() {
    EXPECT_FALSE(fake_video_renderers_.empty());
    return fake_video_renderers_.empty() ? 1 :
        fake_video_renderers_.begin()->second->width();
  }

  int rendered_height() {
    EXPECT_FALSE(fake_video_renderers_.empty());
    return fake_video_renderers_.empty() ? 1 :
        fake_video_renderers_.begin()->second->height();
  }

  size_t number_of_remote_streams() {
    if (!pc())
      return 0;
    return pc()->remote_streams()->count();
  }

  StreamCollectionInterface* remote_streams() {
    if (!pc()) {
      ADD_FAILURE();
      return NULL;
    }
    return pc()->remote_streams();
  }

  StreamCollectionInterface* local_streams() {
    if (!pc()) {
      ADD_FAILURE();
      return NULL;
    }
    return pc()->local_streams();
  }

  webrtc::PeerConnectionInterface::SignalingState signaling_state() {
    return pc()->signaling_state();
  }

  webrtc::PeerConnectionInterface::IceConnectionState ice_connection_state() {
    return pc()->ice_connection_state();
  }

  webrtc::PeerConnectionInterface::IceGatheringState ice_gathering_state() {
    return pc()->ice_gathering_state();
  }

  // PeerConnectionObserver callbacks.
  virtual void OnError() {}
  virtual void OnMessage(const std::string&) {}
  virtual void OnSignalingMessage(const std::string& /*msg*/) {}
  virtual void OnSignalingChange(
      webrtc::PeerConnectionInterface::SignalingState new_state) {
    EXPECT_EQ(peer_connection_->signaling_state(), new_state);
  }
  // Creates a renderer for every video track of a newly added remote stream
  // so that received frames can be counted.
  virtual void OnAddStream(webrtc::MediaStreamInterface* media_stream) {
    for (size_t i = 0; i < media_stream->GetVideoTracks().size(); ++i) {
      const std::string id = media_stream->GetVideoTracks()[i]->id();
      ASSERT_TRUE(fake_video_renderers_.find(id) ==
          fake_video_renderers_.end());
      fake_video_renderers_[id] = new webrtc::FakeVideoTrackRenderer(
          media_stream->GetVideoTracks()[i]);
    }
  }
  virtual void OnRemoveStream(webrtc::MediaStreamInterface* media_stream) {}
  virtual void OnRenegotiationNeeded() {}
  virtual void OnIceConnectionChange(
      webrtc::PeerConnectionInterface::IceConnectionState new_state) {
    EXPECT_EQ(peer_connection_->ice_connection_state(), new_state);
  }
  virtual void OnIceGatheringChange(
      webrtc::PeerConnectionInterface::IceGatheringState new_state) {
    EXPECT_EQ(peer_connection_->ice_gathering_state(), new_state);
  }
  virtual void OnIceCandidate(
      const webrtc::IceCandidateInterface* /*candidate*/) {}

  webrtc::PeerConnectionInterface* pc() {
    return peer_connection_.get();
  }

 protected:
  explicit PeerConnectionTestClientBase(const std::string& id)
      : id_(id),
        expect_ice_restart_(false),
        fake_video_decoder_factory_(NULL),
        fake_video_encoder_factory_(NULL),
        video_decoder_factory_enabled_(false),
        signaling_message_receiver_(NULL) {
  }
  // Builds the port-allocator factory, fake audio device, fake codec
  // factories and finally the PeerConnection. Returns false on any failure.
  bool Init(const MediaConstraintsInterface* constraints) {
    EXPECT_TRUE(!peer_connection_);
    EXPECT_TRUE(!peer_connection_factory_);
    allocator_factory_ = webrtc::FakePortAllocatorFactory::Create();
    if (!allocator_factory_) {
      return false;
    }
    audio_thread_.Start();
    fake_audio_capture_module_ = FakeAudioCaptureModule::Create(
        &audio_thread_);

    if (fake_audio_capture_module_ == NULL) {
      return false;
    }
    fake_video_decoder_factory_ = new FakeWebRtcVideoDecoderFactory();
    fake_video_encoder_factory_ = new FakeWebRtcVideoEncoderFactory();
    peer_connection_factory_ = webrtc::CreatePeerConnectionFactory(
        talk_base::Thread::Current(), talk_base::Thread::Current(),
        fake_audio_capture_module_, fake_video_encoder_factory_,
        fake_video_decoder_factory_);
    if (!peer_connection_factory_) {
      return false;
    }
    peer_connection_ = CreatePeerConnection(allocator_factory_.get(),
                                            constraints);
    return peer_connection_.get() != NULL;
  }
  virtual talk_base::scoped_refptr<webrtc::PeerConnectionInterface>
      CreatePeerConnection(webrtc::PortAllocatorFactoryInterface* factory,
                           const MediaConstraintsInterface* constraints) = 0;
  MessageReceiver* signaling_message_receiver() {
    return signaling_message_receiver_;
  }
  webrtc::PeerConnectionFactoryInterface* peer_connection_factory() {
    return peer_connection_factory_.get();
  }

  virtual bool can_receive_audio() = 0;
  virtual bool can_receive_video() = 0;
  const std::string& id() const { return id_; }

 private:
  // Records the tones reported by a DtmfSender; |completed_| is set once the
  // empty "end of tones" notification is observed.
  class DummyDtmfObserver : public DtmfSenderObserverInterface {
   public:
    DummyDtmfObserver() : completed_(false) {}

    // Implements DtmfSenderObserverInterface.
    void OnToneChange(const std::string& tone) {
      tones_.push_back(tone);
      if (tone.empty()) {
        completed_ = true;
      }
    }

    void Verify(const std::vector<std::string>& tones) const {
      ASSERT_TRUE(tones_.size() == tones.size());
      EXPECT_TRUE(std::equal(tones.begin(), tones.end(), tones_.begin()));
    }

    bool completed() const { return completed_; }

   private:
    bool completed_;
    std::vector<std::string> tones_;
  };

  talk_base::scoped_refptr<webrtc::VideoTrackInterface>
      CreateLocalVideoTrack(const std::string stream_label) {
    // Set max frame rate to 10fps to reduce the risk of the tests to be flaky.
    FakeConstraints source_constraints = video_constraints_;
    source_constraints.SetMandatoryMaxFrameRate(10);

    talk_base::scoped_refptr<webrtc::VideoSourceInterface> source =
        peer_connection_factory_->CreateVideoSource(
            new webrtc::FakePeriodicVideoCapturer(),
            &source_constraints);
    std::string label = stream_label + kVideoTrackLabelBase;
    return peer_connection_factory_->CreateVideoTrack(label, source);
  }

  std::string id_;
  // Separate thread for executing |fake_audio_capture_module_| tasks. Audio
  // processing must not be performed on the same thread as signaling due to
  // signaling time constraints and relative complexity of the audio pipeline.
  // This is consistent with the video pipeline that uses a separate thread for
  // encoding and decoding.
  talk_base::Thread audio_thread_;

  talk_base::scoped_refptr<webrtc::PortAllocatorFactoryInterface>
      allocator_factory_;
  talk_base::scoped_refptr<webrtc::PeerConnectionInterface> peer_connection_;
  talk_base::scoped_refptr<webrtc::PeerConnectionFactoryInterface>
      peer_connection_factory_;

  typedef std::pair<std::string, std::string> IceUfragPwdPair;
  // Last observed ice ufrag/pwd per content index; see
  // VerifyLocalIceUfragAndPassword().
  std::map<int, IceUfragPwdPair> ice_ufrag_pwd_;
  bool expect_ice_restart_;

  // Needed to keep track of number of frames sent.
  talk_base::scoped_refptr<FakeAudioCaptureModule> fake_audio_capture_module_;
  // Needed to keep track of number of frames received.
  typedef std::map<std::string, webrtc::FakeVideoTrackRenderer*> RenderMap;
  RenderMap fake_video_renderers_;
  // Needed to keep track of number of frames received when external decoder
  // used.
  FakeWebRtcVideoDecoderFactory* fake_video_decoder_factory_;
  FakeWebRtcVideoEncoderFactory* fake_video_encoder_factory_;
  bool video_decoder_factory_enabled_;
  webrtc::FakeConstraints video_constraints_;

  // For remote peer communication.
  MessageReceiver* signaling_message_receiver_;
};
+
// Test client that drives a call with the JSEP API: offers/answers are
// created and applied locally, then their SDP is forwarded to the remote
// client through JsepMessageReceiver; ICE candidates are trickled the same
// way. Received SDP can be mangled (strip ssrc/msid, BUNDLE or SDES crypto
// lines) to exercise interop with restricted remote endpoints.
class JsepTestClient
    : public PeerConnectionTestClientBase<JsepMessageReceiver> {
 public:
  static JsepTestClient* CreateClient(
      const std::string& id,
      const MediaConstraintsInterface* constraints) {
    JsepTestClient* client(new JsepTestClient(id));
    if (!client->Init(constraints)) {
      delete client;
      return NULL;
    }
    return client;
  }
  ~JsepTestClient() {}

  virtual void Negotiate() {
    Negotiate(true, true);
  }
  // Creates a local offer (optionally marking audio/video rejected),
  // applies it and forwards its SDP to the remote client.
  virtual void Negotiate(bool audio, bool video) {
    talk_base::scoped_ptr<SessionDescriptionInterface> offer;
    EXPECT_TRUE(DoCreateOffer(offer.use()));

    if (offer->description()->GetContentByName("audio")) {
      offer->description()->GetContentByName("audio")->rejected = !audio;
    }
    if (offer->description()->GetContentByName("video")) {
      offer->description()->GetContentByName("video")->rejected = !video;
    }

    std::string sdp;
    EXPECT_TRUE(offer->ToString(&sdp));
    EXPECT_TRUE(DoSetLocalDescription(offer.release()));
    signaling_message_receiver()->ReceiveSdpMessage(
        webrtc::SessionDescriptionInterface::kOffer, sdp);
  }
  // JsepMessageReceiver callback.
  virtual void ReceiveSdpMessage(const std::string& type,
                                 std::string& msg) {
    // |msg| may be rewritten in place before being applied.
    FilterIncomingSdpMessage(&msg);
    if (type == webrtc::SessionDescriptionInterface::kOffer) {
      HandleIncomingOffer(msg);
    } else {
      HandleIncomingAnswer(msg);
    }
  }
  // JsepMessageReceiver callback.
  virtual void ReceiveIceMessage(const std::string& sdp_mid,
                                 int sdp_mline_index,
                                 const std::string& msg) {
    LOG(INFO) << id() << "ReceiveIceMessage";
    talk_base::scoped_ptr<webrtc::IceCandidateInterface> candidate(
        webrtc::CreateIceCandidate(sdp_mid, sdp_mline_index, msg, NULL));
    EXPECT_TRUE(pc()->AddIceCandidate(candidate.get()));
  }
  // Implements PeerConnectionObserver functions needed by Jsep.
  virtual void OnIceCandidate(const webrtc::IceCandidateInterface* candidate) {
    LOG(INFO) << id() << "OnIceCandidate";

    std::string ice_sdp;
    EXPECT_TRUE(candidate->ToString(&ice_sdp));
    if (signaling_message_receiver() == NULL) {
      // Remote party may be deleted.
      return;
    }
    signaling_message_receiver()->ReceiveIceMessage(candidate->sdp_mid(),
        candidate->sdp_mline_index(), ice_sdp);
  }

  // Forces an ICE restart in the next offer and arms the ufrag/pwd checks.
  void IceRestart() {
    session_description_constraints_.SetMandatoryIceRestart(true);
    SetExpectIceRestart(true);
  }

  void SetReceiveAudioVideo(bool audio, bool video) {
    session_description_constraints_.SetMandatoryReceiveAudio(audio);
    session_description_constraints_.SetMandatoryReceiveVideo(video);
    ASSERT_EQ(audio, can_receive_audio());
    ASSERT_EQ(video, can_receive_video());
  }

  void RemoveMsidFromReceivedSdp(bool remove) {
    remove_msid_ = remove;
  }

  void RemoveSdesCryptoFromReceivedSdp(bool remove) {
    remove_sdes_ = remove;
  }

  void RemoveBundleFromReceivedSdp(bool remove) {
    remove_bundle_ = remove;
  }

  // Defaults to true when no kOfferToReceiveAudio constraint has been set.
  virtual bool can_receive_audio() {
    bool value;
    if (webrtc::FindConstraint(&session_description_constraints_,
        MediaConstraintsInterface::kOfferToReceiveAudio, &value, NULL)) {
      return value;
    }
    return true;
  }

  // Defaults to true when no kOfferToReceiveVideo constraint has been set.
  virtual bool can_receive_video() {
    bool value;
    if (webrtc::FindConstraint(&session_description_constraints_,
        MediaConstraintsInterface::kOfferToReceiveVideo, &value, NULL)) {
      return value;
    }
    return true;
  }

  virtual void OnIceComplete() {
    LOG(INFO) << id() << "OnIceComplete";
  }

  virtual void OnDataChannel(DataChannelInterface* data_channel) {
    LOG(INFO) << id() << "OnDataChannel";
    data_channel_ = data_channel;
    data_observer_.reset(new MockDataChannelObserver(data_channel));
  }

  void CreateDataChannel() {
    data_channel_ = pc()->CreateDataChannel(kDataChannelLabel,
                                            NULL);
    ASSERT_TRUE(data_channel_.get() != NULL);
    data_observer_.reset(new MockDataChannelObserver(data_channel_));
  }

  DataChannelInterface* data_channel() { return data_channel_; }
  const MockDataChannelObserver* data_observer() const {
    return data_observer_.get();
  }

 protected:
  explicit JsepTestClient(const std::string& id)
      : PeerConnectionTestClientBase<JsepMessageReceiver>(id),
        remove_msid_(false),
        remove_bundle_(false),
        remove_sdes_(false) {
  }

  virtual talk_base::scoped_refptr<webrtc::PeerConnectionInterface>
      CreatePeerConnection(webrtc::PortAllocatorFactoryInterface* factory,
                           const MediaConstraintsInterface* constraints) {
    // CreatePeerConnection with IceServers.
    webrtc::PeerConnectionInterface::IceServers ice_servers;
    webrtc::PeerConnectionInterface::IceServer ice_server;
    ice_server.uri = "stun:stun.l.google.com:19302";
    ice_servers.push_back(ice_server);
    return peer_connection_factory()->CreatePeerConnection(
        ice_servers, constraints, factory, NULL, this);
  }

  // Applies a received offer, then creates, applies and forwards an answer.
  void HandleIncomingOffer(const std::string& msg) {
    LOG(INFO) << id() << "HandleIncomingOffer ";
    if (NumberOfLocalMediaStreams() == 0) {
      // If we are not sending any streams ourselves it is time to add some.
      AddMediaStream(true, true);
    }
    talk_base::scoped_ptr<SessionDescriptionInterface> desc(
        webrtc::CreateSessionDescription("offer", msg, NULL));
    EXPECT_TRUE(DoSetRemoteDescription(desc.release()));
    talk_base::scoped_ptr<SessionDescriptionInterface> answer;
    EXPECT_TRUE(DoCreateAnswer(answer.use()));
    std::string sdp;
    EXPECT_TRUE(answer->ToString(&sdp));
    EXPECT_TRUE(DoSetLocalDescription(answer.release()));
    if (signaling_message_receiver()) {
      signaling_message_receiver()->ReceiveSdpMessage(
          webrtc::SessionDescriptionInterface::kAnswer, sdp);
    }
  }

  void HandleIncomingAnswer(const std::string& msg) {
    LOG(INFO) << id() << "HandleIncomingAnswer";
    talk_base::scoped_ptr<SessionDescriptionInterface> desc(
        webrtc::CreateSessionDescription("answer", msg, NULL));
    EXPECT_TRUE(DoSetRemoteDescription(desc.release()));
  }

  // Shared implementation of DoCreateOffer/DoCreateAnswer. On success the
  // caller takes ownership of *desc.
  bool DoCreateOfferAnswer(SessionDescriptionInterface** desc,
                           bool offer) {
    talk_base::scoped_refptr<MockCreateSessionDescriptionObserver>
        observer(new talk_base::RefCountedObject<
            MockCreateSessionDescriptionObserver>());
    if (offer) {
      pc()->CreateOffer(observer, &session_description_constraints_);
    } else {
      pc()->CreateAnswer(observer, &session_description_constraints_);
    }
    EXPECT_EQ_WAIT(true, observer->called(), kMaxWaitMs);
    *desc = observer->release_desc();
    if (observer->result() && ExpectIceRestart()) {
      // After an ICE restart the new description must not carry over
      // candidates from the previous generation.
      EXPECT_EQ(0u, (*desc)->candidates(0)->count());
    }
    return observer->result();
  }

  bool DoCreateOffer(SessionDescriptionInterface** desc) {
    return DoCreateOfferAnswer(desc, true);
  }

  bool DoCreateAnswer(SessionDescriptionInterface** desc) {
    return DoCreateOfferAnswer(desc, false);
  }

  bool DoSetLocalDescription(SessionDescriptionInterface* desc) {
    talk_base::scoped_refptr<MockSetSessionDescriptionObserver>
        observer(new talk_base::RefCountedObject<
            MockSetSessionDescriptionObserver>());
    LOG(INFO) << id() << "SetLocalDescription ";
    pc()->SetLocalDescription(observer, desc);
    // Ignore the observer result. If we wait for the result with
    // EXPECT_TRUE_WAIT, local ice candidates might be sent to the remote peer
    // before the offer which is an error.
    // The reason is that EXPECT_TRUE_WAIT uses
    // talk_base::Thread::Current()->ProcessMessages(1);
    // ProcessMessages waits at least 1ms but processes all messages before
    // returning. Since this test is synchronous and send messages to the remote
    // peer whenever a callback is invoked, this can lead to messages being
    // sent to the remote peer in the wrong order.
    // TODO(perkj): Find a way to check the result without risking that the
    // order of sent messages are changed. Ex- by posting all messages that are
    // sent to the remote peer.
    return true;
  }

  bool DoSetRemoteDescription(SessionDescriptionInterface* desc) {
    talk_base::scoped_refptr<MockSetSessionDescriptionObserver>
        observer(new talk_base::RefCountedObject<
            MockSetSessionDescriptionObserver>());
    LOG(INFO) << id() << "SetRemoteDescription ";
    pc()->SetRemoteDescription(observer, desc);
    EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
    return observer->result();
  }

  // This modifies all received SDP messages before they are processed.
  void FilterIncomingSdpMessage(std::string* sdp) {
    if (remove_msid_) {
      const char kSdpSsrcAttribute[] = "a=ssrc:";
      RemoveLinesFromSdp(kSdpSsrcAttribute, sdp);
      const char kSdpMsidSupportedAttribute[] = "a=msid-semantic:";
      RemoveLinesFromSdp(kSdpMsidSupportedAttribute, sdp);
    }
    if (remove_bundle_) {
      const char kSdpBundleAttribute[] = "a=group:BUNDLE";
      RemoveLinesFromSdp(kSdpBundleAttribute, sdp);
    }
    if (remove_sdes_) {
      const char kSdpSdesCryptoAttribute[] = "a=crypto";
      RemoveLinesFromSdp(kSdpSdesCryptoAttribute, sdp);
    }
  }

 private:
  webrtc::FakeConstraints session_description_constraints_;
  bool remove_msid_;  // True if MSID should be removed in received SDP.
  bool remove_bundle_;  // True if bundle should be removed in received SDP.
  bool remove_sdes_;  // True if a=crypto should be removed in received SDP.

  talk_base::scoped_refptr<DataChannelInterface> data_channel_;
  talk_base::scoped_ptr<MockDataChannelObserver> data_observer_;
};
+
+template <typename SignalingClass>
+class P2PTestConductor : public testing::Test {
+ public:
+ bool SessionActive() {
+ return initiating_client_->SessionActive() &&
+ receiving_client_->SessionActive();
+ }
+ // Return true if the number of frames provided have been received or it is
+ // known that that will never occur (e.g. no frames will be sent or
+ // captured).
+ bool FramesNotPending(int audio_frames_to_receive,
+ int video_frames_to_receive) {
+ return VideoFramesReceivedCheck(video_frames_to_receive) &&
+ AudioFramesReceivedCheck(audio_frames_to_receive);
+ }
+ bool AudioFramesReceivedCheck(int frames_received) {
+ return initiating_client_->AudioFramesReceivedCheck(frames_received) &&
+ receiving_client_->AudioFramesReceivedCheck(frames_received);
+ }
+ bool VideoFramesReceivedCheck(int frames_received) {
+ return initiating_client_->VideoFramesReceivedCheck(frames_received) &&
+ receiving_client_->VideoFramesReceivedCheck(frames_received);
+ }
+ void VerifyDtmf() {
+ initiating_client_->VerifyDtmf();
+ receiving_client_->VerifyDtmf();
+ }
+
+ void TestUpdateOfferWithRejectedContent() {
+ initiating_client_->Negotiate(true, false);
+ EXPECT_TRUE_WAIT(
+ FramesNotPending(kEndAudioFrameCount * 2, kEndVideoFrameCount),
+ kMaxWaitForFramesMs);
+ // There shouldn't be any more video frame after the new offer is
+ // negotiated.
+ EXPECT_FALSE(VideoFramesReceivedCheck(kEndVideoFrameCount + 1));
+ }
+
+ void VerifyRenderedSize(int width, int height) {
+ EXPECT_EQ(width, receiving_client()->rendered_width());
+ EXPECT_EQ(height, receiving_client()->rendered_height());
+ EXPECT_EQ(width, initializing_client()->rendered_width());
+ EXPECT_EQ(height, initializing_client()->rendered_height());
+ }
+
+ void VerifySessionDescriptions() {
+ initiating_client_->VerifyRejectedMediaInSessionDescription();
+ receiving_client_->VerifyRejectedMediaInSessionDescription();
+ initiating_client_->VerifyLocalIceUfragAndPassword();
+ receiving_client_->VerifyLocalIceUfragAndPassword();
+ }
+
+ P2PTestConductor() {
+ talk_base::InitializeSSL(NULL);
+ }
+ ~P2PTestConductor() {
+ if (initiating_client_) {
+ initiating_client_->set_signaling_message_receiver(NULL);
+ }
+ if (receiving_client_) {
+ receiving_client_->set_signaling_message_receiver(NULL);
+ }
+ }
+
+ bool CreateTestClients() {
+ return CreateTestClients(NULL, NULL);
+ }
+
+  // Creates the caller and callee clients with the given (possibly NULL)
+  // constraints and wires them to exchange signaling messages directly.
+  // Returns false if either client could not be created.
+  bool CreateTestClients(MediaConstraintsInterface* init_constraints,
+                         MediaConstraintsInterface* recv_constraints) {
+    initiating_client_.reset(SignalingClass::CreateClient("Caller: ",
+                                                          init_constraints));
+    receiving_client_.reset(SignalingClass::CreateClient("Callee: ",
+                                                         recv_constraints));
+    if (!initiating_client_ || !receiving_client_) {
+      return false;
+    }
+    initiating_client_->set_signaling_message_receiver(receiving_client_.get());
+    receiving_client_->set_signaling_message_receiver(initiating_client_.get());
+    return true;
+  }
+
+  // Applies per-client video constraints; must be called before the video
+  // tracks are created (i.e. before LocalP2PTest adds media streams).
+  void SetVideoConstraints(const webrtc::FakeConstraints& init_constraints,
+                           const webrtc::FakeConstraints& recv_constraints) {
+    initiating_client_->SetVideoConstraints(init_constraints);
+    receiving_client_->SetVideoConstraints(recv_constraints);
+  }
+
+  // Enables the external video decoder factory on both clients.
+  void EnableVideoDecoderFactory() {
+    initiating_client_->EnableVideoDecoderFactory();
+    receiving_client_->EnableVideoDecoderFactory();
+  }
+
+  // This test sets up a call between two parties. Both parties send static
+  // frames to each other. Once the test is finished the number of sent frames
+  // is compared to the number of received frames.
+  void LocalP2PTest() {
+    // Give the caller a default audio+video stream unless the test already
+    // added its own streams.
+    if (initiating_client_->NumberOfLocalMediaStreams() == 0) {
+      initiating_client_->AddMediaStream(true, true);
+    }
+    initiating_client_->Negotiate();
+    const int kMaxWaitForActivationMs = 5000;
+    // Assert true is used here since next tests are guaranteed to fail and
+    // would eat up 5 seconds.
+    ASSERT_TRUE_WAIT(SessionActive(), kMaxWaitForActivationMs);
+    VerifySessionDescriptions();
+
+
+    // -1 disables the corresponding frame-count check when either side
+    // cannot receive that media type.
+    int audio_frame_count = kEndAudioFrameCount;
+    // TODO(ronghuawu): Add test to cover the case of sendonly and recvonly.
+    if (!initiating_client_->can_receive_audio() ||
+        !receiving_client_->can_receive_audio()) {
+      audio_frame_count = -1;
+    }
+    int video_frame_count = kEndVideoFrameCount;
+    if (!initiating_client_->can_receive_video() ||
+        !receiving_client_->can_receive_video()) {
+      video_frame_count = -1;
+    }
+
+    if (audio_frame_count != -1 || video_frame_count != -1) {
+      // Audio or video is expected to flow, so both sides should get to the
+      // Connected state.
+      // Note: These tests have been observed to fail under heavy load at
+      // shorter timeouts, so they may be flaky.
+      EXPECT_EQ_WAIT(
+          webrtc::PeerConnectionInterface::kIceConnectionConnected,
+          initiating_client_->ice_connection_state(),
+          kMaxWaitForFramesMs);
+      EXPECT_EQ_WAIT(
+          webrtc::PeerConnectionInterface::kIceConnectionConnected,
+          receiving_client_->ice_connection_state(),
+          kMaxWaitForFramesMs);
+    }
+
+    if (initiating_client_->can_receive_audio() ||
+        initiating_client_->can_receive_video()) {
+      // The initiating client can receive media, so it must produce candidates
+      // that will serve as destinations for that media.
+      // TODO(bemasc): Understand why the state is not already Complete here, as
+      // seems to be the case for the receiving client. This may indicate a bug
+      // in the ICE gathering system.
+      EXPECT_NE(webrtc::PeerConnectionInterface::kIceGatheringNew,
+                initiating_client_->ice_gathering_state());
+    }
+    if (receiving_client_->can_receive_audio() ||
+        receiving_client_->can_receive_video()) {
+      EXPECT_EQ_WAIT(webrtc::PeerConnectionInterface::kIceGatheringComplete,
+                     receiving_client_->ice_gathering_state(),
+                     kMaxWaitForFramesMs);
+    }
+
+    // Finally wait until the expected number of frames has been rendered.
+    EXPECT_TRUE_WAIT(FramesNotPending(audio_frame_count, video_frame_count),
+                     kMaxWaitForFramesMs);
+  }
+
+  // Non-owning accessors for the two test clients.
+  SignalingClass* initializing_client() { return initiating_client_.get(); }
+  SignalingClass* receiving_client() { return receiving_client_.get(); }
+
+ private:
+  // Owned clients: initiating_client_ creates the offer, receiving_client_
+  // answers it.
+  talk_base::scoped_ptr<SignalingClass> initiating_client_;
+  talk_base::scoped_ptr<SignalingClass> receiving_client_;
+};
+// Conductor specialized for JSEP-style test clients; used by all tests below.
+typedef P2PTestConductor<JsepTestClient> JsepPeerConnectionP2PTestClient;
+
+// This test sets up a Jsep call between two parties and tests DTMF.
+TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDtmf) {
+  ASSERT_TRUE(CreateTestClients());
+  LocalP2PTest();
+  VerifyDtmf();
+}
+
+// This test sets up a Jsep call between two parties and tests that we can
+// get a video aspect ratio of at least 16:9.
+TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTest16To9) {
+  ASSERT_TRUE(CreateTestClients());
+  FakeConstraints constraint;
+  double requested_ratio = 640.0/360;
+  constraint.SetMandatoryMinAspectRatio(requested_ratio);
+  SetVideoConstraints(constraint, constraint);
+  LocalP2PTest();
+
+  // The rendered height is used as a divisor below, so it must be strictly
+  // positive; ASSERT_LE(0, h) would let h == 0 through and divide by zero.
+  ASSERT_LT(0, initializing_client()->rendered_height());
+  double initiating_video_ratio =
+      static_cast<double> (initializing_client()->rendered_width()) /
+      initializing_client()->rendered_height();
+  EXPECT_LE(requested_ratio, initiating_video_ratio);
+
+  ASSERT_LT(0, receiving_client()->rendered_height());
+  double receiving_video_ratio =
+      static_cast<double> (receiving_client()->rendered_width()) /
+      receiving_client()->rendered_height();
+  EXPECT_LE(requested_ratio, receiving_video_ratio);
+}
+
+// This test sets up a Jsep call between two parties and tests that the
+// received video has a resolution of 1280*720.
+// TODO(mallinath): Enable when
+// http://code.google.com/p/webrtc/issues/detail?id=981 is fixed.
+TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTest1280By720) {
+  ASSERT_TRUE(CreateTestClients());
+  FakeConstraints constraint;
+  constraint.SetMandatoryMinWidth(1280);
+  constraint.SetMandatoryMinHeight(720);
+  SetVideoConstraints(constraint, constraint);
+  LocalP2PTest();
+  VerifyRenderedSize(1280, 720);
+}
+
+// This test sets up a call between two endpoints that are configured to use
+// DTLS key agreement. As a result, DTLS is negotiated and used for transport.
+TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDtls) {
+  // Skipped on builds without DTLS-SRTP support.
+  MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
+  FakeConstraints setup_constraints;
+  setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
+                                 true);
+  ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
+  LocalP2PTest();
+  VerifyRenderedSize(640, 480);
+}
+
+// This test sets up a call between an endpoint configured to use either SDES
+// or DTLS (the offerer) and just SDES (the answerer). As a result, SDES is
+// used instead of DTLS.
+TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferDtlsToSdes) {
+  MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
+  FakeConstraints setup_constraints;
+  setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
+                                 true);
+  // Only the caller requests DTLS; the callee gets no constraints.
+  ASSERT_TRUE(CreateTestClients(&setup_constraints, NULL));
+  LocalP2PTest();
+  VerifyRenderedSize(640, 480);
+}
+
+// This test sets up a call between an endpoint configured to use SDES
+// (the offerer) and either SDES or DTLS (the answerer). As a result, SDES is
+// used instead of DTLS.
+TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferSdesToDtls) {
+  MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
+  FakeConstraints setup_constraints;
+  setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
+                                 true);
+  // Only the callee requests DTLS; the caller gets no constraints.
+  ASSERT_TRUE(CreateTestClients(NULL, &setup_constraints));
+  LocalP2PTest();
+  VerifyRenderedSize(640, 480);
+}
+
+// This test sets up a call between two endpoints that are configured to use
+// DTLS key agreement. The offerer doesn't support SDES. As a result, DTLS is
+// negotiated and used for transport.
+TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferDtlsButNotSdes) {
+  MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
+  FakeConstraints setup_constraints;
+  setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
+                                 true);
+  ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
+  // Strip the SDES crypto lines from what the callee receives.
+  receiving_client()->RemoveSdesCryptoFromReceivedSdp(true);
+  LocalP2PTest();
+  VerifyRenderedSize(640, 480);
+}
+
+// This test sets up a Jsep call between two parties, and the callee only
+// accepts to receive video.
+TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerVideo) {
+  ASSERT_TRUE(CreateTestClients());
+  receiving_client()->SetReceiveAudioVideo(false, true);
+  LocalP2PTest();
+}
+
+// This test sets up a Jsep call between two parties, and the callee only
+// accepts to receive audio.
+TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerAudio) {
+  ASSERT_TRUE(CreateTestClients());
+  receiving_client()->SetReceiveAudioVideo(true, false);
+  LocalP2PTest();
+}
+
+// This test sets up a Jsep call between two parties, and the callee rejects
+// both audio and video.
+TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerNone) {
+  ASSERT_TRUE(CreateTestClients());
+  receiving_client()->SetReceiveAudioVideo(false, false);
+  LocalP2PTest();
+}
+
+// This test sets up an audio and video call between two parties. After the
+// call runs for a while (10 frames), the caller sends an update offer with
+// video being rejected. Once the re-negotiation is done, the video flow
+// should stop and the audio flow should continue.
+TEST_F(JsepPeerConnectionP2PTestClient, UpdateOfferWithRejectedContent) {
+  ASSERT_TRUE(CreateTestClients());
+  LocalP2PTest();
+  TestUpdateOfferWithRejectedContent();
+}
+
+// This test sets up a Jsep call between two parties. The MSID is removed from
+// the SDP strings from the caller.
+TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestWithoutMsid) {
+  ASSERT_TRUE(CreateTestClients());
+  receiving_client()->RemoveMsidFromReceivedSdp(true);
+  // TODO(perkj): Currently there is a bug that cause audio to stop playing if
+  // audio and video is muxed when MSID is disabled. Remove
+  // SetRemoveBundleFromSdp once
+  // https://code.google.com/p/webrtc/issues/detail?id=1193 is fixed.
+  receiving_client()->RemoveBundleFromReceivedSdp(true);
+  LocalP2PTest();
+}
+
+// This test sets up a Jsep call between two parties and the initiating peer
+// sends two streams.
+// TODO(perkj): Disabled due to
+// https://code.google.com/p/webrtc/issues/detail?id=1454
+TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestTwoStreams) {
+  ASSERT_TRUE(CreateTestClients());
+  // Set optional video constraint to max 320pixels to decrease CPU usage.
+  FakeConstraints constraint;
+  constraint.SetOptionalMaxWidth(320);
+  SetVideoConstraints(constraint, constraint);
+  // One audio+video stream plus one video-only stream from the caller.
+  initializing_client()->AddMediaStream(true, true);
+  initializing_client()->AddMediaStream(false, true);
+  ASSERT_EQ(2u, initializing_client()->NumberOfLocalMediaStreams());
+  LocalP2PTest();
+  EXPECT_EQ(2u, receiving_client()->number_of_remote_streams());
+}
+
+// Test that we can receive the audio output level from a remote audio track.
+TEST_F(JsepPeerConnectionP2PTestClient, GetAudioOutputLevelStats) {
+  ASSERT_TRUE(CreateTestClients());
+  LocalP2PTest();
+
+  // Grab the first remote audio track seen by the caller.
+  StreamCollectionInterface* remote_streams =
+      initializing_client()->remote_streams();
+  ASSERT_GT(remote_streams->count(), 0u);
+  ASSERT_GT(remote_streams->at(0)->GetAudioTracks().size(), 0u);
+  MediaStreamTrackInterface* remote_audio_track =
+      remote_streams->at(0)->GetAudioTracks()[0];
+
+  // Get the audio output level stats. Note that the level is not available
+  // until a RTCP packet has been received.
+  EXPECT_TRUE_WAIT(
+      initializing_client()->GetAudioOutputLevelStats(remote_audio_track) > 0,
+      kMaxWaitForStatsMs);
+}
+
+// Test that an audio input level is reported.
+TEST_F(JsepPeerConnectionP2PTestClient, GetAudioInputLevelStats) {
+  ASSERT_TRUE(CreateTestClients());
+  LocalP2PTest();
+
+  // Get the audio input level stats. The level should be available very
+  // soon after the test starts.
+  EXPECT_TRUE_WAIT(initializing_client()->GetAudioInputLevelStats() > 0,
+                   kMaxWaitForStatsMs);
+}
+
+// Test that we can get incoming byte counts from both audio and video tracks.
+TEST_F(JsepPeerConnectionP2PTestClient, GetBytesReceivedStats) {
+  ASSERT_TRUE(CreateTestClients());
+  LocalP2PTest();
+
+  StreamCollectionInterface* remote_streams =
+      initializing_client()->remote_streams();
+  ASSERT_GT(remote_streams->count(), 0u);
+  ASSERT_GT(remote_streams->at(0)->GetAudioTracks().size(), 0u);
+  MediaStreamTrackInterface* remote_audio_track =
+      remote_streams->at(0)->GetAudioTracks()[0];
+  EXPECT_TRUE_WAIT(
+      initializing_client()->GetBytesReceivedStats(remote_audio_track) > 0,
+      kMaxWaitForStatsMs);
+
+  // Guard the [0] access below, mirroring the audio-track check above;
+  // indexing an empty vector would be out-of-bounds instead of a clean
+  // test failure.
+  ASSERT_GT(remote_streams->at(0)->GetVideoTracks().size(), 0u);
+  MediaStreamTrackInterface* remote_video_track =
+      remote_streams->at(0)->GetVideoTracks()[0];
+  EXPECT_TRUE_WAIT(
+      initializing_client()->GetBytesReceivedStats(remote_video_track) > 0,
+      kMaxWaitForStatsMs);
+}
+
+// Test that we can get outgoing byte counts from both audio and video tracks.
+TEST_F(JsepPeerConnectionP2PTestClient, GetBytesSentStats) {
+  ASSERT_TRUE(CreateTestClients());
+  LocalP2PTest();
+
+  StreamCollectionInterface* local_streams =
+      initializing_client()->local_streams();
+  ASSERT_GT(local_streams->count(), 0u);
+  ASSERT_GT(local_streams->at(0)->GetAudioTracks().size(), 0u);
+  MediaStreamTrackInterface* local_audio_track =
+      local_streams->at(0)->GetAudioTracks()[0];
+  EXPECT_TRUE_WAIT(
+      initializing_client()->GetBytesSentStats(local_audio_track) > 0,
+      kMaxWaitForStatsMs);
+
+  // Guard the [0] access below, mirroring the audio-track check above;
+  // indexing an empty vector would be out-of-bounds instead of a clean
+  // test failure.
+  ASSERT_GT(local_streams->at(0)->GetVideoTracks().size(), 0u);
+  MediaStreamTrackInterface* local_video_track =
+      local_streams->at(0)->GetVideoTracks()[0];
+  EXPECT_TRUE_WAIT(
+      initializing_client()->GetBytesSentStats(local_video_track) > 0,
+      kMaxWaitForStatsMs);
+}
+
+// This test sets up a call between two parties with audio, video and data.
+TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDataChannel) {
+  FakeConstraints setup_constraints;
+  setup_constraints.SetAllowRtpDataChannels();
+  ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
+  initializing_client()->CreateDataChannel();
+  LocalP2PTest();
+  ASSERT_TRUE(initializing_client()->data_channel() != NULL);
+  ASSERT_TRUE(receiving_client()->data_channel() != NULL);
+  EXPECT_TRUE_WAIT(initializing_client()->data_observer()->IsOpen(),
+                   kMaxWaitMs);
+  EXPECT_TRUE_WAIT(receiving_client()->data_observer()->IsOpen(),
+                   kMaxWaitMs);
+
+  // Exchange a message in each direction and verify delivery.
+  std::string data = "hello world";
+  initializing_client()->data_channel()->Send(DataBuffer(data));
+  EXPECT_EQ_WAIT(data, receiving_client()->data_observer()->last_message(),
+                 kMaxWaitMs);
+  receiving_client()->data_channel()->Send(DataBuffer(data));
+  EXPECT_EQ_WAIT(data, initializing_client()->data_observer()->last_message(),
+                 kMaxWaitMs);
+
+  // Closing one side and renegotiating should close the channel on both.
+  receiving_client()->data_channel()->Close();
+  // Send new offer and answer.
+  receiving_client()->Negotiate();
+  EXPECT_FALSE(initializing_client()->data_observer()->IsOpen());
+  EXPECT_FALSE(receiving_client()->data_observer()->IsOpen());
+}
+
+// This test sets up a call between two parties and creates a data channel.
+// The test verifies that received data is buffered unless an observer has
+// been registered.
+// Rtp data channels can receive data before the underlying
+// transport has detected that a channel is writable and thus data can be
+// received before the data channel state changes to open. That is hard to test
+// but the same buffering is used in that case.
+TEST_F(JsepPeerConnectionP2PTestClient, RegisterDataChannelObserver) {
+  FakeConstraints setup_constraints;
+  setup_constraints.SetAllowRtpDataChannels();
+  ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
+  initializing_client()->CreateDataChannel();
+  initializing_client()->Negotiate();
+
+  ASSERT_TRUE(initializing_client()->data_channel() != NULL);
+  ASSERT_TRUE(receiving_client()->data_channel() != NULL);
+  EXPECT_TRUE_WAIT(initializing_client()->data_observer()->IsOpen(),
+                   kMaxWaitMs);
+  EXPECT_EQ_WAIT(DataChannelInterface::kOpen,
+                 receiving_client()->data_channel()->state(), kMaxWaitMs);
+
+  // Unregister the existing observer.
+  receiving_client()->data_channel()->UnregisterObserver();
+  std::string data = "hello world";
+  initializing_client()->data_channel()->Send(DataBuffer(data));
+  // Wait a while to allow the sent data to arrive before an observer is
+  // registered.
+  talk_base::Thread::Current()->ProcessMessages(100);
+
+  // The buffered message must be delivered to the newly attached observer.
+  MockDataChannelObserver new_observer(receiving_client()->data_channel());
+  EXPECT_EQ_WAIT(data, new_observer.last_message(), kMaxWaitMs);
+}
+
+// This test sets up a call between two parties with audio and video, but
+// only the initiating client supports data.
+TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestReceiverDoesntSupportData) {
+  FakeConstraints setup_constraints;
+  setup_constraints.SetAllowRtpDataChannels();
+  // Only the caller allows RTP data channels.
+  ASSERT_TRUE(CreateTestClients(&setup_constraints, NULL));
+  initializing_client()->CreateDataChannel();
+  LocalP2PTest();
+  EXPECT_TRUE(initializing_client()->data_channel() != NULL);
+  EXPECT_FALSE(receiving_client()->data_channel());
+  EXPECT_FALSE(initializing_client()->data_observer()->IsOpen());
+}
+
+// This test sets up a call between two parties with audio and video. After
+// audio and video are set up and flowing, a data channel is negotiated.
+TEST_F(JsepPeerConnectionP2PTestClient, AddDataChannelAfterRenegotiation) {
+  FakeConstraints setup_constraints;
+  setup_constraints.SetAllowRtpDataChannels();
+  ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
+  LocalP2PTest();
+  initializing_client()->CreateDataChannel();
+  // Send new offer and answer.
+  initializing_client()->Negotiate();
+  ASSERT_TRUE(initializing_client()->data_channel() != NULL);
+  ASSERT_TRUE(receiving_client()->data_channel() != NULL);
+  EXPECT_TRUE_WAIT(initializing_client()->data_observer()->IsOpen(),
+                   kMaxWaitMs);
+  EXPECT_TRUE_WAIT(receiving_client()->data_observer()->IsOpen(),
+                   kMaxWaitMs);
+}
+
+// This test sets up a call between two parties with audio and video.
+// During the call, the initializing side restarts ICE and the test verifies
+// that new ICE candidates are generated and audio and video still can flow.
+TEST_F(JsepPeerConnectionP2PTestClient, IceRestart) {
+  ASSERT_TRUE(CreateTestClients());
+
+  // Negotiate and wait for ice completion and make sure audio and video plays.
+  LocalP2PTest();
+
+  // Create a SDP string of the first audio candidate for both clients.
+  const webrtc::IceCandidateCollection* audio_candidates_initiator =
+      initializing_client()->pc()->local_description()->candidates(0);
+  const webrtc::IceCandidateCollection* audio_candidates_receiver =
+      receiving_client()->pc()->local_description()->candidates(0);
+  ASSERT_GT(audio_candidates_initiator->count(), 0u);
+  ASSERT_GT(audio_candidates_receiver->count(), 0u);
+  std::string initiator_candidate;
+  EXPECT_TRUE(
+      audio_candidates_initiator->at(0)->ToString(&initiator_candidate));
+  std::string receiver_candidate;
+  EXPECT_TRUE(audio_candidates_receiver->at(0)->ToString(&receiver_candidate));
+
+  // Restart ice on the initializing client.
+  receiving_client()->SetExpectIceRestart(true);
+  initializing_client()->IceRestart();
+
+  // Negotiate and wait for ice completion again and make sure audio and video
+  // plays.
+  LocalP2PTest();
+
+  // Create a SDP string of the first audio candidate for both clients again.
+  const webrtc::IceCandidateCollection* audio_candidates_initiator_restart =
+      initializing_client()->pc()->local_description()->candidates(0);
+  const webrtc::IceCandidateCollection* audio_candidates_receiver_restart =
+      receiving_client()->pc()->local_description()->candidates(0);
+  ASSERT_GT(audio_candidates_initiator_restart->count(), 0u);
+  ASSERT_GT(audio_candidates_receiver_restart->count(), 0u);
+  std::string initiator_candidate_restart;
+  EXPECT_TRUE(audio_candidates_initiator_restart->at(0)->ToString(
+      &initiator_candidate_restart));
+  std::string receiver_candidate_restart;
+  EXPECT_TRUE(audio_candidates_receiver_restart->at(0)->ToString(
+      &receiver_candidate_restart));
+
+  // Verify that the first candidates in the local session descriptions has
+  // changed.
+  EXPECT_NE(initiator_candidate, initiator_candidate_restart);
+  EXPECT_NE(receiver_candidate, receiver_candidate_restart);
+}
+
+
+// This test sets up a Jsep call between two parties with an external
+// VideoDecoderFactory.
+TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestWithVideoDecoderFactory) {
+  ASSERT_TRUE(CreateTestClients());
+  EnableVideoDecoderFactory();
+  LocalP2PTest();
+}
diff --git a/talk/app/webrtc/peerconnectionfactory.cc b/talk/app/webrtc/peerconnectionfactory.cc
new file mode 100644
index 0000000..7ae5a3b
--- /dev/null
+++ b/talk/app/webrtc/peerconnectionfactory.cc
@@ -0,0 +1,369 @@
+/*
+ * libjingle
+ * Copyright 2004--2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/peerconnectionfactory.h"
+
+#include "talk/app/webrtc/audiotrack.h"
+#include "talk/app/webrtc/localaudiosource.h"
+#include "talk/app/webrtc/localvideosource.h"
+#include "talk/app/webrtc/mediastreamproxy.h"
+#include "talk/app/webrtc/mediastreamtrackproxy.h"
+#include "talk/app/webrtc/peerconnection.h"
+#include "talk/app/webrtc/peerconnectionproxy.h"
+#include "talk/app/webrtc/portallocatorfactory.h"
+#include "talk/app/webrtc/videosourceproxy.h"
+#include "talk/app/webrtc/videotrack.h"
+#include "talk/media/devices/dummydevicemanager.h"
+#include "talk/media/webrtc/webrtcmediaengine.h"
+#include "talk/media/webrtc/webrtcvideodecoderfactory.h"
+#include "talk/media/webrtc/webrtcvideoencoderfactory.h"
+#include "webrtc/modules/audio_device/include/audio_device.h"
+
+using talk_base::scoped_refptr;
+
+namespace {
+
+// Payload for MSG_INIT_FACTORY: carries the Initialize_s() result back.
+typedef talk_base::TypedMessageData<bool> InitMessageData;
+
+// Payload for MSG_CREATE_PEERCONNECTION: bundles the arguments for
+// CreatePeerConnection_s() and receives the created PeerConnection.
+struct CreatePeerConnectionParams : public talk_base::MessageData {
+  CreatePeerConnectionParams(
+      const webrtc::PeerConnectionInterface::IceServers& configuration,
+      const webrtc::MediaConstraintsInterface* constraints,
+      webrtc::PortAllocatorFactoryInterface* allocator_factory,
+      webrtc::PeerConnectionObserver* observer)
+      : configuration(configuration),
+        constraints(constraints),
+        allocator_factory(allocator_factory),
+        observer(observer) {
+  }
+  scoped_refptr<webrtc::PeerConnectionInterface> peerconnection;
+  // NOTE(review): held by reference — the caller's IceServers must outlive
+  // the synchronous Send() that carries this message (which it does here).
+  const webrtc::PeerConnectionInterface::IceServers& configuration;
+  const webrtc::MediaConstraintsInterface* constraints;
+  scoped_refptr<webrtc::PortAllocatorFactoryInterface> allocator_factory;
+  webrtc::PeerConnectionObserver* observer;
+};
+
+// Deprecated variant of CreatePeerConnectionParams that carries the
+// configuration as a raw string instead of an IceServers collection.
+struct CreatePeerConnectionParamsDeprecated : public talk_base::MessageData {
+  CreatePeerConnectionParamsDeprecated(
+      const std::string& configuration,
+      webrtc::PortAllocatorFactoryInterface* allocator_factory,
+      webrtc::PeerConnectionObserver* observer)
+      : configuration(configuration),
+        allocator_factory(allocator_factory),
+        observer(observer) {
+  }
+  scoped_refptr<webrtc::PeerConnectionInterface> peerconnection;
+  const std::string& configuration;
+  scoped_refptr<webrtc::PortAllocatorFactoryInterface> allocator_factory;
+  webrtc::PeerConnectionObserver* observer;
+};
+
+// Payload for MSG_CREATE_AUDIOSOURCE: in-parameter constraints, out-parameter
+// source.
+struct CreateAudioSourceParams : public talk_base::MessageData {
+  explicit CreateAudioSourceParams(
+      const webrtc::MediaConstraintsInterface* constraints)
+      : constraints(constraints) {
+  }
+  const webrtc::MediaConstraintsInterface* constraints;
+  scoped_refptr<webrtc::AudioSourceInterface> source;
+};
+
+// Payload for MSG_CREATE_VIDEOSOURCE: in-parameters capturer and constraints,
+// out-parameter source.
+struct CreateVideoSourceParams : public talk_base::MessageData {
+  CreateVideoSourceParams(cricket::VideoCapturer* capturer,
+                          const webrtc::MediaConstraintsInterface* constraints)
+      : capturer(capturer),
+        constraints(constraints) {
+  }
+  cricket::VideoCapturer* capturer;
+  const webrtc::MediaConstraintsInterface* constraints;
+  scoped_refptr<webrtc::VideoSourceInterface> source;
+};
+
+// Message IDs dispatched to the signaling thread by
+// PeerConnectionFactory::OnMessage().
+enum {
+  MSG_INIT_FACTORY = 1,
+  MSG_TERMINATE_FACTORY,
+  MSG_CREATE_PEERCONNECTION,
+  MSG_CREATE_AUDIOSOURCE,
+  MSG_CREATE_VIDEOSOURCE,
+};
+
+}  // namespace
+
+namespace webrtc {
+
+// Creates a factory that owns its own signaling and worker threads and uses
+// the default device modules. Returns NULL if initialization fails.
+scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory() {
+  scoped_refptr<PeerConnectionFactory> pc_factory(
+      new talk_base::RefCountedObject<PeerConnectionFactory>());
+
+  if (!pc_factory->Initialize()) {
+    return NULL;
+  }
+  return pc_factory;
+}
+
+// Creates a factory using caller-supplied threads, audio device module and
+// codec factories (none of which are owned by the factory — see the
+// owns_ptrs_ flag in the constructor). Returns NULL if initialization fails.
+scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+    talk_base::Thread* worker_thread,
+    talk_base::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    cricket::WebRtcVideoEncoderFactory* encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* decoder_factory) {
+  scoped_refptr<PeerConnectionFactory> pc_factory(
+      new talk_base::RefCountedObject<PeerConnectionFactory>(
+          worker_thread, signaling_thread, default_adm,
+          encoder_factory, decoder_factory));
+  if (!pc_factory->Initialize()) {
+    return NULL;
+  }
+  return pc_factory;
+}
+
+// Default constructor: the factory creates and owns both threads
+// (owns_ptrs_ == true, so the destructor deletes them).
+PeerConnectionFactory::PeerConnectionFactory()
+    : owns_ptrs_(true),
+      signaling_thread_(new talk_base::Thread),
+      worker_thread_(new talk_base::Thread) {
+  bool result = signaling_thread_->Start();
+  ASSERT(result);
+  // NOTE(review): ASSERT compiles away in release builds, so a failed
+  // Start() would go unnoticed there.
+  result = worker_thread_->Start();
+  ASSERT(result);
+}
+
+// Dependency-injection constructor: threads, ADM and codec factories are
+// borrowed from the caller (owns_ptrs_ == false — they are not deleted).
+PeerConnectionFactory::PeerConnectionFactory(
+    talk_base::Thread* worker_thread,
+    talk_base::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory)
+    : owns_ptrs_(false),
+      signaling_thread_(signaling_thread),
+      worker_thread_(worker_thread),
+      default_adm_(default_adm),
+      video_encoder_factory_(video_encoder_factory),
+      video_decoder_factory_(video_decoder_factory) {
+  ASSERT(worker_thread != NULL);
+  ASSERT(signaling_thread != NULL);
+  // TODO: Currently there is no way of creating an external adm in the
+  // libjingle source tree. So we can't currently assert if this is NULL.
+  // ASSERT(default_adm != NULL);
+}
+
+// Tears down signaling-thread state synchronously, then deletes the threads
+// if this factory created them (default constructor path).
+PeerConnectionFactory::~PeerConnectionFactory() {
+  signaling_thread_->Clear(this);
+  signaling_thread_->Send(this, MSG_TERMINATE_FACTORY);
+  if (owns_ptrs_) {
+    delete signaling_thread_;
+    delete worker_thread_;
+  }
+}
+
+// Runs Initialize_s() synchronously on the signaling thread and returns its
+// result.
+bool PeerConnectionFactory::Initialize() {
+  InitMessageData result(false);
+  signaling_thread_->Send(this, MSG_INIT_FACTORY, &result);
+  return result.data();
+}
+
+// Signaling-thread dispatcher: each public factory method posts one of these
+// messages with a params struct and reads the result out of it after the
+// synchronous Send() returns.
+void PeerConnectionFactory::OnMessage(talk_base::Message* msg) {
+  switch (msg->message_id) {
+    case MSG_INIT_FACTORY: {
+      InitMessageData* pdata = static_cast<InitMessageData*> (msg->pdata);
+      pdata->data() = Initialize_s();
+      break;
+    }
+    case MSG_TERMINATE_FACTORY: {
+      Terminate_s();
+      break;
+    }
+    case MSG_CREATE_PEERCONNECTION: {
+      CreatePeerConnectionParams* pdata =
+          static_cast<CreatePeerConnectionParams*> (msg->pdata);
+      pdata->peerconnection = CreatePeerConnection_s(pdata->configuration,
+                                                     pdata->constraints,
+                                                     pdata->allocator_factory,
+                                                     pdata->observer);
+      break;
+    }
+    case MSG_CREATE_AUDIOSOURCE: {
+      CreateAudioSourceParams* pdata =
+          static_cast<CreateAudioSourceParams*>(msg->pdata);
+      pdata->source = CreateAudioSource_s(pdata->constraints);
+      break;
+    }
+    case MSG_CREATE_VIDEOSOURCE: {
+      CreateVideoSourceParams* pdata =
+          static_cast<CreateVideoSourceParams*> (msg->pdata);
+      pdata->source = CreateVideoSource_s(pdata->capturer, pdata->constraints);
+      break;
+    }
+  }
+}
+
+// Signaling-thread part of Initialize(): creates the port allocator factory,
+// media engine and channel manager. Returns false on any failure.
+bool PeerConnectionFactory::Initialize_s() {
+  talk_base::InitRandom(talk_base::Time());
+
+  allocator_factory_ = PortAllocatorFactory::Create(worker_thread_);
+  if (!allocator_factory_)
+    return false;
+
+  // NOTE(review): the raw device_manager and webrtc_media_engine pointers
+  // are handed to ChannelManager below — presumably it takes ownership;
+  // confirm, since they leak if the constructor or Init() fails.
+  cricket::DummyDeviceManager* device_manager(
+      new cricket::DummyDeviceManager());
+  // TODO: Need to make sure only one VoE is created inside
+  // WebRtcMediaEngine.
+  cricket::WebRtcMediaEngine* webrtc_media_engine(
+      new cricket::WebRtcMediaEngine(default_adm_.get(),
+                                     NULL,  // No secondary adm.
+                                     video_encoder_factory_.get(),
+                                     video_decoder_factory_.get()));
+
+  channel_manager_.reset(new cricket::ChannelManager(
+      webrtc_media_engine, device_manager, worker_thread_));
+  if (!channel_manager_->Init()) {
+    return false;
+  }
+  return true;
+}
+
+// Terminate what we created on the signaling thread.
+void PeerConnectionFactory::Terminate_s() {
+  channel_manager_.reset(NULL);
+  allocator_factory_ = NULL;
+}
+
+// Signaling-thread part of CreateAudioSource().
+talk_base::scoped_refptr<AudioSourceInterface>
+PeerConnectionFactory::CreateAudioSource_s(
+    const MediaConstraintsInterface* constraints) {
+  talk_base::scoped_refptr<LocalAudioSource> source(
+      LocalAudioSource::Create(constraints));
+  return source;
+}
+
+// Signaling-thread part of CreateVideoSource(); wraps the source in a proxy
+// that marshals calls back to the signaling thread.
+talk_base::scoped_refptr<VideoSourceInterface>
+PeerConnectionFactory::CreateVideoSource_s(
+    cricket::VideoCapturer* capturer,
+    const MediaConstraintsInterface* constraints) {
+  talk_base::scoped_refptr<LocalVideoSource> source(
+      LocalVideoSource::Create(channel_manager_.get(), capturer,
+                               constraints));
+  return VideoSourceProxy::Create(signaling_thread_, source);
+}
+
+// Creates a PeerConnection by marshaling the call to the signaling thread.
+// NOTE(review): dtls_identity_service is accepted but not forwarded at this
+// revision — confirm whether DTLS identity support is intentionally pending.
+scoped_refptr<PeerConnectionInterface>
+PeerConnectionFactory::CreatePeerConnection(
+    const PeerConnectionInterface::IceServers& configuration,
+    const MediaConstraintsInterface* constraints,
+    PortAllocatorFactoryInterface* allocator_factory,
+    DTLSIdentityServiceInterface* dtls_identity_service,
+    PeerConnectionObserver* observer) {
+  CreatePeerConnectionParams params(configuration, constraints,
+                                    allocator_factory, observer);
+  // Fixed: "&params" had been mangled into the pilcrow mojibake "¶ms"
+  // (HTML-entity corruption of "&para"), which does not compile.
+  signaling_thread_->Send(this, MSG_CREATE_PEERCONNECTION, &params);
+  return params.peerconnection;
+}
+
+// Convenience overload: uses the factory's own port allocator factory
+// (NULL is substituted inside CreatePeerConnection_s).
+scoped_refptr<PeerConnectionInterface>
+PeerConnectionFactory::CreatePeerConnection(
+    const PeerConnectionInterface::IceServers& configuration,
+    const MediaConstraintsInterface* constraints,
+    DTLSIdentityServiceInterface* dtls_identity_service,
+    PeerConnectionObserver* observer) {
+  return CreatePeerConnection(
+      configuration, constraints, NULL, dtls_identity_service, observer);
+}
+
+// Signaling-thread part of CreatePeerConnection(): constructs and initializes
+// the PeerConnection, preferring the caller-supplied allocator factory over
+// the factory's own. Returns NULL if initialization fails.
+talk_base::scoped_refptr<PeerConnectionInterface>
+PeerConnectionFactory::CreatePeerConnection_s(
+    const PeerConnectionInterface::IceServers& configuration,
+    const MediaConstraintsInterface* constraints,
+    PortAllocatorFactoryInterface* allocator_factory,
+    PeerConnectionObserver* observer) {
+  ASSERT(allocator_factory || allocator_factory_);
+  talk_base::scoped_refptr<PeerConnection> pc(
+      new talk_base::RefCountedObject<PeerConnection>(this));
+  if (!pc->Initialize(
+      configuration,
+      constraints,
+      allocator_factory ? allocator_factory : allocator_factory_.get(),
+      observer)) {
+    return NULL;
+  }
+  return PeerConnectionProxy::Create(signaling_thread(), pc);
+}
+
+// Creates a local MediaStream wrapped in a signaling-thread proxy.
+scoped_refptr<MediaStreamInterface>
+PeerConnectionFactory::CreateLocalMediaStream(const std::string& label) {
+  return MediaStreamProxy::Create(signaling_thread_,
+                                  MediaStream::Create(label));
+}
+
+// Creates an audio source by marshaling the call to the signaling thread.
+talk_base::scoped_refptr<AudioSourceInterface>
+PeerConnectionFactory::CreateAudioSource(
+    const MediaConstraintsInterface* constraints) {
+  CreateAudioSourceParams params(constraints);
+  // Fixed: "&params" had been mangled into the pilcrow mojibake "¶ms"
+  // (HTML-entity corruption of "&para"), which does not compile.
+  signaling_thread_->Send(this, MSG_CREATE_AUDIOSOURCE, &params);
+  return params.source;
+}
+
+// Creates a video source for |capturer| by marshaling the call to the
+// signaling thread.
+talk_base::scoped_refptr<VideoSourceInterface>
+PeerConnectionFactory::CreateVideoSource(
+    cricket::VideoCapturer* capturer,
+    const MediaConstraintsInterface* constraints) {
+  CreateVideoSourceParams params(capturer, constraints);
+  // Fixed: "&params" had been mangled into the pilcrow mojibake "¶ms"
+  // (HTML-entity corruption of "&para"), which does not compile.
+  signaling_thread_->Send(this, MSG_CREATE_VIDEOSOURCE, &params);
+  return params.source;
+}
+
+// Creates a video track bound to |source|, wrapped in a signaling-thread
+// proxy.
+talk_base::scoped_refptr<VideoTrackInterface>
+PeerConnectionFactory::CreateVideoTrack(
+    const std::string& id,
+    VideoSourceInterface* source) {
+  talk_base::scoped_refptr<VideoTrackInterface> track(
+      VideoTrack::Create(id, source));
+  return VideoTrackProxy::Create(signaling_thread_, track);
+}
+
+// Creates an audio track bound to |source|, wrapped in a signaling-thread
+// proxy.
+scoped_refptr<AudioTrackInterface> PeerConnectionFactory::CreateAudioTrack(
+    const std::string& id,
+    AudioSourceInterface* source) {
+  talk_base::scoped_refptr<AudioTrackInterface> track(
+      AudioTrack::Create(id, source));
+  return AudioTrackProxy::Create(signaling_thread_, track);
+}
+
+// Non-owning accessors for the factory's channel manager and threads.
+cricket::ChannelManager* PeerConnectionFactory::channel_manager() {
+  return channel_manager_.get();
+}
+
+talk_base::Thread* PeerConnectionFactory::signaling_thread() {
+  return signaling_thread_;
+}
+
+talk_base::Thread* PeerConnectionFactory::worker_thread() {
+  return worker_thread_;
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/peerconnectionfactory.h b/talk/app/webrtc/peerconnectionfactory.h
new file mode 100644
index 0000000..c0e15e3
--- /dev/null
+++ b/talk/app/webrtc/peerconnectionfactory.h
@@ -0,0 +1,127 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TALK_APP_WEBRTC_PEERCONNECTIONFACTORY_H_
+#define TALK_APP_WEBRTC_PEERCONNECTIONFACTORY_H_
+
+#include <string>
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/peerconnectioninterface.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/thread.h"
+#include "talk/session/media/channelmanager.h"
+
+namespace webrtc {
+
+// Concrete implementation of PeerConnectionFactoryInterface. Implements
+// talk_base::MessageHandler so that object creation can be marshalled to
+// the signaling thread via OnMessage().
+class PeerConnectionFactory : public PeerConnectionFactoryInterface,
+                              public talk_base::MessageHandler {
+ public:
+  // Creates a PeerConnection using the factory's own allocator factory.
+  virtual talk_base::scoped_refptr<PeerConnectionInterface>
+      CreatePeerConnection(
+          const PeerConnectionInterface::IceServers& configuration,
+          const MediaConstraintsInterface* constraints,
+          DTLSIdentityServiceInterface* dtls_identity_service,
+          PeerConnectionObserver* observer);
+
+  // Creates a PeerConnection using a caller-supplied allocator factory.
+  virtual talk_base::scoped_refptr<PeerConnectionInterface>
+      CreatePeerConnection(
+          const PeerConnectionInterface::IceServers& configuration,
+          const MediaConstraintsInterface* constraints,
+          PortAllocatorFactoryInterface* allocator_factory,
+          DTLSIdentityServiceInterface* dtls_identity_service,
+          PeerConnectionObserver* observer);
+  // One-time initialization of the factory; presumably dispatches to
+  // Initialize_s() on the signaling thread. Must succeed before the
+  // Create* methods are used. TODO(review): confirm threading contract.
+  bool Initialize();
+
+  virtual talk_base::scoped_refptr<MediaStreamInterface>
+      CreateLocalMediaStream(const std::string& label);
+
+  virtual talk_base::scoped_refptr<AudioSourceInterface> CreateAudioSource(
+      const MediaConstraintsInterface* constraints);
+
+  virtual talk_base::scoped_refptr<VideoSourceInterface> CreateVideoSource(
+      cricket::VideoCapturer* capturer,
+      const MediaConstraintsInterface* constraints);
+
+  virtual talk_base::scoped_refptr<VideoTrackInterface>
+      CreateVideoTrack(const std::string& id,
+                       VideoSourceInterface* video_source);
+
+  virtual talk_base::scoped_refptr<AudioTrackInterface>
+      CreateAudioTrack(const std::string& id,
+                       AudioSourceInterface* audio_source);
+
+  virtual cricket::ChannelManager* channel_manager();
+  virtual talk_base::Thread* signaling_thread();
+  virtual talk_base::Thread* worker_thread();
+
+ protected:
+  // Constructors are protected: instances are obtained through the
+  // CreatePeerConnectionFactory() free functions.
+  PeerConnectionFactory();
+  PeerConnectionFactory(
+      talk_base::Thread* worker_thread,
+      talk_base::Thread* signaling_thread,
+      AudioDeviceModule* default_adm,
+      cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+      cricket::WebRtcVideoDecoderFactory* video_decoder_factory);
+  virtual ~PeerConnectionFactory();
+
+
+ private:
+  // The "_s"-suffixed methods presumably run on the signaling thread,
+  // invoked from OnMessage() — TODO(review): confirm against the .cc.
+  bool Initialize_s();
+  void Terminate_s();
+  talk_base::scoped_refptr<AudioSourceInterface> CreateAudioSource_s(
+      const MediaConstraintsInterface* constraints);
+  talk_base::scoped_refptr<VideoSourceInterface> CreateVideoSource_s(
+      cricket::VideoCapturer* capturer,
+      const MediaConstraintsInterface* constraints);
+  talk_base::scoped_refptr<PeerConnectionInterface> CreatePeerConnection_s(
+      const PeerConnectionInterface::IceServers& configuration,
+      const MediaConstraintsInterface* constraints,
+      PortAllocatorFactoryInterface* allocator_factory,
+      PeerConnectionObserver* observer);
+  // Implements talk_base::MessageHandler.
+  void OnMessage(talk_base::Message* msg);
+
+  // Presumably true when the factory created (and must tear down) the
+  // threads/ADM itself rather than borrowing injected ones — TODO confirm.
+  bool owns_ptrs_;
+  talk_base::Thread* signaling_thread_;
+  talk_base::Thread* worker_thread_;
+  talk_base::scoped_refptr<PortAllocatorFactoryInterface> allocator_factory_;
+  // External Audio device used for audio playback.
+  talk_base::scoped_refptr<AudioDeviceModule> default_adm_;
+  talk_base::scoped_ptr<cricket::ChannelManager> channel_manager_;
+  // External Video encoder factory. This can be NULL if the client has not
+  // injected any. In that case, video engine will use the internal SW encoder.
+  talk_base::scoped_ptr<cricket::WebRtcVideoEncoderFactory>
+      video_encoder_factory_;
+  // External Video decoder factory. This can be NULL if the client has not
+  // injected any. In that case, video engine will use the internal SW decoder.
+  talk_base::scoped_ptr<cricket::WebRtcVideoDecoderFactory>
+      video_decoder_factory_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_PEERCONNECTIONFACTORY_H_
diff --git a/talk/app/webrtc/peerconnectionfactory_unittest.cc b/talk/app/webrtc/peerconnectionfactory_unittest.cc
new file mode 100644
index 0000000..6d54204
--- /dev/null
+++ b/talk/app/webrtc/peerconnectionfactory_unittest.cc
@@ -0,0 +1,270 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+
+#include "talk/app/webrtc/fakeportallocatorfactory.h"
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/peerconnectionfactory.h"
+#include "talk/app/webrtc/videosourceinterface.h"
+#include "talk/app/webrtc/test/fakevideotrackrenderer.h"
+#include "talk/base/gunit.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/thread.h"
+#include "talk/media/base/fakevideocapturer.h"
+#include "talk/media/webrtc/webrtccommon.h"
+#include "talk/media/webrtc/webrtcvoe.h"
+
+using webrtc::FakeVideoTrackRenderer;
+using webrtc::MediaStreamInterface;
+using webrtc::PeerConnectionFactoryInterface;
+using webrtc::PeerConnectionInterface;
+using webrtc::PeerConnectionObserver;
+using webrtc::PortAllocatorFactoryInterface;
+using webrtc::VideoSourceInterface;
+using webrtc::VideoTrackInterface;
+
+namespace {
+
+typedef std::vector<PortAllocatorFactoryInterface::StunConfiguration>
+ StunConfigurations;
+typedef std::vector<PortAllocatorFactoryInterface::TurnConfiguration>
+ TurnConfigurations;
+
+static const char kStunIceServer[] = "stun:stun.l.google.com:19302";
+static const char kTurnIceServer[] = "turn:test%40hello.com@test.com:1234";
+static const char kTurnIceServerWithTransport[] =
+ "turn:test@hello.com?transport=tcp";
+static const char kSecureTurnIceServer[] =
+ "turns:test@hello.com?transport=tcp";
+static const char kTurnIceServerWithNoUsernameInUri[] =
+ "turn:test.com:1234";
+static const char kTurnPassword[] = "turnpassword";
+static const int kDefaultPort = 3478;
+static const char kTurnUsername[] = "test";
+
+// PeerConnectionObserver implementation that ignores every callback; used
+// where the tests only care about object creation, not events.
+class NullPeerConnectionObserver : public PeerConnectionObserver {
+ public:
+  virtual void OnError() {}
+  virtual void OnMessage(const std::string& msg) {}
+  virtual void OnSignalingMessage(const std::string& msg) {}
+  virtual void OnSignalingChange(
+      PeerConnectionInterface::SignalingState new_state) {}
+  virtual void OnAddStream(MediaStreamInterface* stream) {}
+  virtual void OnRemoveStream(MediaStreamInterface* stream) {}
+  virtual void OnRenegotiationNeeded() {}
+  virtual void OnIceConnectionChange(
+      PeerConnectionInterface::IceConnectionState new_state) {}
+  virtual void OnIceGatheringChange(
+      PeerConnectionInterface::IceGatheringState new_state) {}
+  virtual void OnIceCandidate(const webrtc::IceCandidateInterface* candidate) {}
+};
+
+} // namespace
+
+// Fixture that builds a PeerConnectionFactory on the current thread (used
+// as both worker and signaling thread) plus a fake port allocator factory,
+// and provides helpers to verify parsed STUN/TURN configurations.
+class PeerConnectionFactoryTest : public testing::Test {
+  // Private is fine here: gtest invokes SetUp() through the Test vtable.
+  void SetUp() {
+    // NULL ADM and encoder/decoder factories select the internal defaults.
+    factory_ = webrtc::CreatePeerConnectionFactory(talk_base::Thread::Current(),
+                                                   talk_base::Thread::Current(),
+                                                   NULL,
+                                                   NULL,
+                                                   NULL);
+
+    ASSERT_TRUE(factory_.get() != NULL);
+    allocator_factory_ = webrtc::FakePortAllocatorFactory::Create();
+  }
+
+ protected:
+  // Checks that the fake allocator received exactly the expected STUN
+  // server configurations, in order.
+  void VerifyStunConfigurations(StunConfigurations stun_config) {
+    webrtc::FakePortAllocatorFactory* allocator =
+        static_cast<webrtc::FakePortAllocatorFactory*>(
+            allocator_factory_.get());
+    ASSERT_TRUE(allocator != NULL);
+    EXPECT_EQ(stun_config.size(), allocator->stun_configs().size());
+    for (size_t i = 0; i < stun_config.size(); ++i) {
+      EXPECT_EQ(stun_config[i].server.ToString(),
+                allocator->stun_configs()[i].server.ToString());
+    }
+  }
+
+  // Checks that the fake allocator received exactly the expected TURN
+  // server configurations (address, credentials, and transport), in order.
+  void VerifyTurnConfigurations(TurnConfigurations turn_config) {
+    webrtc::FakePortAllocatorFactory* allocator =
+        static_cast<webrtc::FakePortAllocatorFactory*>(
+            allocator_factory_.get());
+    ASSERT_TRUE(allocator != NULL);
+    EXPECT_EQ(turn_config.size(), allocator->turn_configs().size());
+    for (size_t i = 0; i < turn_config.size(); ++i) {
+      EXPECT_EQ(turn_config[i].server.ToString(),
+                allocator->turn_configs()[i].server.ToString());
+      EXPECT_EQ(turn_config[i].username, allocator->turn_configs()[i].username);
+      EXPECT_EQ(turn_config[i].password, allocator->turn_configs()[i].password);
+      EXPECT_EQ(turn_config[i].transport_type,
+                allocator->turn_configs()[i].transport_type);
+    }
+  }
+
+  talk_base::scoped_refptr<PeerConnectionFactoryInterface> factory_;
+  NullPeerConnectionObserver observer_;
+  talk_base::scoped_refptr<PortAllocatorFactoryInterface> allocator_factory_;
+};
+
+// Verify creation of PeerConnection using internal ADM, video factory and
+// internal libjingle threads.
+TEST(PeerConnectionFactoryTestInternal, CreatePCUsingInternalModules) {
+  // No-argument overload: factory creates its own threads and modules.
+  talk_base::scoped_refptr<PeerConnectionFactoryInterface> factory(
+      webrtc::CreatePeerConnectionFactory());
+
+  NullPeerConnectionObserver observer;
+  // Empty server list: no STUN/TURN configuration is required for creation.
+  webrtc::PeerConnectionInterface::IceServers servers;
+
+  talk_base::scoped_refptr<PeerConnectionInterface> pc(
+      factory->CreatePeerConnection(servers, NULL, NULL, &observer));
+
+  EXPECT_TRUE(pc.get() != NULL);
+}
+
+// This test verifies creation of PeerConnection with valid STUN and TURN
+// configuration. Also verifies the URL's parsed correctly as expected.
+TEST_F(PeerConnectionFactoryTest, CreatePCUsingIceServers) {
+  // One STUN server plus one TURN server whose username is URL-encoded in
+  // the URI (%40 == '@').
+  webrtc::PeerConnectionInterface::IceServers ice_servers;
+  webrtc::PeerConnectionInterface::IceServer ice_server;
+  ice_server.uri = kStunIceServer;
+  ice_servers.push_back(ice_server);
+  ice_server.uri = kTurnIceServer;
+  ice_server.password = kTurnPassword;
+  ice_servers.push_back(ice_server);
+  talk_base::scoped_refptr<PeerConnectionInterface> pc(
+      factory_->CreatePeerConnection(ice_servers, NULL,
+                                     allocator_factory_.get(),
+                                     NULL,
+                                     &observer_));
+  EXPECT_TRUE(pc.get() != NULL);
+  // Expect the STUN URI parsed as-is, and the TURN host additionally
+  // registered as a STUN server.
+  StunConfigurations stun_configs;
+  webrtc::PortAllocatorFactoryInterface::StunConfiguration stun(
+      "stun.l.google.com", 19302);
+  stun_configs.push_back(stun);
+  webrtc::PortAllocatorFactoryInterface::StunConfiguration stun1(
+      "test.com", 1234);
+  stun_configs.push_back(stun1);
+  VerifyStunConfigurations(stun_configs);
+  // The decoded username ("test@hello.com") and default "udp" transport
+  // should come out of URI parsing.
+  TurnConfigurations turn_configs;
+  webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn(
+      "test.com", 1234, "test@hello.com", kTurnPassword, "udp");
+  turn_configs.push_back(turn);
+  VerifyTurnConfigurations(turn_configs);
+}
+
+TEST_F(PeerConnectionFactoryTest, CreatePCUsingNoUsernameInUri) {
+  // When the TURN URI carries no username, the IceServer.username field
+  // must be used instead.
+  webrtc::PeerConnectionInterface::IceServers ice_servers;
+  webrtc::PeerConnectionInterface::IceServer ice_server;
+  ice_server.uri = kStunIceServer;
+  ice_servers.push_back(ice_server);
+  ice_server.uri = kTurnIceServerWithNoUsernameInUri;
+  ice_server.username = kTurnUsername;
+  ice_server.password = kTurnPassword;
+  ice_servers.push_back(ice_server);
+  talk_base::scoped_refptr<PeerConnectionInterface> pc(
+      factory_->CreatePeerConnection(ice_servers, NULL,
+                                     allocator_factory_.get(),
+                                     NULL,
+                                     &observer_));
+  EXPECT_TRUE(pc.get() != NULL);
+  TurnConfigurations turn_configs;
+  webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn(
+      "test.com", 1234, kTurnUsername, kTurnPassword, "udp");
+  turn_configs.push_back(turn);
+  VerifyTurnConfigurations(turn_configs);
+}
+
+// This test verifies the PeerConnection created properly with TURN url which
+// has transport parameter in it.
+TEST_F(PeerConnectionFactoryTest, CreatePCUsingTurnUrlWithTransportParam) {
+  // TURN URI with "?transport=tcp" and no explicit port; the default TURN
+  // port (3478) and the tcp transport should be parsed out.
+  webrtc::PeerConnectionInterface::IceServers ice_servers;
+  webrtc::PeerConnectionInterface::IceServer ice_server;
+  ice_server.uri = kTurnIceServerWithTransport;
+  ice_server.password = kTurnPassword;
+  ice_servers.push_back(ice_server);
+  talk_base::scoped_refptr<PeerConnectionInterface> pc(
+      factory_->CreatePeerConnection(ice_servers, NULL,
+                                     allocator_factory_.get(),
+                                     NULL,
+                                     &observer_));
+  EXPECT_TRUE(pc.get() != NULL);
+  TurnConfigurations turn_configs;
+  webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn(
+      "hello.com", kDefaultPort, "test", kTurnPassword, "tcp");
+  turn_configs.push_back(turn);
+  VerifyTurnConfigurations(turn_configs);
+  // The TURN host is also expected to be registered as a STUN server.
+  StunConfigurations stun_configs;
+  webrtc::PortAllocatorFactoryInterface::StunConfiguration stun(
+      "hello.com", kDefaultPort);
+  stun_configs.push_back(stun);
+  VerifyStunConfigurations(stun_configs);
+}
+
+// This test verifies that the factory fails to create a peerconnection
+// object when a valid secure TURN url is passed. Connecting to a secure
+// TURN server is not currently supported.
+TEST_F(PeerConnectionFactoryTest, CreatePCUsingSecureTurnUrl) {
+  // "turns:" scheme is unsupported, so creation must fail outright.
+  webrtc::PeerConnectionInterface::IceServers ice_servers;
+  webrtc::PeerConnectionInterface::IceServer ice_server;
+  ice_server.uri = kSecureTurnIceServer;
+  ice_server.password = kTurnPassword;
+  ice_servers.push_back(ice_server);
+  talk_base::scoped_refptr<PeerConnectionInterface> pc(
+      factory_->CreatePeerConnection(ice_servers, NULL,
+                                     allocator_factory_.get(),
+                                     NULL,
+                                     &observer_));
+  EXPECT_TRUE(pc.get() == NULL);
+  // No TURN configuration should have reached the allocator.
+  TurnConfigurations turn_configs;
+  VerifyTurnConfigurations(turn_configs);
+}
+
+// This test verifies the captured stream is rendered locally using a
+// local video track.
+TEST_F(PeerConnectionFactoryTest, LocalRendering) {
+  cricket::FakeVideoCapturer* capturer = new cricket::FakeVideoCapturer();
+  // The source take ownership of |capturer|.
+  talk_base::scoped_refptr<VideoSourceInterface> source(
+      factory_->CreateVideoSource(capturer, NULL));
+  ASSERT_TRUE(source.get() != NULL);
+  talk_base::scoped_refptr<VideoTrackInterface> track(
+      factory_->CreateVideoTrack("testlabel", source));
+  ASSERT_TRUE(track.get() != NULL);
+  FakeVideoTrackRenderer local_renderer(track);
+
+  // An enabled track delivers each captured frame to its renderer.
+  EXPECT_EQ(0, local_renderer.num_rendered_frames());
+  EXPECT_TRUE(capturer->CaptureFrame());
+  EXPECT_EQ(1, local_renderer.num_rendered_frames());
+
+  // Disabling the track stops frames from reaching the renderer...
+  track->set_enabled(false);
+  EXPECT_TRUE(capturer->CaptureFrame());
+  EXPECT_EQ(1, local_renderer.num_rendered_frames());
+
+  // ...and re-enabling resumes delivery.
+  track->set_enabled(true);
+  EXPECT_TRUE(capturer->CaptureFrame());
+  EXPECT_EQ(2, local_renderer.num_rendered_frames());
+}
diff --git a/talk/app/webrtc/peerconnectioninterface.h b/talk/app/webrtc/peerconnectioninterface.h
new file mode 100644
index 0000000..9a7cdd0
--- /dev/null
+++ b/talk/app/webrtc/peerconnectioninterface.h
@@ -0,0 +1,451 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file contains the PeerConnection interface as defined in
+// http://dev.w3.org/2011/webrtc/editor/webrtc.html#peer-to-peer-connections.
+// Applications must use this interface to implement peerconnection.
+// PeerConnectionFactory class provides factory methods to create
+// peerconnection, mediastream and media tracks objects.
+//
+// The Following steps are needed to setup a typical call using Jsep.
+// 1. Create a PeerConnectionFactoryInterface. Check constructors for more
+// information about input parameters.
+// 2. Create a PeerConnection object. Provide a configuration string which
+// points either to stun or turn server to generate ICE candidates and provide
+// an object that implements the PeerConnectionObserver interface.
+// 3. Create local MediaStream and MediaTracks using the PeerConnectionFactory
+// and add it to PeerConnection by calling AddStream.
+// 4. Create an offer and serialize it and send it to the remote peer.
+// 5. Once an ice candidate has been found PeerConnection will call the
+// observer function OnIceCandidate. The candidates must also be serialized and
+// sent to the remote peer.
+// 6. Once an answer is received from the remote peer, call
+// SetLocalSessionDescription with the offer and SetRemoteSessionDescription
+// with the remote answer.
+// 7. Once a remote candidate is received from the remote peer, provide it to
+// the peerconnection by calling AddIceCandidate.
+
+
+// The Receiver of a call can decide to accept or reject the call.
+// This decision will be taken by the application not peerconnection.
+// If application decides to accept the call
+// 1. Create PeerConnectionFactoryInterface if it doesn't exist.
+// 2. Create a new PeerConnection.
+// 3. Provide the remote offer to the new PeerConnection object by calling
+// SetRemoteSessionDescription.
+// 4. Generate an answer to the remote offer by calling CreateAnswer and send it
+// back to the remote peer.
+// 5. Provide the local answer to the new PeerConnection by calling
+// SetLocalSessionDescription with the answer.
+// 6. Provide the remote ice candidates by calling AddIceCandidate.
+// 7. Once a candidate has been found PeerConnection will call the observer
+// function OnIceCandidate. Send these candidates to the remote peer.
+
+#ifndef TALK_APP_WEBRTC_PEERCONNECTIONINTERFACE_H_
+#define TALK_APP_WEBRTC_PEERCONNECTIONINTERFACE_H_
+
+#include <string>
+#include <vector>
+
+#include "talk/app/webrtc/datachannelinterface.h"
+#include "talk/app/webrtc/dtmfsenderinterface.h"
+#include "talk/app/webrtc/jsep.h"
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/statstypes.h"
+#include "talk/base/socketaddress.h"
+
+namespace talk_base {
+class Thread;
+}
+
+namespace cricket {
+class PortAllocator;
+class WebRtcVideoDecoderFactory;
+class WebRtcVideoEncoderFactory;
+}
+
+namespace webrtc {
+class AudioDeviceModule;
+class MediaConstraintsInterface;
+
+// MediaStream container interface.
+class StreamCollectionInterface : public talk_base::RefCountInterface {
+ public:
+  // TODO(ronghuawu): Update the function names to c++ style, e.g. find -> Find.
+  // Number of streams in the collection.
+  virtual size_t count() = 0;
+  // Stream at |index|; valid indices are [0, count()).
+  virtual MediaStreamInterface* at(size_t index) = 0;
+  // Looks up a stream by its label.
+  virtual MediaStreamInterface* find(const std::string& label) = 0;
+  // Looks up an audio/video track by track id across all streams.
+  virtual MediaStreamTrackInterface* FindAudioTrack(
+      const std::string& id) = 0;
+  virtual MediaStreamTrackInterface* FindVideoTrack(
+      const std::string& id) = 0;
+
+ protected:
+  // Dtor protected as objects shouldn't be deleted via this interface.
+  ~StreamCollectionInterface() {}
+};
+
+// Callback interface for receiving the result of an asynchronous
+// PeerConnectionInterface::GetStats() request.
+class StatsObserver : public talk_base::RefCountInterface {
+ public:
+  // Invoked with the collected reports once stats gathering completes.
+  virtual void OnComplete(const std::vector<StatsReport>& reports) = 0;
+
+ protected:
+  virtual ~StatsObserver() {}
+};
+
+// Application-facing interface of a single peer-to-peer connection. See
+// the file-level comment for the typical call sequence.
+class PeerConnectionInterface : public talk_base::RefCountInterface {
+ public:
+  // See http://dev.w3.org/2011/webrtc/editor/webrtc.html#state-definitions .
+  enum SignalingState {
+    kStable,
+    kHaveLocalOffer,
+    kHaveLocalPrAnswer,
+    kHaveRemoteOffer,
+    kHaveRemotePrAnswer,
+    kClosed,
+  };
+
+  // TODO(bemasc): Remove IceState when callers are changed to
+  // IceConnection/GatheringState.
+  enum IceState {
+    kIceNew,
+    kIceGathering,
+    kIceWaiting,
+    kIceChecking,
+    kIceConnected,
+    kIceCompleted,
+    kIceFailed,
+    kIceClosed,
+  };
+
+  enum IceGatheringState {
+    kIceGatheringNew,
+    kIceGatheringGathering,
+    kIceGatheringComplete
+  };
+
+  enum IceConnectionState {
+    kIceConnectionNew,
+    kIceConnectionChecking,
+    kIceConnectionConnected,
+    kIceConnectionCompleted,
+    kIceConnectionFailed,
+    kIceConnectionDisconnected,
+    kIceConnectionClosed,
+  };
+
+  // A single STUN or TURN server entry. |username|/|password| are only
+  // meaningful for TURN servers.
+  struct IceServer {
+    std::string uri;
+    std::string username;
+    std::string password;
+  };
+  typedef std::vector<IceServer> IceServers;
+
+  // Accessor methods to active local streams.
+  virtual talk_base::scoped_refptr<StreamCollectionInterface>
+      local_streams() = 0;
+
+  // Accessor methods to remote streams.
+  virtual talk_base::scoped_refptr<StreamCollectionInterface>
+      remote_streams() = 0;
+
+  // Add a new MediaStream to be sent on this PeerConnection.
+  // Note that a SessionDescription negotiation is needed before the
+  // remote peer can receive the stream.
+  virtual bool AddStream(MediaStreamInterface* stream,
+                         const MediaConstraintsInterface* constraints) = 0;
+
+  // Remove a MediaStream from this PeerConnection.
+  // Note that a SessionDescription negotiation is needed before the
+  // remote peer is notified.
+  virtual void RemoveStream(MediaStreamInterface* stream) = 0;
+
+  // Returns pointer to the created DtmfSender on success.
+  // Otherwise returns NULL.
+  virtual talk_base::scoped_refptr<DtmfSenderInterface> CreateDtmfSender(
+      AudioTrackInterface* track) = 0;
+
+  // Starts an asynchronous stats request; the result is delivered through
+  // |observer|. NOTE(review): semantics of a NULL |track| (all tracks?)
+  // are not visible here — confirm against the implementation.
+  virtual bool GetStats(StatsObserver* observer,
+                        MediaStreamTrackInterface* track) = 0;
+
+  // Creates a data channel with the given |label|; |config| may carry
+  // channel options.
+  virtual talk_base::scoped_refptr<DataChannelInterface> CreateDataChannel(
+      const std::string& label,
+      const DataChannelInit* config) = 0;
+
+  virtual const SessionDescriptionInterface* local_description() const = 0;
+  virtual const SessionDescriptionInterface* remote_description() const = 0;
+
+  // Create a new offer.
+  // The CreateSessionDescriptionObserver callback will be called when done.
+  virtual void CreateOffer(CreateSessionDescriptionObserver* observer,
+                           const MediaConstraintsInterface* constraints) = 0;
+  // Create an answer to an offer.
+  // The CreateSessionDescriptionObserver callback will be called when done.
+  virtual void CreateAnswer(CreateSessionDescriptionObserver* observer,
+                            const MediaConstraintsInterface* constraints) = 0;
+  // Sets the local session description.
+  // JsepInterface takes the ownership of |desc| even if it fails.
+  // The |observer| callback will be called when done.
+  virtual void SetLocalDescription(SetSessionDescriptionObserver* observer,
+                                   SessionDescriptionInterface* desc) = 0;
+  // Sets the remote session description.
+  // JsepInterface takes the ownership of |desc| even if it fails.
+  // The |observer| callback will be called when done.
+  virtual void SetRemoteDescription(SetSessionDescriptionObserver* observer,
+                                    SessionDescriptionInterface* desc) = 0;
+  // Restarts or updates the ICE Agent process of gathering local candidates
+  // and pinging remote candidates.
+  virtual bool UpdateIce(const IceServers& configuration,
+                         const MediaConstraintsInterface* constraints) = 0;
+  // Provides a remote candidate to the ICE Agent.
+  // A copy of the |candidate| will be created and added to the remote
+  // description. So the caller of this method still has the ownership of the
+  // |candidate|.
+  // TODO(ronghuawu): Consider to change this so that the AddIceCandidate will
+  // take the ownership of the |candidate|.
+  virtual bool AddIceCandidate(const IceCandidateInterface* candidate) = 0;
+
+  // Returns the current SignalingState.
+  virtual SignalingState signaling_state() = 0;
+
+  // TODO(bemasc): Remove ice_state when callers are changed to
+  // IceConnection/GatheringState.
+  // Returns the current IceState.
+  virtual IceState ice_state() = 0;
+  virtual IceConnectionState ice_connection_state() = 0;
+  virtual IceGatheringState ice_gathering_state() = 0;
+
+  // Terminates all media and closes the transport.
+  virtual void Close() = 0;
+
+ protected:
+  // Dtor protected as objects shouldn't be deleted via this interface.
+  ~PeerConnectionInterface() {}
+};
+
+// PeerConnection callback interface. Application should implement these
+// methods.
+class PeerConnectionObserver {
+ public:
+  enum StateType {
+    kSignalingState,
+    kIceState,
+  };
+
+  virtual void OnError() = 0;
+
+  // Triggered when the SignalingState changed.
+  virtual void OnSignalingChange(
+      PeerConnectionInterface::SignalingState new_state) {}
+
+  // Triggered when SignalingState or IceState have changed.
+  // TODO(bemasc): Remove once callers transition to OnSignalingChange.
+  virtual void OnStateChange(StateType state_changed) {}
+
+  // Triggered when media is received on a new stream from remote peer.
+  virtual void OnAddStream(MediaStreamInterface* stream) = 0;
+
+  // Triggered when a remote peer closes a stream.
+  virtual void OnRemoveStream(MediaStreamInterface* stream) = 0;
+
+  // Triggered when a remote peer opens a data channel.
+  // TODO(perkj): Make pure virtual.
+  virtual void OnDataChannel(DataChannelInterface* data_channel) {}
+
+  // Triggered when renegotiation is needed, for example the ICE has restarted.
+  virtual void OnRenegotiationNeeded() {}
+
+  // Called any time the IceConnectionState changes
+  virtual void OnIceConnectionChange(
+      PeerConnectionInterface::IceConnectionState new_state) {}
+
+  // A new ICE candidate has been found.
+  virtual void OnIceCandidate(const IceCandidateInterface* candidate) = 0;
+
+  // Called any time the IceGatheringState changes
+  virtual void OnIceGatheringChange(
+      PeerConnectionInterface::IceGatheringState new_state) {}
+
+  // TODO(bemasc): Remove this once callers transition to OnIceGatheringChange.
+  // All Ice candidates have been found.
+  virtual void OnIceComplete() {}
+
+ protected:
+  // Dtor protected as objects shouldn't be deleted via this interface.
+  ~PeerConnectionObserver() {}
+};
+
+// Factory class used for creating cricket::PortAllocator that is used
+// for ICE negotiation.
+class PortAllocatorFactoryInterface : public talk_base::RefCountInterface {
+ public:
+  struct StunConfiguration {
+    StunConfiguration(const std::string& address, int port)
+        : server(address, port) {}
+    // STUN server address and port.
+    talk_base::SocketAddress server;
+  };
+
+  struct TurnConfiguration {
+    TurnConfiguration(const std::string& address,
+                      int port,
+                      const std::string& username,
+                      const std::string& password,
+                      const std::string& transport_type)
+        : server(address, port),
+          username(username),
+          password(password),
+          transport_type(transport_type) {}
+    // TURN server address/port plus the credentials and transport
+    // (e.g. "udp"/"tcp") to use when contacting it.
+    talk_base::SocketAddress server;
+    std::string username;
+    std::string password;
+    std::string transport_type;
+  };
+
+  // Creates a PortAllocator configured with the given STUN and TURN
+  // servers. Caller semantics of the returned pointer are defined by the
+  // implementation.
+  virtual cricket::PortAllocator* CreatePortAllocator(
+      const std::vector<StunConfiguration>& stun_servers,
+      const std::vector<TurnConfiguration>& turn_configurations) = 0;
+
+ protected:
+  // Ctor/dtor protected: lifetime is managed through RefCountInterface.
+  PortAllocatorFactoryInterface() {}
+  ~PortAllocatorFactoryInterface() {}
+};
+
+// Used to receive callbacks of DTLS identity requests.
+// Used to receive callbacks of DTLS identity requests.
+class DTLSIdentityRequestObserver : public talk_base::RefCountInterface {
+ public:
+  // Called when the identity request fails; |error| is a service-specific
+  // error code.
+  virtual void OnFailure(int error) = 0;
+  // Called on success with the generated certificate and the private key
+  // that signed it.
+  virtual void OnSuccess(const std::string& certificate,
+                         const std::string& private_key) = 0;
+ protected:
+  virtual ~DTLSIdentityRequestObserver() {}
+};
+
+class DTLSIdentityServiceInterface {
+ public:
+  // Asynchronously request a DTLS identity, including a self-signed certificate
+  // and the private key used to sign the certificate, from the identity store
+  // for the given identity name.
+  // DTLSIdentityRequestObserver::OnSuccess will be called with the identity if
+  // the request succeeded; DTLSIdentityRequestObserver::OnFailure will be
+  // called with an error code if the request failed.
+  //
+  // Only one request can be made at a time. If a second request is called
+  // before the first one completes, RequestIdentity will abort and return
+  // false.
+  //
+  // |identity_name| is an internal name selected by the client to identify an
+  // identity within an origin. E.g. a web site may cache the certificates used
+  // to communicate with different peers under different identity names.
+  //
+  // |common_name| is the common name used to generate the certificate. If the
+  // certificate already exists in the store, |common_name| is ignored.
+  //
+  // |observer| is the object to receive success or failure callbacks.
+  //
+  // Returns true if either OnFailure or OnSuccess will be called.
+  virtual bool RequestIdentity(
+      const std::string& identity_name,
+      const std::string& common_name,
+      DTLSIdentityRequestObserver* observer) = 0;
+};
+
+// PeerConnectionFactoryInterface is the factory interface use for creating
+// PeerConnection, MediaStream and media tracks.
+// PeerConnectionFactoryInterface will create required libjingle threads,
+// socket and network manager factory classes for networking.
+// If an application decides to provide its own threads and network
+// implementation of these classes it should use the alternate
+// CreatePeerConnectionFactory method which accepts threads as input and use the
+// CreatePeerConnection version that takes a PortAllocatorFactoryInterface as
+// argument.
+class PeerConnectionFactoryInterface : public talk_base::RefCountInterface {
+ public:
+  // Creates a PeerConnection using the factory's default port allocation.
+  virtual talk_base::scoped_refptr<PeerConnectionInterface>
+      CreatePeerConnection(
+          const PeerConnectionInterface::IceServers& configuration,
+          const MediaConstraintsInterface* constraints,
+          DTLSIdentityServiceInterface* dtls_identity_service,
+          PeerConnectionObserver* observer) = 0;
+  // Creates a PeerConnection using a caller-supplied
+  // PortAllocatorFactoryInterface for ICE port allocation.
+  virtual talk_base::scoped_refptr<PeerConnectionInterface>
+      CreatePeerConnection(
+          const PeerConnectionInterface::IceServers& configuration,
+          const MediaConstraintsInterface* constraints,
+          PortAllocatorFactoryInterface* allocator_factory,
+          DTLSIdentityServiceInterface* dtls_identity_service,
+          PeerConnectionObserver* observer) = 0;
+  // Creates a new local MediaStream identified by |label|.
+  virtual talk_base::scoped_refptr<MediaStreamInterface>
+      CreateLocalMediaStream(const std::string& label) = 0;
+
+  // Creates an AudioSourceInterface.
+  // |constraints| decides audio processing settings but can be NULL.
+  virtual talk_base::scoped_refptr<AudioSourceInterface> CreateAudioSource(
+      const MediaConstraintsInterface* constraints) = 0;
+
+  // Creates a VideoSourceInterface. The new source take ownership of
+  // |capturer|. |constraints| decides video resolution and frame rate but can
+  // be NULL.
+  virtual talk_base::scoped_refptr<VideoSourceInterface> CreateVideoSource(
+      cricket::VideoCapturer* capturer,
+      const MediaConstraintsInterface* constraints) = 0;
+
+  // Creates a new local VideoTrack. The same |source| can be used in several
+  // tracks.
+  virtual talk_base::scoped_refptr<VideoTrackInterface>
+      CreateVideoTrack(const std::string& label,
+                       VideoSourceInterface* source) = 0;
+
+  // Creates a new AudioTrack. At the moment |source| can be NULL.
+  virtual talk_base::scoped_refptr<AudioTrackInterface>
+      CreateAudioTrack(const std::string& label,
+                       AudioSourceInterface* source) = 0;
+
+ protected:
+  // Dtor and ctor protected as objects shouldn't be created or deleted via
+  // this interface.
+  PeerConnectionFactoryInterface() {}
+  ~PeerConnectionFactoryInterface() {} // NOLINT
+};
+
+// Create a new instance of PeerConnectionFactoryInterface.
+talk_base::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory();
+
+// Create a new instance of PeerConnectionFactoryInterface.
+// Ownership of |default_adm|, and optionally |encoder_factory| and
+// |decoder_factory|, is transferred to the returned factory.
+talk_base::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+ talk_base::Thread* worker_thread,
+ talk_base::Thread* signaling_thread,
+ AudioDeviceModule* default_adm,
+ cricket::WebRtcVideoEncoderFactory* encoder_factory,
+ cricket::WebRtcVideoDecoderFactory* decoder_factory);
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_PEERCONNECTIONINTERFACE_H_
diff --git a/talk/app/webrtc/peerconnectioninterface_unittest.cc b/talk/app/webrtc/peerconnectioninterface_unittest.cc
new file mode 100644
index 0000000..782bba1
--- /dev/null
+++ b/talk/app/webrtc/peerconnectioninterface_unittest.cc
@@ -0,0 +1,1220 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+
+#include "talk/app/webrtc/fakeportallocatorfactory.h"
+#include "talk/app/webrtc/jsepsessiondescription.h"
+#include "talk/app/webrtc/localvideosource.h"
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/peerconnectioninterface.h"
+#include "talk/app/webrtc/test/fakeconstraints.h"
+#include "talk/app/webrtc/test/mockpeerconnectionobservers.h"
+#include "talk/app/webrtc/test/testsdpstrings.h"
+#include "talk/base/gunit.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/sslstreamadapter.h"
+#include "talk/base/stringutils.h"
+#include "talk/base/thread.h"
+#include "talk/media/base/fakevideocapturer.h"
+#include "talk/session/media/mediasession.h"
+
+// Labels for the local MediaStreams created by the tests below.
+static const char kStreamLabel1[] = "local_stream_1";
+static const char kStreamLabel2[] = "local_stream_2";
+static const char kStreamLabel3[] = "local_stream_3";
+// Port the parser is expected to fill in when a STUN URI omits one.
+static const int kDefaultStunPort = 3478;
+// ICE server URIs used to exercise IceServer parsing. All entries after
+// kStunAddressOnly are intentionally malformed and must yield no
+// configuration (see CreatePeerConnectionWithDifferentConfigurations).
+static const char kStunAddressOnly[] = "stun:address";
+static const char kStunInvalidPort[] = "stun:address:-1";
+static const char kStunAddressPortAndMore1[] = "stun:address:port:more";
+static const char kStunAddressPortAndMore2[] = "stun:address:port more";
+// TURN URI with an embedded username, plus the credentials and hostname the
+// resulting configuration is expected to carry.
+static const char kTurnIceServerUri[] = "turn:user@turn.example.org";
+static const char kTurnUsername[] = "user";
+static const char kTurnPassword[] = "password";
+static const char kTurnHostname[] = "turn.example.org";
+// Timeout, in milliseconds, used by the EXPECT_*_WAIT macros in these tests.
+static const uint32 kTimeout = 5000U;
+
+// Skips the body of the current test when the feature predicate is false.
+#define MAYBE_SKIP_TEST(feature) \
+  if (!(feature())) { \
+    LOG(LS_INFO) << "Feature disabled... skipping"; \
+    return; \
+  }
+
+using talk_base::scoped_ptr;
+using talk_base::scoped_refptr;
+using webrtc::AudioSourceInterface;
+using webrtc::AudioTrackInterface;
+using webrtc::DataBuffer;
+using webrtc::DataChannelInterface;
+using webrtc::FakeConstraints;
+using webrtc::FakePortAllocatorFactory;
+using webrtc::IceCandidateInterface;
+using webrtc::MediaStreamInterface;
+using webrtc::MediaStreamTrackInterface;
+using webrtc::MockCreateSessionDescriptionObserver;
+using webrtc::MockDataChannelObserver;
+using webrtc::MockSetSessionDescriptionObserver;
+using webrtc::MockStatsObserver;
+using webrtc::PeerConnectionInterface;
+using webrtc::PeerConnectionObserver;
+using webrtc::PortAllocatorFactoryInterface;
+using webrtc::SdpParseError;
+using webrtc::SessionDescriptionInterface;
+using webrtc::VideoSourceInterface;
+using webrtc::VideoTrackInterface;
+
+namespace {
+
+// Gets the first ssrc of given content type from the ContentInfo.
+// Returns false, leaving |ssrc| untouched, when |content_info| or |ssrc| is
+// NULL or when the media description contains no streams.
+bool GetFirstSsrc(const cricket::ContentInfo* content_info, int* ssrc) {
+  if (!content_info || !ssrc) {
+    return false;
+  }
+  const cricket::MediaContentDescription* media_desc =
+      static_cast<const cricket::MediaContentDescription*> (
+          content_info->description);
+  if (!media_desc || media_desc->streams().empty()) {
+    return false;
+  }
+  // Only the first ssrc of the first stream matters to these tests.
+  *ssrc = media_desc->streams().begin()->first_ssrc();
+  return true;
+}
+
+// Rewrites every "a=ssrc:<id>" attribute in |sdp| to "a=ssrc:0", replacing
+// the text from the attribute start up to (not including) the next space.
+// Used to simulate a remote answer that carries no usable ssrcs.
+void SetSsrcToZero(std::string* sdp) {
+  const char kSdpSsrcAtribute[] = "a=ssrc:";
+  const char kSdpSsrcAtributeZero[] = "a=ssrc:0";
+  size_t ssrc_pos = 0;
+  while ((ssrc_pos = sdp->find(kSdpSsrcAtribute, ssrc_pos)) !=
+      std::string::npos) {
+    size_t end_ssrc = sdp->find(" ", ssrc_pos);
+    // If no space follows (attribute at end of string), |end_ssrc| is npos;
+    // replace() then clamps to the end of the string, which still zeroes the
+    // final attribute, and the loop terminates on the next find().
+    sdp->replace(ssrc_pos, end_ssrc - ssrc_pos, kSdpSsrcAtributeZero);
+    ssrc_pos = end_ssrc;
+  }
+}
+
+// PeerConnectionObserver implementation that records the callbacks it
+// receives so tests can assert on them, and that cross-checks each state
+// callback against the PeerConnection's corresponding getter.
+class MockPeerConnectionObserver : public PeerConnectionObserver {
+ public:
+  MockPeerConnectionObserver()
+      : renegotiation_needed_(false),
+        ice_complete_(false) {
+  }
+  ~MockPeerConnectionObserver() {
+  }
+  // Attaches (or, with NULL, detaches) the observed PeerConnection and
+  // caches its current signaling state in |state_|.
+  void SetPeerConnectionInterface(PeerConnectionInterface* pc) {
+    pc_ = pc;
+    if (pc) {
+      state_ = pc_->signaling_state();
+    }
+  }
+  virtual void OnError() {}
+  virtual void OnSignalingChange(
+      PeerConnectionInterface::SignalingState new_state) {
+    // The callback argument must agree with what the getter reports.
+    EXPECT_EQ(pc_->signaling_state(), new_state);
+    state_ = new_state;
+  }
+  // TODO(bemasc): Remove this once callers transition to OnIceGatheringChange.
+  virtual void OnStateChange(StateType state_changed) {
+    if (pc_.get() == NULL)
+      return;
+    switch (state_changed) {
+      case kSignalingState:
+        // OnSignalingChange and OnStateChange(kSignalingState) should always
+        // be called approximately simultaneously. To ease testing, we require
+        // that they always be called in that order. This check verifies
+        // that OnSignalingChange has just been called.
+        EXPECT_EQ(pc_->signaling_state(), state_);
+        break;
+      case kIceState:
+        ADD_FAILURE();
+        break;
+      default:
+        ADD_FAILURE();
+        break;
+    }
+  }
+  virtual void OnAddStream(MediaStreamInterface* stream) {
+    last_added_stream_ = stream;
+  }
+  virtual void OnRemoveStream(MediaStreamInterface* stream) {
+    last_removed_stream_ = stream;
+  }
+  virtual void OnRenegotiationNeeded() {
+    renegotiation_needed_ = true;
+  }
+  virtual void OnDataChannel(DataChannelInterface* data_channel) {
+    last_datachannel_ = data_channel;
+  }
+
+  virtual void OnIceConnectionChange(
+      PeerConnectionInterface::IceConnectionState new_state) {
+    EXPECT_EQ(pc_->ice_connection_state(), new_state);
+  }
+  virtual void OnIceGatheringChange(
+      PeerConnectionInterface::IceGatheringState new_state) {
+    EXPECT_EQ(pc_->ice_gathering_state(), new_state);
+  }
+  virtual void OnIceCandidate(const webrtc::IceCandidateInterface* candidate) {
+    // Candidates must not surface before gathering has started.
+    EXPECT_NE(PeerConnectionInterface::kIceGatheringNew,
+              pc_->ice_gathering_state());
+
+    std::string sdp;
+    EXPECT_TRUE(candidate->ToString(&sdp));
+    EXPECT_LT(0u, sdp.size());
+    // Keep a copy of the candidate; round-tripping it through its SDP string
+    // also exercises candidate serialization and parsing.
+    last_candidate_.reset(webrtc::CreateIceCandidate(candidate->sdp_mid(),
+        candidate->sdp_mline_index(), sdp, NULL));
+    EXPECT_TRUE(last_candidate_.get() != NULL);
+  }
+  // TODO(bemasc): Remove this once callers transition to OnSignalingChange.
+  virtual void OnIceComplete() {
+    ice_complete_ = true;
+    // OnIceGatheringChange(IceGatheringCompleted) and OnIceComplete() should
+    // be called approximately simultaneously. For ease of testing, this
+    // check additionally requires that they be called in the above order.
+    EXPECT_EQ(PeerConnectionInterface::kIceGatheringComplete,
+              pc_->ice_gathering_state());
+  }
+
+  // Returns the label of the last added stream.
+  // Empty string if no stream has been added.
+  std::string GetLastAddedStreamLabel() {
+    if (last_added_stream_.get())
+      return last_added_stream_->label();
+    return "";
+  }
+  // Returns the label of the last removed stream, or "" if none was removed.
+  std::string GetLastRemovedStreamLabel() {
+    if (last_removed_stream_.get())
+      return last_removed_stream_->label();
+    return "";
+  }
+
+  scoped_refptr<PeerConnectionInterface> pc_;
+  PeerConnectionInterface::SignalingState state_;  // Last signaling state seen.
+  scoped_ptr<IceCandidateInterface> last_candidate_;
+  scoped_refptr<DataChannelInterface> last_datachannel_;
+  bool renegotiation_needed_;  // Set by OnRenegotiationNeeded().
+  bool ice_complete_;  // Set by OnIceComplete().
+
+ private:
+  scoped_refptr<MediaStreamInterface> last_added_stream_;
+  scoped_refptr<MediaStreamInterface> last_removed_stream_;
+};
+
+} // namespace
+// Fixture that creates a PeerConnectionFactory on the current thread and
+// provides helpers for constructing PeerConnections, adding local streams,
+// and driving loop-back offer/answer exchanges against the connection itself.
+class PeerConnectionInterfaceTest : public testing::Test {
+ protected:
+  virtual void SetUp() {
+    pc_factory_ = webrtc::CreatePeerConnectionFactory(
+        talk_base::Thread::Current(), talk_base::Thread::Current(), NULL, NULL,
+        NULL);
+    ASSERT_TRUE(pc_factory_.get() != NULL);
+  }
+
+  // Creates a PeerConnection with no ICE servers and no constraints.
+  void CreatePeerConnection() {
+    CreatePeerConnection("", "", NULL);
+  }
+
+  // Creates a PeerConnection with no ICE servers and the given constraints.
+  void CreatePeerConnection(webrtc::MediaConstraintsInterface* constraints) {
+    CreatePeerConnection("", "", constraints);
+  }
+
+  // Creates a PeerConnection configured with a single ICE server described by
+  // |uri| and |password|, using a fake port allocator so that the resulting
+  // STUN/TURN configurations can be inspected.
+  void CreatePeerConnection(const std::string& uri,
+                            const std::string& password,
+                            webrtc::MediaConstraintsInterface* constraints) {
+    PeerConnectionInterface::IceServer server;
+    PeerConnectionInterface::IceServers servers;
+    server.uri = uri;
+    server.password = password;
+    servers.push_back(server);
+
+    port_allocator_factory_ = FakePortAllocatorFactory::Create();
+    pc_ = pc_factory_->CreatePeerConnection(servers, constraints,
+                                            port_allocator_factory_.get(),
+                                            NULL,
+                                            &observer_);
+    ASSERT_TRUE(pc_.get() != NULL);
+    observer_.SetPeerConnectionInterface(pc_.get());
+    EXPECT_EQ(PeerConnectionInterface::kStable, observer_.state_);
+  }
+
+  // Exercises IceServer URI parsing: valid STUN/TURN URIs must produce the
+  // expected allocator configurations, malformed ones must produce none.
+  void CreatePeerConnectionWithDifferentConfigurations() {
+    CreatePeerConnection(kStunAddressOnly, "", NULL);
+    EXPECT_EQ(1u, port_allocator_factory_->stun_configs().size());
+    EXPECT_EQ(0u, port_allocator_factory_->turn_configs().size());
+    EXPECT_EQ("address",
+        port_allocator_factory_->stun_configs()[0].server.hostname());
+    EXPECT_EQ(kDefaultStunPort,
+        port_allocator_factory_->stun_configs()[0].server.port());
+
+    CreatePeerConnection(kStunInvalidPort, "", NULL);
+    EXPECT_EQ(0u, port_allocator_factory_->stun_configs().size());
+    EXPECT_EQ(0u, port_allocator_factory_->turn_configs().size());
+
+    CreatePeerConnection(kStunAddressPortAndMore1, "", NULL);
+    EXPECT_EQ(0u, port_allocator_factory_->stun_configs().size());
+    EXPECT_EQ(0u, port_allocator_factory_->turn_configs().size());
+
+    CreatePeerConnection(kStunAddressPortAndMore2, "", NULL);
+    EXPECT_EQ(0u, port_allocator_factory_->stun_configs().size());
+    EXPECT_EQ(0u, port_allocator_factory_->turn_configs().size());
+
+    // A TURN server implies an additional STUN config with the same host.
+    CreatePeerConnection(kTurnIceServerUri, kTurnPassword, NULL);
+    EXPECT_EQ(1u, port_allocator_factory_->stun_configs().size());
+    EXPECT_EQ(1u, port_allocator_factory_->turn_configs().size());
+    EXPECT_EQ(kTurnUsername,
+              port_allocator_factory_->turn_configs()[0].username);
+    EXPECT_EQ(kTurnPassword,
+              port_allocator_factory_->turn_configs()[0].password);
+    EXPECT_EQ(kTurnHostname,
+              port_allocator_factory_->turn_configs()[0].server.hostname());
+    EXPECT_EQ(kTurnHostname,
+              port_allocator_factory_->stun_configs()[0].server.hostname());
+  }
+
+  // Drops the PeerConnection and detaches it from the observer.
+  void ReleasePeerConnection() {
+    pc_ = NULL;
+    observer_.SetPeerConnectionInterface(NULL);
+  }
+
+  // Adds a local stream containing one video track and waits for the
+  // resulting renegotiation-needed callback.
+  void AddStream(const std::string& label) {
+    // Create a local stream.
+    scoped_refptr<MediaStreamInterface> stream(
+        pc_factory_->CreateLocalMediaStream(label));
+    scoped_refptr<VideoSourceInterface> video_source(
+        pc_factory_->CreateVideoSource(new cricket::FakeVideoCapturer(), NULL));
+    scoped_refptr<VideoTrackInterface> video_track(
+        pc_factory_->CreateVideoTrack(label + "v0", video_source));
+    stream->AddTrack(video_track.get());
+    EXPECT_TRUE(pc_->AddStream(stream, NULL));
+    EXPECT_TRUE_WAIT(observer_.renegotiation_needed_, kTimeout);
+    observer_.renegotiation_needed_ = false;
+  }
+
+  // Adds a local stream containing one audio track and waits for the
+  // resulting renegotiation-needed callback.
+  void AddVoiceStream(const std::string& label) {
+    // Create a local stream.
+    scoped_refptr<MediaStreamInterface> stream(
+        pc_factory_->CreateLocalMediaStream(label));
+    scoped_refptr<AudioTrackInterface> audio_track(
+        pc_factory_->CreateAudioTrack(label + "a0", NULL));
+    stream->AddTrack(audio_track.get());
+    EXPECT_TRUE(pc_->AddStream(stream, NULL));
+    EXPECT_TRUE_WAIT(observer_.renegotiation_needed_, kTimeout);
+    observer_.renegotiation_needed_ = false;
+  }
+
+  // Adds a local stream with one audio and one video track using the given
+  // track labels, then waits for renegotiation-needed.
+  void AddAudioVideoStream(const std::string& stream_label,
+                           const std::string& audio_track_label,
+                           const std::string& video_track_label) {
+    // Create a local stream.
+    scoped_refptr<MediaStreamInterface> stream(
+        pc_factory_->CreateLocalMediaStream(stream_label));
+    scoped_refptr<AudioTrackInterface> audio_track(
+        pc_factory_->CreateAudioTrack(
+            audio_track_label, static_cast<AudioSourceInterface*>(NULL)));
+    stream->AddTrack(audio_track.get());
+    scoped_refptr<VideoTrackInterface> video_track(
+        pc_factory_->CreateVideoTrack(video_track_label, NULL));
+    stream->AddTrack(video_track.get());
+    EXPECT_TRUE(pc_->AddStream(stream, NULL));
+    EXPECT_TRUE_WAIT(observer_.renegotiation_needed_, kTimeout);
+    observer_.renegotiation_needed_ = false;
+  }
+
+  // Calls CreateOffer (|offer| == true) or CreateAnswer, waits for the async
+  // result, and hands ownership of the description to the caller.
+  bool DoCreateOfferAnswer(SessionDescriptionInterface** desc, bool offer) {
+    talk_base::scoped_refptr<MockCreateSessionDescriptionObserver>
+        observer(new talk_base::RefCountedObject<
+            MockCreateSessionDescriptionObserver>());
+    if (offer) {
+      pc_->CreateOffer(observer, NULL);
+    } else {
+      pc_->CreateAnswer(observer, NULL);
+    }
+    EXPECT_EQ_WAIT(true, observer->called(), kTimeout);
+    *desc = observer->release_desc();
+    return observer->result();
+  }
+
+  bool DoCreateOffer(SessionDescriptionInterface** desc) {
+    return DoCreateOfferAnswer(desc, true);
+  }
+
+  bool DoCreateAnswer(SessionDescriptionInterface** desc) {
+    return DoCreateOfferAnswer(desc, false);
+  }
+
+  // Applies |desc| as the local (|local| == true) or remote description and
+  // waits for the async result. Ownership of |desc| passes to the PC.
+  bool DoSetSessionDescription(SessionDescriptionInterface* desc, bool local) {
+    talk_base::scoped_refptr<MockSetSessionDescriptionObserver>
+        observer(new talk_base::RefCountedObject<
+            MockSetSessionDescriptionObserver>());
+    if (local) {
+      pc_->SetLocalDescription(observer, desc);
+    } else {
+      pc_->SetRemoteDescription(observer, desc);
+    }
+    EXPECT_EQ_WAIT(true, observer->called(), kTimeout);
+    return observer->result();
+  }
+
+  bool DoSetLocalDescription(SessionDescriptionInterface* desc) {
+    return DoSetSessionDescription(desc, true);
+  }
+
+  bool DoSetRemoteDescription(SessionDescriptionInterface* desc) {
+    return DoSetSessionDescription(desc, false);
+  }
+
+  // Calls PeerConnection::GetStats and check the return value.
+  // It does not verify the values in the StatReports since a RTCP packet might
+  // be required.
+  bool DoGetStats(MediaStreamTrackInterface* track) {
+    talk_base::scoped_refptr<MockStatsObserver> observer(
+        new talk_base::RefCountedObject<MockStatsObserver>());
+    if (!pc_->GetStats(observer, track))
+      return false;
+    EXPECT_TRUE_WAIT(observer->called(), kTimeout);
+    return observer->called();
+  }
+
+  // Sets up a complete loop-back call with an audio+video stream.
+  void InitiateCall() {
+    CreatePeerConnection();
+    // Create a local stream with audio&video tracks.
+    AddAudioVideoStream(kStreamLabel1, "audio_label", "video_label");
+    CreateOfferReceiveAnswer();
+  }
+
+  // Verify that RTP Header extensions has been negotiated for audio and video.
+  void VerifyRemoteRtpHeaderExtensions() {
+    const cricket::MediaContentDescription* desc =
+        cricket::GetFirstAudioContentDescription(
+            pc_->remote_description()->description());
+    ASSERT_TRUE(desc != NULL);
+    EXPECT_GT(desc->rtp_header_extensions().size(), 0u);
+
+    desc = cricket::GetFirstVideoContentDescription(
+        pc_->remote_description()->description());
+    ASSERT_TRUE(desc != NULL);
+    EXPECT_GT(desc->rtp_header_extensions().size(), 0u);
+  }
+
+  // Creates an offer locally but applies it as if it had been received from a
+  // remote peer, moving the PC into kHaveRemoteOffer.
+  void CreateOfferAsRemoteDescription() {
+    talk_base::scoped_ptr<SessionDescriptionInterface> offer;
+    EXPECT_TRUE(DoCreateOffer(offer.use()));
+    std::string sdp;
+    EXPECT_TRUE(offer->ToString(&sdp));
+    SessionDescriptionInterface* remote_offer =
+        webrtc::CreateSessionDescription(SessionDescriptionInterface::kOffer,
+                                         sdp, NULL);
+    EXPECT_TRUE(DoSetRemoteDescription(remote_offer));
+    EXPECT_EQ(PeerConnectionInterface::kHaveRemoteOffer, observer_.state_);
+  }
+
+  // Creates an answer and applies it locally, moving the PC into kStable.
+  void CreateAnswerAsLocalDescription() {
+    scoped_ptr<SessionDescriptionInterface> answer;
+    EXPECT_TRUE(DoCreateAnswer(answer.use()));
+
+    // TODO(perkj): Currently SetLocalDescription fails if any parameters in an
+    // audio codec change, even if the parameter has nothing to do with
+    // receiving. Not all parameters are serialized to SDP.
+    // Since CreatePrAnswerAsLocalDescription serializes/deserializes
+    // the SessionDescription, it is necessary to do that here too in order to
+    // get ReceiveOfferCreatePrAnswerAndAnswer and RenegotiateAudioOnly to pass.
+    // https://code.google.com/p/webrtc/issues/detail?id=1356
+    std::string sdp;
+    EXPECT_TRUE(answer->ToString(&sdp));
+    SessionDescriptionInterface* new_answer =
+        webrtc::CreateSessionDescription(SessionDescriptionInterface::kAnswer,
+                                         sdp, NULL);
+    EXPECT_TRUE(DoSetLocalDescription(new_answer));
+    EXPECT_EQ(PeerConnectionInterface::kStable, observer_.state_);
+  }
+
+  // Creates a provisional answer and applies it locally, moving the PC into
+  // kHaveLocalPrAnswer.
+  void CreatePrAnswerAsLocalDescription() {
+    scoped_ptr<SessionDescriptionInterface> answer;
+    EXPECT_TRUE(DoCreateAnswer(answer.use()));
+
+    std::string sdp;
+    EXPECT_TRUE(answer->ToString(&sdp));
+    SessionDescriptionInterface* pr_answer =
+        webrtc::CreateSessionDescription(SessionDescriptionInterface::kPrAnswer,
+                                         sdp, NULL);
+    EXPECT_TRUE(DoSetLocalDescription(pr_answer));
+    EXPECT_EQ(PeerConnectionInterface::kHaveLocalPrAnswer, observer_.state_);
+  }
+
+  // Performs a full loop-back exchange: local offer, then the same SDP
+  // applied back as the remote answer.
+  void CreateOfferReceiveAnswer() {
+    CreateOfferAsLocalDescription();
+    std::string sdp;
+    EXPECT_TRUE(pc_->local_description()->ToString(&sdp));
+    CreateAnswerAsRemoteDescription(sdp);
+  }
+
+  // Creates an offer and applies it locally, moving the PC into
+  // kHaveLocalOffer.
+  void CreateOfferAsLocalDescription() {
+    talk_base::scoped_ptr<SessionDescriptionInterface> offer;
+    ASSERT_TRUE(DoCreateOffer(offer.use()));
+    // TODO(perkj): Currently SetLocalDescription fails if any parameters in an
+    // audio codec change, even if the parameter has nothing to do with
+    // receiving. Not all parameters are serialized to SDP.
+    // Since CreatePrAnswerAsLocalDescription serializes/deserializes
+    // the SessionDescription, it is necessary to do that here too in order to
+    // get ReceiveOfferCreatePrAnswerAndAnswer and RenegotiateAudioOnly to pass.
+    // https://code.google.com/p/webrtc/issues/detail?id=1356
+    std::string sdp;
+    EXPECT_TRUE(offer->ToString(&sdp));
+    SessionDescriptionInterface* new_offer =
+        webrtc::CreateSessionDescription(
+            SessionDescriptionInterface::kOffer,
+            sdp, NULL);
+
+    EXPECT_TRUE(DoSetLocalDescription(new_offer));
+    EXPECT_EQ(PeerConnectionInterface::kHaveLocalOffer, observer_.state_);
+  }
+
+  // Parses |offer| as an answer and applies it as the remote description,
+  // moving the PC into kStable.
+  void CreateAnswerAsRemoteDescription(const std::string& offer) {
+    webrtc::JsepSessionDescription* answer = new webrtc::JsepSessionDescription(
+        SessionDescriptionInterface::kAnswer);
+    EXPECT_TRUE(answer->Initialize(offer, NULL));
+    EXPECT_TRUE(DoSetRemoteDescription(answer));
+    EXPECT_EQ(PeerConnectionInterface::kStable, observer_.state_);
+  }
+
+  // Applies |offer| first as a remote provisional answer and then as a final
+  // remote answer, checking the intermediate kHaveRemotePrAnswer state.
+  void CreatePrAnswerAndAnswerAsRemoteDescription(const std::string& offer) {
+    webrtc::JsepSessionDescription* pr_answer =
+        new webrtc::JsepSessionDescription(
+            SessionDescriptionInterface::kPrAnswer);
+    EXPECT_TRUE(pr_answer->Initialize(offer, NULL));
+    EXPECT_TRUE(DoSetRemoteDescription(pr_answer));
+    EXPECT_EQ(PeerConnectionInterface::kHaveRemotePrAnswer, observer_.state_);
+    webrtc::JsepSessionDescription* answer =
+        new webrtc::JsepSessionDescription(
+            SessionDescriptionInterface::kAnswer);
+    EXPECT_TRUE(answer->Initialize(offer, NULL));
+    EXPECT_TRUE(DoSetRemoteDescription(answer));
+    EXPECT_EQ(PeerConnectionInterface::kStable, observer_.state_);
+  }
+
+  // Helper function used for waiting until the last signaled remote stream
+  // has the same label as |stream_label|. In a few of the tests in this file
+  // we answer with the same session description as we offer and thus we can
+  // check if OnAddStream have been called with the same stream as we offer to
+  // send.
+  void WaitAndVerifyOnAddStream(const std::string& stream_label) {
+    EXPECT_EQ_WAIT(stream_label, observer_.GetLastAddedStreamLabel(), kTimeout);
+  }
+
+  // Creates an offer and applies it as a local session description.
+  // Creates an answer with the same SDP as the offer but with all "a=ssrc"
+  // lines zeroed out.
+  void CreateOfferReceiveAnswerWithoutSsrc() {
+    CreateOfferAsLocalDescription();
+    std::string sdp;
+    EXPECT_TRUE(pc_->local_description()->ToString(&sdp));
+    SetSsrcToZero(&sdp);
+    CreateAnswerAsRemoteDescription(sdp);
+  }
+
+  scoped_refptr<FakePortAllocatorFactory> port_allocator_factory_;
+  scoped_refptr<webrtc::PeerConnectionFactoryInterface> pc_factory_;
+  scoped_refptr<PeerConnectionInterface> pc_;
+  MockPeerConnectionObserver observer_;
+};
+
+// Checks IceServer URI parsing for both valid and malformed configurations.
+TEST_F(PeerConnectionInterfaceTest,
+       CreatePeerConnectionWithDifferentConfigurations) {
+  CreatePeerConnectionWithDifferentConfigurations();
+}
+
+// Adds a video stream and a voice stream, then verifies that another stream
+// with an audio track is rejected until the first audio track is removed.
+TEST_F(PeerConnectionInterfaceTest, AddStreams) {
+  CreatePeerConnection();
+  AddStream(kStreamLabel1);
+  AddVoiceStream(kStreamLabel2);
+  ASSERT_EQ(2u, pc_->local_streams()->count());
+
+  // Fail to add another stream with audio since we already have an audio track.
+  scoped_refptr<MediaStreamInterface> stream(
+      pc_factory_->CreateLocalMediaStream(kStreamLabel3));
+  scoped_refptr<AudioTrackInterface> audio_track(
+      pc_factory_->CreateAudioTrack(
+          kStreamLabel3, static_cast<AudioSourceInterface*>(NULL)));
+  stream->AddTrack(audio_track.get());
+  EXPECT_FALSE(pc_->AddStream(stream, NULL));
+
+  // Remove the stream with the audio track.
+  pc_->RemoveStream(pc_->local_streams()->at(1));
+
+  // Test that we now can add the stream with the audio track.
+  EXPECT_TRUE(pc_->AddStream(stream, NULL));
+}
+
+// Verifies that a removed stream no longer appears in local_streams().
+TEST_F(PeerConnectionInterfaceTest, RemoveStream) {
+  CreatePeerConnection();
+  AddStream(kStreamLabel1);
+  ASSERT_EQ(1u, pc_->local_streams()->count());
+  pc_->RemoveStream(pc_->local_streams()->at(0));
+  EXPECT_EQ(0u, pc_->local_streams()->count());
+}
+
+// Full loop-back offer/answer round trip; also verifies that RTP header
+// extensions were negotiated.
+TEST_F(PeerConnectionInterfaceTest, CreateOfferReceiveAnswer) {
+  InitiateCall();
+  WaitAndVerifyOnAddStream(kStreamLabel1);
+  VerifyRemoteRtpHeaderExtensions();
+}
+
+// Offerer side: remote provisional answer followed by a final remote answer.
+TEST_F(PeerConnectionInterfaceTest, CreateOfferReceivePrAnswerAndAnswer) {
+  CreatePeerConnection();
+  AddStream(kStreamLabel1);
+  CreateOfferAsLocalDescription();
+  std::string offer;
+  EXPECT_TRUE(pc_->local_description()->ToString(&offer));
+  CreatePrAnswerAndAnswerAsRemoteDescription(offer);
+  WaitAndVerifyOnAddStream(kStreamLabel1);
+}
+
+// Answerer side: receive an offer and answer directly.
+TEST_F(PeerConnectionInterfaceTest, ReceiveOfferCreateAnswer) {
+  CreatePeerConnection();
+  AddStream(kStreamLabel1);
+
+  CreateOfferAsRemoteDescription();
+  CreateAnswerAsLocalDescription();
+
+  WaitAndVerifyOnAddStream(kStreamLabel1);
+}
+
+// Answerer side: local provisional answer followed by a final local answer.
+TEST_F(PeerConnectionInterfaceTest, ReceiveOfferCreatePrAnswerAndAnswer) {
+  CreatePeerConnection();
+  AddStream(kStreamLabel1);
+
+  CreateOfferAsRemoteDescription();
+  CreatePrAnswerAsLocalDescription();
+  CreateAnswerAsLocalDescription();
+
+  WaitAndVerifyOnAddStream(kStreamLabel1);
+}
+
+// Removes and re-adds a stream with a renegotiation after each change.
+TEST_F(PeerConnectionInterfaceTest, Renegotiate) {
+  InitiateCall();
+  ASSERT_EQ(1u, pc_->remote_streams()->count());
+  pc_->RemoveStream(pc_->local_streams()->at(0));
+  CreateOfferReceiveAnswer();
+  EXPECT_EQ(0u, pc_->remote_streams()->count());
+  AddStream(kStreamLabel1);
+  CreateOfferReceiveAnswer();
+}
+
+// Tests that after negotiating an audio only call, the respondent can perform a
+// renegotiation that removes the audio stream.
+TEST_F(PeerConnectionInterfaceTest, RenegotiateAudioOnly) {
+  CreatePeerConnection();
+  AddVoiceStream(kStreamLabel1);
+  CreateOfferAsRemoteDescription();
+  CreateAnswerAsLocalDescription();
+
+  ASSERT_EQ(1u, pc_->remote_streams()->count());
+  pc_->RemoveStream(pc_->local_streams()->at(0));
+  CreateOfferReceiveAnswer();
+  EXPECT_EQ(0u, pc_->remote_streams()->count());
+}
+
+// Test that candidates are generated and that we can parse our own candidates.
+TEST_F(PeerConnectionInterfaceTest, IceCandidates) {
+  CreatePeerConnection();
+
+  // No candidate exists yet, so adding one must fail.
+  EXPECT_FALSE(pc_->AddIceCandidate(observer_.last_candidate_.get()));
+  // SetRemoteDescription takes ownership of offer.
+  SessionDescriptionInterface* offer = NULL;
+  AddStream(kStreamLabel1);
+  EXPECT_TRUE(DoCreateOffer(&offer));
+  EXPECT_TRUE(DoSetRemoteDescription(offer));
+
+  // SetLocalDescription takes ownership of answer.
+  SessionDescriptionInterface* answer = NULL;
+  EXPECT_TRUE(DoCreateAnswer(&answer));
+  EXPECT_TRUE(DoSetLocalDescription(answer));
+
+  EXPECT_TRUE_WAIT(observer_.last_candidate_.get() != NULL, kTimeout);
+  EXPECT_TRUE_WAIT(observer_.ice_complete_, kTimeout);
+
+  EXPECT_TRUE(pc_->AddIceCandidate(observer_.last_candidate_.get()));
+}
+
+// Test that the CreateOffer and CreateAnswer will fail if the track labels are
+// not unique.
+TEST_F(PeerConnectionInterfaceTest, CreateOfferAnswerWithInvalidStream) {
+  CreatePeerConnection();
+  // Create a regular offer for the CreateAnswer test later.
+  SessionDescriptionInterface* offer = NULL;
+  EXPECT_TRUE(DoCreateOffer(&offer));
+  EXPECT_TRUE(offer != NULL);
+  delete offer;
+  offer = NULL;
+
+  // Create a local stream with audio&video tracks having same label.
+  AddAudioVideoStream(kStreamLabel1, "track_label", "track_label");
+
+  // Test CreateOffer
+  EXPECT_FALSE(DoCreateOffer(&offer));
+
+  // Test CreateAnswer
+  SessionDescriptionInterface* answer = NULL;
+  EXPECT_FALSE(DoCreateAnswer(&answer));
+}
+
+// Test that we will get different SSRCs for each tracks in the offer and answer
+// we created.
+TEST_F(PeerConnectionInterfaceTest, SsrcInOfferAnswer) {
+  CreatePeerConnection();
+  // Create a local stream with audio&video tracks having different labels.
+  AddAudioVideoStream(kStreamLabel1, "audio_label", "video_label");
+
+  // Test CreateOffer
+  scoped_ptr<SessionDescriptionInterface> offer;
+  EXPECT_TRUE(DoCreateOffer(offer.use()));
+  int audio_ssrc = 0;
+  int video_ssrc = 0;
+  EXPECT_TRUE(GetFirstSsrc(GetFirstAudioContent(offer->description()),
+                           &audio_ssrc));
+  EXPECT_TRUE(GetFirstSsrc(GetFirstVideoContent(offer->description()),
+                           &video_ssrc));
+  EXPECT_NE(audio_ssrc, video_ssrc);
+
+  // Test CreateAnswer
+  EXPECT_TRUE(DoSetRemoteDescription(offer.release()));
+  scoped_ptr<SessionDescriptionInterface> answer;
+  EXPECT_TRUE(DoCreateAnswer(answer.use()));
+  audio_ssrc = 0;
+  video_ssrc = 0;
+  EXPECT_TRUE(GetFirstSsrc(GetFirstAudioContent(answer->description()),
+                           &audio_ssrc));
+  EXPECT_TRUE(GetFirstSsrc(GetFirstVideoContent(answer->description()),
+                           &video_ssrc));
+  EXPECT_NE(audio_ssrc, video_ssrc);
+}
+
+// Test that we can specify a certain track that we want statistics about.
+TEST_F(PeerConnectionInterfaceTest, GetStatsForSpecificTrack) {
+  InitiateCall();
+  ASSERT_LT(0u, pc_->remote_streams()->count());
+  ASSERT_LT(0u, pc_->remote_streams()->at(0)->GetAudioTracks().size());
+  scoped_refptr<MediaStreamTrackInterface> remote_audio =
+      pc_->remote_streams()->at(0)->GetAudioTracks()[0];
+  EXPECT_TRUE(DoGetStats(remote_audio));
+
+  // Remove the stream. Since we are sending to our selves the local
+  // and the remote stream is the same.
+  pc_->RemoveStream(pc_->local_streams()->at(0));
+  // Do a re-negotiation.
+  CreateOfferReceiveAnswer();
+
+  ASSERT_EQ(0u, pc_->remote_streams()->count());
+
+  // Test that we still can get statistics for the old track. Even if it is not
+  // sent any longer.
+  EXPECT_TRUE(DoGetStats(remote_audio));
+}
+
+// Test that we can get stats on a video track.
+TEST_F(PeerConnectionInterfaceTest, GetStatsForVideoTrack) {
+  InitiateCall();
+  ASSERT_LT(0u, pc_->remote_streams()->count());
+  ASSERT_LT(0u, pc_->remote_streams()->at(0)->GetVideoTracks().size());
+  scoped_refptr<MediaStreamTrackInterface> remote_video =
+      pc_->remote_streams()->at(0)->GetVideoTracks()[0];
+  EXPECT_TRUE(DoGetStats(remote_video));
+}
+
+// Test that we don't get statistics for an invalid track.
+TEST_F(PeerConnectionInterfaceTest, GetStatsForInvalidTrack) {
+  InitiateCall();
+  scoped_refptr<AudioTrackInterface> unknown_audio_track(
+      pc_factory_->CreateAudioTrack("unknown track", NULL));
+  EXPECT_FALSE(DoGetStats(unknown_audio_track));
+}
+
+// This test sets up two RTP data channels in loopback.
+#ifdef WIN32
+// TODO(perkj): Investigate why the transport channel sometimes doesn't become
+// writable on Windows when we try to connect in loopback.
+TEST_F(PeerConnectionInterfaceTest, DISABLED_TestDataChannel) {
+#else
+TEST_F(PeerConnectionInterfaceTest, TestDataChannel) {
+#endif
+  FakeConstraints constraints;
+  constraints.SetAllowRtpDataChannels();
+  CreatePeerConnection(&constraints);
+  scoped_refptr<DataChannelInterface> data1  =
+      pc_->CreateDataChannel("test1", NULL);
+  scoped_refptr<DataChannelInterface> data2  =
+      pc_->CreateDataChannel("test2", NULL);
+  ASSERT_TRUE(data1 != NULL);
+  talk_base::scoped_ptr<MockDataChannelObserver> observer1(
+      new MockDataChannelObserver(data1));
+  talk_base::scoped_ptr<MockDataChannelObserver> observer2(
+      new MockDataChannelObserver(data2));
+
+  EXPECT_EQ(DataChannelInterface::kConnecting, data1->state());
+  EXPECT_EQ(DataChannelInterface::kConnecting, data2->state());
+  std::string data_to_send1 = "testing testing";
+  std::string data_to_send2 = "testing something else";
+  // Sending before the channel is open must fail.
+  EXPECT_FALSE(data1->Send(DataBuffer(data_to_send1)));
+
+  CreateOfferReceiveAnswer();
+  EXPECT_TRUE_WAIT(observer1->IsOpen(), kTimeout);
+  EXPECT_TRUE_WAIT(observer2->IsOpen(), kTimeout);
+
+  EXPECT_EQ(DataChannelInterface::kOpen, data1->state());
+  EXPECT_EQ(DataChannelInterface::kOpen, data2->state());
+  EXPECT_TRUE(data1->Send(DataBuffer(data_to_send1)));
+  EXPECT_TRUE(data2->Send(DataBuffer(data_to_send2)));
+
+  EXPECT_EQ_WAIT(data_to_send1, observer1->last_message(), kTimeout);
+  EXPECT_EQ_WAIT(data_to_send2, observer2->last_message(), kTimeout);
+
+  // Closing one channel must not affect the other.
+  data1->Close();
+  EXPECT_EQ(DataChannelInterface::kClosing, data1->state());
+  CreateOfferReceiveAnswer();
+  EXPECT_FALSE(observer1->IsOpen());
+  EXPECT_EQ(DataChannelInterface::kClosed, data1->state());
+  EXPECT_TRUE(observer2->IsOpen());
+
+  data_to_send2 = "testing something else again";
+  EXPECT_TRUE(data2->Send(DataBuffer(data_to_send2)));
+
+  EXPECT_EQ_WAIT(data_to_send2, observer2->last_message(), kTimeout);
+}
+
+// This test verifies that sending binary data over RTP data channels should
+// fail.
+#ifdef WIN32
+// TODO(perkj): Investigate why the transport channel sometimes doesn't become
+// writable on Windows when we try to connect in loopback.
+TEST_F(PeerConnectionInterfaceTest, DISABLED_TestSendBinaryOnRtpDataChannel) {
+#else
+TEST_F(PeerConnectionInterfaceTest, TestSendBinaryOnRtpDataChannel) {
+#endif
+  FakeConstraints constraints;
+  constraints.SetAllowRtpDataChannels();
+  CreatePeerConnection(&constraints);
+  scoped_refptr<DataChannelInterface> data1  =
+      pc_->CreateDataChannel("test1", NULL);
+  scoped_refptr<DataChannelInterface> data2  =
+      pc_->CreateDataChannel("test2", NULL);
+  ASSERT_TRUE(data1 != NULL);
+  talk_base::scoped_ptr<MockDataChannelObserver> observer1(
+      new MockDataChannelObserver(data1));
+  talk_base::scoped_ptr<MockDataChannelObserver> observer2(
+      new MockDataChannelObserver(data2));
+
+  EXPECT_EQ(DataChannelInterface::kConnecting, data1->state());
+  EXPECT_EQ(DataChannelInterface::kConnecting, data2->state());
+
+  CreateOfferReceiveAnswer();
+  EXPECT_TRUE_WAIT(observer1->IsOpen(), kTimeout);
+  EXPECT_TRUE_WAIT(observer2->IsOpen(), kTimeout);
+
+  EXPECT_EQ(DataChannelInterface::kOpen, data1->state());
+  EXPECT_EQ(DataChannelInterface::kOpen, data2->state());
+
+  // Binary payloads are not supported on RTP data channels.
+  talk_base::Buffer buffer("test", 4);
+  EXPECT_FALSE(data1->Send(DataBuffer(buffer, true)));
+}
+
+// This test sets up an RTP data channel in loopback and tests that a channel
+// is opened even if the remote end answers with a zero SSRC.
+#ifdef WIN32
+// TODO(perkj): Investigate why the transport channel sometimes doesn't become
+// writable on Windows when we try to connect in loopback.
+TEST_F(PeerConnectionInterfaceTest, DISABLED_TestSendOnlyDataChannel) {
+#else
+TEST_F(PeerConnectionInterfaceTest, TestSendOnlyDataChannel) {
+#endif
+  FakeConstraints constraints;
+  constraints.SetAllowRtpDataChannels();
+  CreatePeerConnection(&constraints);
+  scoped_refptr<DataChannelInterface> data1  =
+      pc_->CreateDataChannel("test1", NULL);
+  talk_base::scoped_ptr<MockDataChannelObserver> observer1(
+      new MockDataChannelObserver(data1));
+
+  CreateOfferReceiveAnswerWithoutSsrc();
+
+  EXPECT_TRUE_WAIT(observer1->IsOpen(), kTimeout);
+
+  data1->Close();
+  EXPECT_EQ(DataChannelInterface::kClosing, data1->state());
+  CreateOfferReceiveAnswerWithoutSsrc();
+  EXPECT_EQ(DataChannelInterface::kClosed, data1->state());
+  EXPECT_FALSE(observer1->IsOpen());
+}
+
+// This tests that if a data channel is added in an answer, a receive-only
+// channel is created.
+TEST_F(PeerConnectionInterfaceTest, TestReceiveOnlyDataChannel) {
+ FakeConstraints constraints;
+ constraints.SetAllowRtpDataChannels();
+ CreatePeerConnection(&constraints);
+
+ std::string offer_label = "offer_channel";
+ scoped_refptr<DataChannelInterface> offer_channel =
+ pc_->CreateDataChannel(offer_label, NULL);
+
+ CreateOfferAsLocalDescription();
+
+ // Replace the data channel label in the offer and apply it as an answer.
+ std::string receive_label = "answer_channel";
+ std::string sdp;
+ EXPECT_TRUE(pc_->local_description()->ToString(&sdp));
+ talk_base::replace_substrs(offer_label.c_str(), offer_label.length(),
+ receive_label.c_str(), receive_label.length(),
+ &sdp);
+ CreateAnswerAsRemoteDescription(sdp);
+
+ // Verify that a new incoming data channel has been created and that
+ // it is open but can't be written to.
+ ASSERT_TRUE(observer_.last_datachannel_ != NULL);
+ DataChannelInterface* received_channel = observer_.last_datachannel_;
+ EXPECT_EQ(DataChannelInterface::kConnecting, received_channel->state());
+ EXPECT_EQ(receive_label, received_channel->label());
+ EXPECT_FALSE(received_channel->Send(DataBuffer("something")));
+
+ // Verify that the channel we initially offered has been rejected.
+ EXPECT_EQ(DataChannelInterface::kClosed, offer_channel->state());
+
+ // Do another offer / answer exchange and verify that the data channel is
+ // opened.
+ CreateOfferReceiveAnswer();
+ EXPECT_EQ_WAIT(DataChannelInterface::kOpen, received_channel->state(),
+ kTimeout);
+}
+
+// This tests that no data channel is returned if a reliable channel is
+// requested.
+// TODO(perkj): Remove this test once reliable channels are implemented.
+TEST_F(PeerConnectionInterfaceTest, CreateReliableRtpDataChannelShouldFail) {
+ FakeConstraints constraints;
+ constraints.SetAllowRtpDataChannels();
+ CreatePeerConnection(&constraints);
+
+ std::string label = "test";
+ webrtc::DataChannelInit config;
+ config.reliable = true;  // Not supported for RTP data channels.
+ scoped_refptr<DataChannelInterface> channel =
+ pc_->CreateDataChannel(label, &config);
+ EXPECT_TRUE(channel == NULL);
+}
+
+// This tests that a SCTP data channel is returned using different
+// DataChannelInit configurations.
+TEST_F(PeerConnectionInterfaceTest, CreateSctpDataChannel) {
+ FakeConstraints constraints;
+ constraints.SetAllowDtlsSctpDataChannels();
+ CreatePeerConnection(&constraints);
+
+ webrtc::DataChannelInit config;
+
+ scoped_refptr<DataChannelInterface> channel =
+ pc_->CreateDataChannel("1", &config);
+ EXPECT_TRUE(channel != NULL);
+ EXPECT_TRUE(channel->reliable());  // Default config is reliable.
+
+ config.ordered = false;
+ channel = pc_->CreateDataChannel("2", &config);
+ EXPECT_TRUE(channel != NULL);
+ EXPECT_TRUE(channel->reliable());  // Unordered alone stays reliable.
+
+ config.ordered = true;
+ config.maxRetransmits = 0;  // A retransmit limit makes the channel unreliable.
+ channel = pc_->CreateDataChannel("3", &config);
+ EXPECT_TRUE(channel != NULL);
+ EXPECT_FALSE(channel->reliable());
+
+ config.maxRetransmits = -1;  // Back to unset.
+ config.maxRetransmitTime = 0;  // A retransmit-time limit does too.
+ channel = pc_->CreateDataChannel("4", &config);
+ EXPECT_TRUE(channel != NULL);
+ EXPECT_FALSE(channel->reliable());
+}
+
+// This tests that no data channel is returned if both maxRetransmits and
+// maxRetransmitTime are set for SCTP data channels.
+TEST_F(PeerConnectionInterfaceTest,
+ CreateSctpDataChannelShouldFailForInvalidConfig) {
+ FakeConstraints constraints;
+ constraints.SetAllowDtlsSctpDataChannels();
+ CreatePeerConnection(&constraints);
+
+ std::string label = "test";
+ webrtc::DataChannelInit config;
+ config.maxRetransmits = 0;
+ config.maxRetransmitTime = 0;  // Setting both limits at once is invalid.
+
+ scoped_refptr<DataChannelInterface> channel =
+ pc_->CreateDataChannel(label, &config);
+ EXPECT_TRUE(channel == NULL);
+}
+
+// The test verifies that the first id not used by existing data channels is
+// assigned to a new data channel if no id is specified.
+TEST_F(PeerConnectionInterfaceTest, AssignSctpDataChannelId) {
+ FakeConstraints constraints;
+ constraints.SetAllowDtlsSctpDataChannels();
+ CreatePeerConnection(&constraints);
+
+ webrtc::DataChannelInit config;
+
+ scoped_refptr<DataChannelInterface> channel =
+ pc_->CreateDataChannel("1", &config);
+ EXPECT_TRUE(channel != NULL);
+ EXPECT_EQ(1, channel->id());
+
+ config.id = 4;  // An explicitly requested id is honored.
+ channel = pc_->CreateDataChannel("4", &config);
+ EXPECT_TRUE(channel != NULL);
+ EXPECT_EQ(config.id, channel->id());
+
+ config.id = -1;  // Unspecified; the first unused id should be picked.
+ channel = pc_->CreateDataChannel("2", &config);
+ EXPECT_TRUE(channel != NULL);
+ EXPECT_EQ(2, channel->id());
+}
+
+// The test verifies that creating a SCTP data channel with an id already in use
+// or out of range should fail.
+TEST_F(PeerConnectionInterfaceTest,
+ CreateSctpDataChannelWithInvalidIdShouldFail) {
+ FakeConstraints constraints;
+ constraints.SetAllowDtlsSctpDataChannels();
+ CreatePeerConnection(&constraints);
+
+ webrtc::DataChannelInit config;
+
+ scoped_refptr<DataChannelInterface> channel =
+ pc_->CreateDataChannel("1", &config);
+ EXPECT_TRUE(channel != NULL);
+ EXPECT_EQ(1, channel->id());
+
+ config.id = 1;  // Already in use by the first channel.
+ channel = pc_->CreateDataChannel("x", &config);
+ EXPECT_TRUE(channel == NULL);
+
+ config.id = cricket::kMaxSctpSid;  // Largest valid id.
+ channel = pc_->CreateDataChannel("max", &config);
+ EXPECT_TRUE(channel != NULL);
+ EXPECT_EQ(config.id, channel->id());
+
+ config.id = cricket::kMaxSctpSid + 1;  // Out of range.
+ channel = pc_->CreateDataChannel("x", &config);
+ EXPECT_TRUE(channel == NULL);
+}
+
+// This tests that a data channel closes when a PeerConnection is deleted/closed.
+#ifdef WIN32
+// TODO(perkj): Investigate why the transport channel sometimes doesn't become
+// writable on Windows when we try to connect in loop back.
+TEST_F(PeerConnectionInterfaceTest,
+ DISABLED_DataChannelCloseWhenPeerConnectionClose) {
+#else
+TEST_F(PeerConnectionInterfaceTest, DataChannelCloseWhenPeerConnectionClose) {
+#endif
+ FakeConstraints constraints;
+ constraints.SetAllowRtpDataChannels();
+ CreatePeerConnection(&constraints);
+
+ scoped_refptr<DataChannelInterface> data1 =
+ pc_->CreateDataChannel("test1", NULL);
+ scoped_refptr<DataChannelInterface> data2 =
+ pc_->CreateDataChannel("test2", NULL);
+ ASSERT_TRUE(data1 != NULL);
+ talk_base::scoped_ptr<MockDataChannelObserver> observer1(
+ new MockDataChannelObserver(data1));
+ talk_base::scoped_ptr<MockDataChannelObserver> observer2(
+ new MockDataChannelObserver(data2));
+
+ CreateOfferReceiveAnswer();
+ EXPECT_TRUE_WAIT(observer1->IsOpen(), kTimeout);
+ EXPECT_TRUE_WAIT(observer2->IsOpen(), kTimeout);
+
+ ReleasePeerConnection();
+ EXPECT_EQ(DataChannelInterface::kClosed, data1->state());
+ EXPECT_EQ(DataChannelInterface::kClosed, data2->state());
+}
+
+// This tests that data channels can be rejected in an answer.
+TEST_F(PeerConnectionInterfaceTest, TestRejectDataChannelInAnswer) {
+ FakeConstraints constraints;
+ constraints.SetAllowRtpDataChannels();
+ CreatePeerConnection(&constraints);
+
+ scoped_refptr<DataChannelInterface> offer_channel(
+ pc_->CreateDataChannel("offer_channel", NULL));
+
+ CreateOfferAsLocalDescription();
+
+ // Create an answer where the m-line for data channels are rejected.
+ std::string sdp;
+ EXPECT_TRUE(pc_->local_description()->ToString(&sdp));
+ webrtc::JsepSessionDescription* answer = new webrtc::JsepSessionDescription(
+ SessionDescriptionInterface::kAnswer);
+ EXPECT_TRUE(answer->Initialize(sdp, NULL));
+ cricket::ContentInfo* data_info =
+ answer->description()->GetContentByName("data");
+ data_info->rejected = true;  // NOTE(review): assumes a "data" content exists; an ASSERT would avoid a null deref.
+
+ DoSetRemoteDescription(answer);
+ EXPECT_EQ(DataChannelInterface::kClosed, offer_channel->state());
+}
+
+// Test that we can create a session description from an SDP string from
+// FireFox, use it as a remote session description, generate an answer and use
+// the answer as a local description.
+TEST_F(PeerConnectionInterfaceTest, ReceiveFireFoxOffer) {
+ MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
+ FakeConstraints constraints;
+ constraints.AddMandatory(webrtc::MediaConstraintsInterface::kEnableDtlsSrtp,
+ true);
+ CreatePeerConnection(&constraints);
+ AddAudioVideoStream(kStreamLabel1, "audio_label", "video_label");
+ SessionDescriptionInterface* desc =
+ webrtc::CreateSessionDescription(SessionDescriptionInterface::kOffer,
+ webrtc::kFireFoxSdpOffer);
+ EXPECT_TRUE(DoSetSessionDescription(desc, false));
+ CreateAnswerAsLocalDescription();
+ ASSERT_TRUE(pc_->local_description() != NULL);
+ ASSERT_TRUE(pc_->remote_description() != NULL);
+
+ const cricket::ContentInfo* content =
+ cricket::GetFirstAudioContent(pc_->local_description()->description());
+ ASSERT_TRUE(content != NULL);
+ EXPECT_FALSE(content->rejected);
+
+ content =
+ cricket::GetFirstVideoContent(pc_->local_description()->description());
+ ASSERT_TRUE(content != NULL);
+ EXPECT_FALSE(content->rejected);
+
+ content =
+ cricket::GetFirstDataContent(pc_->local_description()->description());
+ ASSERT_TRUE(content != NULL);
+ EXPECT_TRUE(content->rejected);  // The offered data content is expected to be rejected here.
+}
+
+// Test that we can create an audio only offer and receive an answer with a
+// limited set of audio codecs and receive an updated offer with more audio
+// codecs, where the added codecs are not supported.
+TEST_F(PeerConnectionInterfaceTest, ReceiveUpdatedAudioOfferWithBadCodecs) {
+ CreatePeerConnection();
+ AddVoiceStream("audio_label");
+ CreateOfferAsLocalDescription();
+
+ SessionDescriptionInterface* answer =
+ webrtc::CreateSessionDescription(SessionDescriptionInterface::kAnswer,
+ webrtc::kAudioSdp);
+ EXPECT_TRUE(DoSetSessionDescription(answer, false));
+
+ SessionDescriptionInterface* updated_offer =
+ webrtc::CreateSessionDescription(SessionDescriptionInterface::kOffer,
+ webrtc::kAudioSdpWithUnsupportedCodecs);  // Unsupported codecs must not cause failure.
+ EXPECT_TRUE(DoSetSessionDescription(updated_offer, false));
+ CreateAnswerAsLocalDescription();
+}
+
+// Test that PeerConnection::Close changes the states to closed and all remote
+// tracks change state to ended.
+TEST_F(PeerConnectionInterfaceTest, CloseAndTestStreamsAndStates) {
+ // Initialize a PeerConnection and negotiate local and remote session
+ // description.
+ InitiateCall();
+ ASSERT_EQ(1u, pc_->local_streams()->count());
+ ASSERT_EQ(1u, pc_->remote_streams()->count());
+
+ pc_->Close();
+
+ EXPECT_EQ(PeerConnectionInterface::kClosed, pc_->signaling_state());
+ EXPECT_EQ(PeerConnectionInterface::kIceConnectionClosed,
+ pc_->ice_connection_state());
+ EXPECT_EQ(PeerConnectionInterface::kIceGatheringComplete,
+ pc_->ice_gathering_state());
+
+ EXPECT_EQ(1u, pc_->local_streams()->count());  // Streams are still listed after Close().
+ EXPECT_EQ(1u, pc_->remote_streams()->count());
+
+ scoped_refptr<MediaStreamInterface> remote_stream =
+ pc_->remote_streams()->at(0);
+ EXPECT_EQ(MediaStreamTrackInterface::kEnded,
+ remote_stream->GetVideoTracks()[0]->state());
+ EXPECT_EQ(MediaStreamTrackInterface::kEnded,
+ remote_stream->GetAudioTracks()[0]->state());
+}
+
+// Test that PeerConnection methods fail gracefully after
+// PeerConnection::Close has been called.
+TEST_F(PeerConnectionInterfaceTest, CloseAndTestMethods) {
+ CreatePeerConnection();
+ AddAudioVideoStream(kStreamLabel1, "audio_label", "video_label");
+ CreateOfferAsRemoteDescription();
+ CreateAnswerAsLocalDescription();
+
+ ASSERT_EQ(1u, pc_->local_streams()->count());
+ scoped_refptr<MediaStreamInterface> local_stream =
+ pc_->local_streams()->at(0);
+
+ pc_->Close();
+
+ pc_->RemoveStream(local_stream);  // Must not crash after Close().
+ EXPECT_FALSE(pc_->AddStream(local_stream, NULL));
+
+ ASSERT_FALSE(local_stream->GetAudioTracks().empty());
+ talk_base::scoped_ptr<talk_base::scoped_refptr<webrtc::DtmfSenderInterface> >::element_type dummy_unused_;  // (placeholder removed)
+ talk_base::scoped_refptr<webrtc::DtmfSenderInterface> dtmf_sender(
+ pc_->CreateDtmfSender(local_stream->GetAudioTracks()[0]));
+ EXPECT_FALSE(dtmf_sender->CanInsertDtmf());
+
+ EXPECT_TRUE(pc_->CreateDataChannel("test", NULL) == NULL);
+
+ EXPECT_TRUE(pc_->local_description() != NULL);
+ EXPECT_TRUE(pc_->remote_description() != NULL);
+
+ talk_base::scoped_ptr<SessionDescriptionInterface> offer;
+ EXPECT_TRUE(DoCreateOffer(offer.use()));
+ talk_base::scoped_ptr<SessionDescriptionInterface> answer;
+ EXPECT_TRUE(DoCreateAnswer(answer.use()));
+
+ std::string sdp;
+ ASSERT_TRUE(pc_->remote_description()->ToString(&sdp));
+ SessionDescriptionInterface* remote_offer =
+ webrtc::CreateSessionDescription(SessionDescriptionInterface::kOffer,
+ sdp, NULL);
+ EXPECT_FALSE(DoSetRemoteDescription(remote_offer));
+
+ ASSERT_TRUE(pc_->local_description()->ToString(&sdp));
+ SessionDescriptionInterface* local_offer =
+ webrtc::CreateSessionDescription(SessionDescriptionInterface::kOffer,
+ sdp, NULL);
+ EXPECT_FALSE(DoSetLocalDescription(local_offer));
+}
+
+// Test that GetStats can still be called after PeerConnection::Close.
+TEST_F(PeerConnectionInterfaceTest, CloseAndGetStats) {
+ InitiateCall();
+ pc_->Close();
+ DoGetStats(NULL);  // Must not crash after Close().
+}
diff --git a/talk/app/webrtc/peerconnectionproxy.h b/talk/app/webrtc/peerconnectionproxy.h
new file mode 100644
index 0000000..f07416d
--- /dev/null
+++ b/talk/app/webrtc/peerconnectionproxy.h
@@ -0,0 +1,72 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_PEERCONNECTIONPROXY_H_
+#define TALK_APP_WEBRTC_PEERCONNECTIONPROXY_H_
+
+#include "talk/app/webrtc/peerconnectioninterface.h"
+#include "talk/app/webrtc/proxy.h"
+
+namespace webrtc {
+
+// Define proxy for PeerConnectionInterface; each call is marshalled to the
+BEGIN_PROXY_MAP(PeerConnection)
+ PROXY_METHOD0(talk_base::scoped_refptr<StreamCollectionInterface>,
+ local_streams)
+ PROXY_METHOD0(talk_base::scoped_refptr<StreamCollectionInterface>,
+ remote_streams)
+ PROXY_METHOD2(bool, AddStream, MediaStreamInterface*,
+ const MediaConstraintsInterface*)
+ PROXY_METHOD1(void, RemoveStream, MediaStreamInterface*)
+ PROXY_METHOD1(talk_base::scoped_refptr<DtmfSenderInterface>,
+ CreateDtmfSender, AudioTrackInterface*)
+ PROXY_METHOD2(bool, GetStats, StatsObserver*, MediaStreamTrackInterface*)
+ PROXY_METHOD2(talk_base::scoped_refptr<DataChannelInterface>,
+ CreateDataChannel, const std::string&, const DataChannelInit*)
+ PROXY_CONSTMETHOD0(const SessionDescriptionInterface*, local_description)
+ PROXY_CONSTMETHOD0(const SessionDescriptionInterface*, remote_description)
+ PROXY_METHOD2(void, CreateOffer, CreateSessionDescriptionObserver*,
+ const MediaConstraintsInterface*)
+ PROXY_METHOD2(void, CreateAnswer, CreateSessionDescriptionObserver*,
+ const MediaConstraintsInterface*)
+ PROXY_METHOD2(void, SetLocalDescription, SetSessionDescriptionObserver*,
+ SessionDescriptionInterface*)
+ PROXY_METHOD2(void, SetRemoteDescription, SetSessionDescriptionObserver*,
+ SessionDescriptionInterface*)
+ PROXY_METHOD2(bool, UpdateIce, const IceServers&,
+ const MediaConstraintsInterface*)
+ PROXY_METHOD1(bool, AddIceCandidate, const IceCandidateInterface*)
+ PROXY_METHOD0(SignalingState, signaling_state)
+ PROXY_METHOD0(IceState, ice_state)
+ PROXY_METHOD0(IceConnectionState, ice_connection_state)
+ PROXY_METHOD0(IceGatheringState, ice_gathering_state)
+ PROXY_METHOD0(void, Close)
+END_PROXY()
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_PEERCONNECTIONPROXY_H_
diff --git a/talk/app/webrtc/portallocatorfactory.cc b/talk/app/webrtc/portallocatorfactory.cc
new file mode 100644
index 0000000..59ac9fb
--- /dev/null
+++ b/talk/app/webrtc/portallocatorfactory.cc
@@ -0,0 +1,92 @@
+/*
+ * libjingle
+ * Copyright 2004--2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/portallocatorfactory.h"
+
+#include "talk/base/logging.h"
+#include "talk/base/network.h"
+#include "talk/base/thread.h"
+#include "talk/p2p/base/basicpacketsocketfactory.h"
+#include "talk/p2p/client/httpportallocator.h"
+
+static const char kUserAgent[] = "PeerConnection User Agent";
+
+namespace webrtc {
+
+using talk_base::scoped_ptr;
+
+talk_base::scoped_refptr<PortAllocatorFactoryInterface>
+PortAllocatorFactory::Create(
+ talk_base::Thread* worker_thread) {
+ talk_base::RefCountedObject<PortAllocatorFactory>* allocator =
+ new talk_base::RefCountedObject<PortAllocatorFactory>(worker_thread);
+ return allocator;  // Converted to scoped_refptr on return; caller shares ownership.
+}
+
+PortAllocatorFactory::PortAllocatorFactory(talk_base::Thread* worker_thread)
+ : network_manager_(new talk_base::BasicNetworkManager()),
+ socket_factory_(new talk_base::BasicPacketSocketFactory(worker_thread)) {
+}
+
+PortAllocatorFactory::~PortAllocatorFactory() {}  // scoped_ptr members clean up automatically.
+
+cricket::PortAllocator* PortAllocatorFactory::CreatePortAllocator(
+ const std::vector<StunConfiguration>& stun,
+ const std::vector<TurnConfiguration>& turn) {
+ std::vector<talk_base::SocketAddress> stun_hosts;
+ typedef std::vector<StunConfiguration>::const_iterator StunIt;
+ for (StunIt stun_it = stun.begin(); stun_it != stun.end(); ++stun_it) {
+ stun_hosts.push_back(stun_it->server);
+ }
+
+ talk_base::SocketAddress stun_addr;
+ if (!stun_hosts.empty()) {
+ stun_addr = stun_hosts.front();  // Only the first STUN server is used.
+ }
+ scoped_ptr<cricket::BasicPortAllocator> allocator(
+ new cricket::BasicPortAllocator(
+ network_manager_.get(), socket_factory_.get(), stun_addr));
+
+ if (turn.size() > 0) {  // Only the first TURN server is used.
+ cricket::RelayCredentials credentials(turn[0].username, turn[0].password);
+ cricket::RelayServerConfig relay_server(cricket::RELAY_TURN);
+ cricket::ProtocolType protocol;
+ if (cricket::StringToProto(turn[0].transport_type.c_str(), &protocol)) {
+ relay_server.ports.push_back(cricket::ProtocolAddress(
+ turn[0].server, protocol));
+ relay_server.credentials = credentials;
+ allocator->AddRelay(relay_server);
+ } else {
+ LOG(LS_WARNING) << "Ignoring TURN server " << turn[0].server << ". "
+ << "Reason= Incorrect " << turn[0].transport_type
+ << " transport parameter.";
+ }
+ }
+ return allocator.release();  // Caller takes ownership.
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/portallocatorfactory.h b/talk/app/webrtc/portallocatorfactory.h
new file mode 100644
index 0000000..e30024c
--- /dev/null
+++ b/talk/app/webrtc/portallocatorfactory.h
@@ -0,0 +1,70 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file defines the default implementation of
+// PortAllocatorFactoryInterface.
+// This implementation creates instances of cricket::HTTPPortAllocator and uses
+// the BasicNetworkManager and BasicPacketSocketFactory.
+
+#ifndef TALK_APP_WEBRTC_PORTALLOCATORFACTORY_H_
+#define TALK_APP_WEBRTC_PORTALLOCATORFACTORY_H_
+
+#include "talk/app/webrtc/peerconnectioninterface.h"
+#include "talk/base/scoped_ptr.h"
+
+namespace cricket {
+class PortAllocator;
+}
+
+namespace talk_base {
+class BasicNetworkManager;
+class BasicPacketSocketFactory;
+}
+
+namespace webrtc {
+
+class PortAllocatorFactory : public PortAllocatorFactoryInterface {
+ public:
+ static talk_base::scoped_refptr<PortAllocatorFactoryInterface> Create(
+ talk_base::Thread* worker_thread);
+
+ virtual cricket::PortAllocator* CreatePortAllocator(
+ const std::vector<StunConfiguration>& stun,
+ const std::vector<TurnConfiguration>& turn);
+
+ protected:
+ explicit PortAllocatorFactory(talk_base::Thread* worker_thread);
+ ~PortAllocatorFactory();  // Protected: lifetime is managed by ref counting (see Create()).
+
+ private:
+ talk_base::scoped_ptr<talk_base::BasicNetworkManager> network_manager_;  // Shared by every allocator created by this factory.
+ talk_base::scoped_ptr<talk_base::BasicPacketSocketFactory> socket_factory_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_PORTALLOCATORFACTORY_H_
diff --git a/talk/app/webrtc/proxy.h b/talk/app/webrtc/proxy.h
new file mode 100644
index 0000000..4db4bef
--- /dev/null
+++ b/talk/app/webrtc/proxy.h
@@ -0,0 +1,287 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file contains Macros for creating proxies for webrtc MediaStream and
+// PeerConnection classes.
+
+//
+// Example usage:
+//
+// class TestInterface : public talk_base::RefCountInterface {
+// public:
+// std::string FooA() = 0;
+// std::string FooB(bool arg1) const = 0;
+// std::string FooC(bool arg1)= 0;
+// };
+//
+// Note that return types can not be a const reference.
+//
+// class Test : public TestInterface {
+// ... implementation of the interface.
+// };
+//
+// BEGIN_PROXY_MAP(Test)
+// PROXY_METHOD0(std::string, FooA)
+// PROXY_CONSTMETHOD1(std::string, FooB, arg1)
+// PROXY_METHOD1(std::string, FooC, arg1)
+// END_PROXY()
+//
+// The proxy can be created using TestProxy::Create(Thread*, TestInterface*).
+
+#ifndef TALK_APP_WEBRTC_PROXY_H_
+#define TALK_APP_WEBRTC_PROXY_H_
+
+#include "talk/base/thread.h"
+
+namespace webrtc {
+
+template <typename R>  // Holds the return value produced by Invoke().
+class ReturnType {
+ public:
+ template<typename C, typename M>
+ void Invoke(C* c, M m) { r_ = (c->*m)(); }
+ template<typename C, typename M, typename T1>
+ void Invoke(C* c, M m, T1 a1) { r_ = (c->*m)(a1); }
+ template<typename C, typename M, typename T1, typename T2>
+ void Invoke(C* c, M m, T1 a1, T2 a2) { r_ = (c->*m)(a1, a2); }
+ template<typename C, typename M, typename T1, typename T2, typename T3>
+ void Invoke(C* c, M m, T1 a1, T2 a2, T3 a3) { r_ = (c->*m)(a1, a2, a3); }
+
+ R value() { return r_; }  // Only meaningful after Invoke() has run; R must be default-constructible.
+
+ private:
+ R r_;
+};
+
+template <>  // Specialization for void: Invoke() discards nothing and value() is a no-op.
+class ReturnType<void> {
+ public:
+ template<typename C, typename M>
+ void Invoke(C* c, M m) { (c->*m)(); }
+ template<typename C, typename M, typename T1>
+ void Invoke(C* c, M m, T1 a1) { (c->*m)(a1); }
+ template<typename C, typename M, typename T1, typename T2>
+ void Invoke(C* c, M m, T1 a1, T2 a2) { (c->*m)(a1, a2); }
+ template<typename C, typename M, typename T1, typename T2, typename T3>
+ void Invoke(C* c, M m, T1 a1, T2 a2, T3 a3) { (c->*m)(a1, a2, a3); }
+
+ void value() {}
+};
+
+template <typename C, typename R>  // Synchronously runs a 0-arg method on another thread.
+class MethodCall0 : public talk_base::Message,
+ public talk_base::MessageHandler {
+ public:
+ typedef R (C::*Method)();
+ MethodCall0(C* c, Method m) : c_(c), m_(m) {}
+
+ R Marshal(talk_base::Thread* t) {
+ t->Send(this, 0);  // Dispatches to t and waits for OnMessage to run.
+ return r_.value();
+ }
+
+ private:
+ void OnMessage(talk_base::Message*) { r_.Invoke(c_, m_);}
+
+ C* c_;
+ Method m_;
+ ReturnType<R> r_;
+};
+
+template <typename C, typename R>  // Const 0-arg variant of MethodCall0.
+class ConstMethodCall0 : public talk_base::Message,
+ public talk_base::MessageHandler {
+ public:
+ typedef R (C::*Method)() const;
+ ConstMethodCall0(C* c, Method m) : c_(c), m_(m) {}
+
+ R Marshal(talk_base::Thread* t) {
+ t->Send(this, 0);
+ return r_.value();
+ }
+
+ private:
+ void OnMessage(talk_base::Message*) { r_.Invoke(c_, m_); }
+
+ C* c_;
+ Method m_;
+ ReturnType<R> r_;
+};
+
+template <typename C, typename R, typename T1>  // 1-arg variant; the argument is copied into a1_.
+class MethodCall1 : public talk_base::Message,
+ public talk_base::MessageHandler {
+ public:
+ typedef R (C::*Method)(T1 a1);
+ MethodCall1(C* c, Method m, T1 a1) : c_(c), m_(m), a1_(a1) {}
+
+ R Marshal(talk_base::Thread* t) {
+ t->Send(this, 0);
+ return r_.value();
+ }
+
+ private:
+ void OnMessage(talk_base::Message*) { r_.Invoke(c_, m_, a1_); }
+
+ C* c_;
+ Method m_;
+ ReturnType<R> r_;
+ T1 a1_;
+};
+
+template <typename C, typename R, typename T1>  // Const 1-arg variant.
+class ConstMethodCall1 : public talk_base::Message,
+ public talk_base::MessageHandler {
+ public:
+ typedef R (C::*Method)(T1 a1) const;
+ ConstMethodCall1(C* c, Method m, T1 a1) : c_(c), m_(m), a1_(a1) {}
+
+ R Marshal(talk_base::Thread* t) {
+ t->Send(this, 0);
+ return r_.value();
+ }
+
+ private:
+ void OnMessage(talk_base::Message*) { r_.Invoke(c_, m_, a1_); }
+
+ C* c_;
+ Method m_;
+ ReturnType<R> r_;
+ T1 a1_;
+};
+
+template <typename C, typename R, typename T1, typename T2>  // 2-arg variant.
+class MethodCall2 : public talk_base::Message,
+ public talk_base::MessageHandler {
+ public:
+ typedef R (C::*Method)(T1 a1, T2 a2);
+ MethodCall2(C* c, Method m, T1 a1, T2 a2) : c_(c), m_(m), a1_(a1), a2_(a2) {}
+
+ R Marshal(talk_base::Thread* t) {
+ t->Send(this, 0);
+ return r_.value();
+ }
+
+ private:
+ void OnMessage(talk_base::Message*) { r_.Invoke(c_, m_, a1_, a2_); }
+
+ C* c_;
+ Method m_;
+ ReturnType<R> r_;
+ T1 a1_;
+ T2 a2_;
+};
+
+template <typename C, typename R, typename T1, typename T2, typename T3>  // 3-arg variant.
+class MethodCall3 : public talk_base::Message,
+ public talk_base::MessageHandler {
+ public:
+ typedef R (C::*Method)(T1 a1, T2 a2, T3 a3);
+ MethodCall3(C* c, Method m, T1 a1, T2 a2, T3 a3)
+ : c_(c), m_(m), a1_(a1), a2_(a2), a3_(a3) {}
+
+ R Marshal(talk_base::Thread* t) {
+ t->Send(this, 0);
+ return r_.value();
+ }
+
+ private:
+ void OnMessage(talk_base::Message*) { r_.Invoke(c_, m_, a1_, a2_, a3_); }
+
+ C* c_;
+ Method m_;
+ ReturnType<R> r_;
+ T1 a1_;
+ T2 a2_;
+ T3 a3_;
+};
+
+#define BEGIN_PROXY_MAP(c) /* Opens class c##Proxy implementing c##Interface. */ \
+ class c##Proxy : public c##Interface {\
+ protected:\
+ typedef c##Interface C;\
+ c##Proxy(talk_base::Thread* thread, C* c)\
+ : owner_thread_(thread), \
+ c_(c) {}\
+ ~c##Proxy() {\
+ MethodCall0<c##Proxy, void> call(this, &c##Proxy::Release_s);\
+ call.Marshal(owner_thread_);\
+ }\
+ public:\
+ static talk_base::scoped_refptr<C> Create(talk_base::Thread* thread, \
+ C* c) {\
+ return new talk_base::RefCountedObject<c##Proxy>(thread, c);\
+ }\
+
+#define PROXY_METHOD0(r, method) /* 0-arg override marshalled to owner_thread_. */\
+ r method() OVERRIDE {\
+ MethodCall0<C, r> call(c_.get(), &C::method);\
+ return call.Marshal(owner_thread_);\
+ }\
+
+#define PROXY_CONSTMETHOD0(r, method) /* Const 0-arg override. */\
+ r method() const OVERRIDE {\
+ ConstMethodCall0<C, r> call(c_.get(), &C::method);\
+ return call.Marshal(owner_thread_);\
+ }\
+
+#define PROXY_METHOD1(r, method, t1) /* 1-arg override. */\
+ r method(t1 a1) OVERRIDE {\
+ MethodCall1<C, r, t1> call(c_.get(), &C::method, a1);\
+ return call.Marshal(owner_thread_);\
+ }\
+
+#define PROXY_CONSTMETHOD1(r, method, t1) /* Const 1-arg override. */\
+ r method(t1 a1) const OVERRIDE {\
+ ConstMethodCall1<C, r, t1> call(c_.get(), &C::method, a1);\
+ return call.Marshal(owner_thread_);\
+ }\
+
+#define PROXY_METHOD2(r, method, t1, t2) /* 2-arg override. */\
+ r method(t1 a1, t2 a2) OVERRIDE {\
+ MethodCall2<C, r, t1, t2> call(c_.get(), &C::method, a1, a2);\
+ return call.Marshal(owner_thread_);\
+ }\
+
+#define PROXY_METHOD3(r, method, t1, t2, t3) /* 3-arg override. */\
+ r method(t1 a1, t2 a2, t3 a3) OVERRIDE {\
+ MethodCall3<C, r, t1, t2, t3> call(c_.get(), &C::method, a1, a2, a3);\
+ return call.Marshal(owner_thread_);\
+ }\
+
+#define END_PROXY() /* Adds Release_s (drops c_ on the owner thread) and members; closes the class. */ \
+ private:\
+ void Release_s() {\
+ c_ = NULL;\
+ }\
+ mutable talk_base::Thread* owner_thread_;\
+ talk_base::scoped_refptr<C> c_;\
+ };\
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_PROXY_H_
diff --git a/talk/app/webrtc/proxy_unittest.cc b/talk/app/webrtc/proxy_unittest.cc
new file mode 100644
index 0000000..71a583c
--- /dev/null
+++ b/talk/app/webrtc/proxy_unittest.cc
@@ -0,0 +1,170 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/proxy.h"
+
+#include <string>
+
+#include "talk/base/refcount.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/thread.h"
+#include "talk/base/gunit.h"
+#include "testing/base/public/gmock.h"
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::Exactly;
+using ::testing::InvokeWithoutArgs;
+using ::testing::Return;
+
+namespace webrtc {
+
// Interface used for testing here.  Covers the zero-, one- and two-argument
// method shapes (const and non-const) generated by the PROXY_* macros.
class FakeInterface : public talk_base::RefCountInterface {
 public:
  virtual void VoidMethod0() = 0;
  virtual std::string Method0() = 0;
  virtual std::string ConstMethod0() const = 0;
  virtual std::string Method1(std::string s) = 0;
  virtual std::string ConstMethod1(std::string s) const = 0;
  virtual std::string Method2(std::string s1, std::string s2) = 0;

 protected:
  // Protected non-virtual destructor: instances are reference counted and
  // must be destroyed through Release(), never deleted via this interface.
  ~FakeInterface() {}
};
+
// Proxy for the test interface.  Expands to a FakeProxy class whose methods
// marshal each call onto the thread passed to FakeProxy::Create().
BEGIN_PROXY_MAP(Fake)
  PROXY_METHOD0(void, VoidMethod0)
  PROXY_METHOD0(std::string, Method0)
  PROXY_CONSTMETHOD0(std::string, ConstMethod0)
  PROXY_METHOD1(std::string, Method1, std::string)
  PROXY_CONSTMETHOD1(std::string, ConstMethod1, std::string)
  PROXY_METHOD2(std::string, Method2, std::string, std::string)
END_PROXY()
+
// Mock implementation of the test interface.  Created through the factory so
// it is always wrapped in a RefCountedObject; the constructor/destructor are
// protected to prevent stack construction or direct deletion.
class Fake : public FakeInterface {
 public:
  static talk_base::scoped_refptr<Fake> Create() {
    return new talk_base::RefCountedObject<Fake>();
  }

  MOCK_METHOD0(VoidMethod0, void());
  MOCK_METHOD0(Method0, std::string());
  MOCK_CONST_METHOD0(ConstMethod0, std::string());

  MOCK_METHOD1(Method1, std::string(std::string));
  MOCK_CONST_METHOD1(ConstMethod1, std::string(std::string));

  MOCK_METHOD2(Method2, std::string(std::string, std::string));

 protected:
  Fake() {}
  ~Fake() {}
};
+
+class ProxyTest: public testing::Test {
+ public:
+ // Checks that the functions is called on the |signaling_thread_|.
+ void CheckThread() {
+ EXPECT_EQ(talk_base::Thread::Current(), signaling_thread_.get());
+ }
+
+ protected:
+ virtual void SetUp() {
+ signaling_thread_.reset(new talk_base::Thread());
+ ASSERT_TRUE(signaling_thread_->Start());
+ fake_ = Fake::Create();
+ fake_proxy_ = FakeProxy::Create(signaling_thread_.get(), fake_.get());
+ }
+
+ protected:
+ talk_base::scoped_ptr<talk_base::Thread> signaling_thread_;
+ talk_base::scoped_refptr<FakeInterface> fake_proxy_;
+ talk_base::scoped_refptr<Fake> fake_;
+};
+
// Each test below sets an expectation on the mock that (a) the method is
// called exactly once and (b) it executes on the signaling thread
// (InvokeWithoutArgs -> CheckThread), then calls the method through the proxy
// and checks the marshaled return value.

// void return, no arguments.
TEST_F(ProxyTest, VoidMethod0) {
  EXPECT_CALL(*fake_, VoidMethod0())
            .Times(Exactly(1))
            .WillOnce(InvokeWithoutArgs(this, &ProxyTest::CheckThread));
  fake_proxy_->VoidMethod0();
}

// std::string return, no arguments.
TEST_F(ProxyTest, Method0) {
  EXPECT_CALL(*fake_, Method0())
            .Times(Exactly(1))
            .WillOnce(
                DoAll(InvokeWithoutArgs(this, &ProxyTest::CheckThread),
                      Return("Method0")));
  EXPECT_EQ("Method0",
            fake_proxy_->Method0());
}

// const method, no arguments.
TEST_F(ProxyTest, ConstMethod0) {
  EXPECT_CALL(*fake_, ConstMethod0())
            .Times(Exactly(1))
            .WillOnce(
                DoAll(InvokeWithoutArgs(this, &ProxyTest::CheckThread),
                      Return("ConstMethod0")));
  EXPECT_EQ("ConstMethod0",
            fake_proxy_->ConstMethod0());
}

// One argument; also verifies the argument is marshaled through intact.
TEST_F(ProxyTest, Method1) {
  const std::string arg1 = "arg1";
  EXPECT_CALL(*fake_, Method1(arg1))
            .Times(Exactly(1))
            .WillOnce(
                DoAll(InvokeWithoutArgs(this, &ProxyTest::CheckThread),
                      Return("Method1")));
  EXPECT_EQ("Method1", fake_proxy_->Method1(arg1));
}

// const method with one argument.
TEST_F(ProxyTest, ConstMethod1) {
  const std::string arg1 = "arg1";
  EXPECT_CALL(*fake_, ConstMethod1(arg1))
            .Times(Exactly(1))
            .WillOnce(
                DoAll(InvokeWithoutArgs(this, &ProxyTest::CheckThread),
                      Return("ConstMethod1")));
  EXPECT_EQ("ConstMethod1", fake_proxy_->ConstMethod1(arg1));
}

// Two arguments.
TEST_F(ProxyTest, Method2) {
  const std::string arg1 = "arg1";
  const std::string arg2 = "arg2";
  EXPECT_CALL(*fake_, Method2(arg1, arg2))
            .Times(Exactly(1))
            .WillOnce(
                DoAll(InvokeWithoutArgs(this, &ProxyTest::CheckThread),
                      Return("Method2")));
  EXPECT_EQ("Method2", fake_proxy_->Method2(arg1, arg2));
}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/statscollector.cc b/talk/app/webrtc/statscollector.cc
new file mode 100644
index 0000000..b994f2f
--- /dev/null
+++ b/talk/app/webrtc/statscollector.cc
@@ -0,0 +1,571 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/statscollector.h"
+
+#include <utility>
+#include <vector>
+
+#include "talk/session/media/channel.h"
+
+namespace webrtc {
+
// Definitions of the stats value/report name constants declared in
// statstypes.h.  "goog"-prefixed names are libjingle-specific extensions;
// unprefixed names follow the W3C getStats naming.
// The items below are in roughly (not strictly) alphabetical order.
const char StatsReport::kStatsValueNameActiveConnection[] =
    "googActiveConnection";
const char StatsReport::kStatsValueNameActualEncBitrate[] =
    "googActualEncBitrate";
const char StatsReport::kStatsValueNameAudioOutputLevel[] = "audioOutputLevel";
const char StatsReport::kStatsValueNameAudioInputLevel[] = "audioInputLevel";
const char StatsReport::kStatsValueNameAvailableReceiveBandwidth[] =
    "googAvailableReceiveBandwidth";
const char StatsReport::kStatsValueNameAvailableSendBandwidth[] =
    "googAvailableSendBandwidth";
const char StatsReport::kStatsValueNameBucketDelay[] = "googBucketDelay";
const char StatsReport::kStatsValueNameBytesReceived[] = "bytesReceived";
const char StatsReport::kStatsValueNameBytesSent[] = "bytesSent";
const char StatsReport::kStatsValueNameChannelId[] = "googChannelId";
const char StatsReport::kStatsValueNameCodecName[] = "googCodecName";
const char StatsReport::kStatsValueNameComponent[] = "googComponent";
const char StatsReport::kStatsValueNameContentName[] = "googContentName";
// Echo metrics from the audio processing module.
const char StatsReport::kStatsValueNameEchoCancellationQualityMin[] =
    "googEchoCancellationQualityMin";
const char StatsReport::kStatsValueNameEchoDelayMedian[] =
    "googEchoCancellationEchoDelayMedian";
const char StatsReport::kStatsValueNameEchoDelayStdDev[] =
    "googEchoCancellationEchoDelayStdDev";
const char StatsReport::kStatsValueNameEchoReturnLoss[] =
    "googEchoCancellationReturnLoss";
const char StatsReport::kStatsValueNameEchoReturnLossEnhancement[] =
    "googEchoCancellationReturnLossEnhancement";

const char StatsReport::kStatsValueNameFirsReceived[] = "googFirsReceived";
const char StatsReport::kStatsValueNameFirsSent[] = "googFirsSent";
const char StatsReport::kStatsValueNameFrameHeightReceived[] =
    "googFrameHeightReceived";
const char StatsReport::kStatsValueNameFrameHeightSent[] =
    "googFrameHeightSent";
const char StatsReport::kStatsValueNameFrameRateReceived[] =
    "googFrameRateReceived";
const char StatsReport::kStatsValueNameFrameRateDecoded[] =
    "googFrameRateDecoded";
const char StatsReport::kStatsValueNameFrameRateOutput[] =
    "googFrameRateOutput";
const char StatsReport::kStatsValueNameFrameRateInput[] = "googFrameRateInput";
const char StatsReport::kStatsValueNameFrameRateSent[] = "googFrameRateSent";
const char StatsReport::kStatsValueNameFrameWidthReceived[] =
    "googFrameWidthReceived";
const char StatsReport::kStatsValueNameFrameWidthSent[] = "googFrameWidthSent";
const char StatsReport::kStatsValueNameInitiator[] = "googInitiator";
const char StatsReport::kStatsValueNameJitterReceived[] = "googJitterReceived";
const char StatsReport::kStatsValueNameLocalAddress[] = "googLocalAddress";
const char StatsReport::kStatsValueNameNacksReceived[] = "googNacksReceived";
const char StatsReport::kStatsValueNameNacksSent[] = "googNacksSent";
const char StatsReport::kStatsValueNamePacketsReceived[] = "packetsReceived";
const char StatsReport::kStatsValueNamePacketsSent[] = "packetsSent";
const char StatsReport::kStatsValueNamePacketsLost[] = "packetsLost";
const char StatsReport::kStatsValueNameReadable[] = "googReadable";
const char StatsReport::kStatsValueNameRemoteAddress[] = "googRemoteAddress";
const char StatsReport::kStatsValueNameRetransmitBitrate[] =
    "googRetransmitBitrate";
const char StatsReport::kStatsValueNameRtt[] = "googRtt";
const char StatsReport::kStatsValueNameTargetEncBitrate[] =
    "googTargetEncBitrate";
const char StatsReport::kStatsValueNameTransmitBitrate[] =
    "googTransmitBitrate";
const char StatsReport::kStatsValueNameTransportId[] = "transportId";
const char StatsReport::kStatsValueNameTransportType[] = "googTransportType";
const char StatsReport::kStatsValueNameTrackId[] = "googTrackId";
const char StatsReport::kStatsValueNameSsrc[] = "ssrc";
const char StatsReport::kStatsValueNameWritable[] = "googWritable";

// Report type identifiers (StatsReport::type values).
const char StatsReport::kStatsReportTypeSession[] = "googLibjingleSession";
const char StatsReport::kStatsReportTypeBwe[] = "VideoBwe";
const char StatsReport::kStatsReportTypeSsrc[] = "ssrc";
const char StatsReport::kStatsReportTypeTrack[] = "googTrack";
const char StatsReport::kStatsReportTypeIceCandidate[] = "iceCandidate";
const char StatsReport::kStatsReportTypeTransport[] = "googTransport";
const char StatsReport::kStatsReportTypeComponent[] = "googComponent";
const char StatsReport::kStatsReportTypeCandidatePair[] = "googCandidatePair";

// Fixed report id for the single video bandwidth-estimation report.
const char StatsReport::kStatsReportVideoBweId[] = "bweforvideo";
+
// Implementations of functions in statstypes.h

// Appends a name/value pair to this report's value list.  Duplicate names are
// not checked or merged here; callers rely on values.clear() (see the
// timestamp handling in statscollector.cc) to reset between gathering passes.
void StatsReport::AddValue(const std::string& name, const std::string& value) {
  Value temp;
  temp.name = name;
  temp.value = value;
  values.push_back(temp);
}

// Convenience overload: stores an integer value as its decimal string form.
void StatsReport::AddValue(const std::string& name, int64 value) {
  AddValue(name, talk_base::ToString<int64>(value));
}

// Convenience overload: stores a bool as the string "true" or "false".
void StatsReport::AddBoolean(const std::string& name, bool value) {
  AddValue(name, value ? "true" : "false");
}
+
+namespace {
+typedef std::map<std::string, StatsReport> StatsMap;
+
// Builds the canonical report id "<type>_<id>" used as the key in StatsMap.
std::string StatsId(const std::string& type, const std::string& id) {
  std::string result(type);
  result += '_';
  result += id;
  return result;
}
+
// Linearly searches |report|'s values for an entry named |name|.
// On a hit, copies the entry's value into |*value| and returns true;
// returns false (leaving |*value| untouched) if no such entry exists.
// If the report contains duplicate names, the first match wins.
bool ExtractValueFromReport(
    const StatsReport& report,
    const std::string& name,
    std::string* value) {
  StatsReport::Values::const_iterator it = report.values.begin();
  for (; it != report.values.end(); ++it) {
    if (it->name == name) {
      *value = it->value;
      return true;
    }
  }
  return false;
}
+
// For each track in |tracks|, inserts (or overwrites) an empty "googTrack"
// report into |*reports|, keyed by "googTrack_<track id>".  The report carries
// only the track id; stats values are filled in by later gathering passes.
// |TrackVector| is AudioTrackVector or VideoTrackVector (see AddStream).
template <class TrackVector>
void CreateTrackReports(const TrackVector& tracks, StatsMap* reports) {
  for (size_t j = 0; j < tracks.size(); ++j) {
    webrtc::MediaStreamTrackInterface* track = tracks[j];
    // Adds an empty track report.
    StatsReport report;
    report.type = StatsReport::kStatsReportTypeTrack;
    report.id = StatsId(StatsReport::kStatsReportTypeTrack, track->id());
    report.AddValue(StatsReport::kStatsValueNameTrackId,
                    track->id());
    (*reports)[report.id] = report;
  }
}
+
// Copies receive-side audio stats (level, bytes, jitter, packet counts) from
// a VoiceReceiverInfo into an SSRC report.
void ExtractStats(const cricket::VoiceReceiverInfo& info, StatsReport* report) {
  report->AddValue(StatsReport::kStatsValueNameAudioOutputLevel,
                   info.audio_level);
  report->AddValue(StatsReport::kStatsValueNameBytesReceived,
                   info.bytes_rcvd);
  report->AddValue(StatsReport::kStatsValueNameJitterReceived,
                   info.jitter_ms);
  report->AddValue(StatsReport::kStatsValueNamePacketsReceived,
                   info.packets_rcvd);
  report->AddValue(StatsReport::kStatsValueNamePacketsLost,
                   info.packets_lost);
}

// Copies send-side audio stats, including the echo-cancellation metrics,
// into an SSRC report.  Note aec_quality_min is a float and is formatted
// explicitly; the other values use the int64 AddValue overload.
void ExtractStats(const cricket::VoiceSenderInfo& info, StatsReport* report) {
  report->AddValue(StatsReport::kStatsValueNameAudioInputLevel,
                   info.audio_level);
  report->AddValue(StatsReport::kStatsValueNameBytesSent,
                   info.bytes_sent);
  report->AddValue(StatsReport::kStatsValueNamePacketsSent,
                   info.packets_sent);
  report->AddValue(StatsReport::kStatsValueNameJitterReceived,
                   info.jitter_ms);
  report->AddValue(StatsReport::kStatsValueNameRtt, info.rtt_ms);
  report->AddValue(StatsReport::kStatsValueNameEchoCancellationQualityMin,
                   talk_base::ToString<float>(info.aec_quality_min));
  report->AddValue(StatsReport::kStatsValueNameEchoDelayMedian,
                   info.echo_delay_median_ms);
  report->AddValue(StatsReport::kStatsValueNameEchoDelayStdDev,
                   info.echo_delay_std_ms);
  report->AddValue(StatsReport::kStatsValueNameEchoReturnLoss,
                   info.echo_return_loss);
  report->AddValue(StatsReport::kStatsValueNameEchoReturnLossEnhancement,
                   info.echo_return_loss_enhancement);
  report->AddValue(StatsReport::kStatsValueNameCodecName, info.codec_name);
}
+
// Copies receive-side video stats into an SSRC report.  FIR/NACK counts here
// are the feedback messages *sent* by this receiver.
void ExtractStats(const cricket::VideoReceiverInfo& info, StatsReport* report) {
  report->AddValue(StatsReport::kStatsValueNameBytesReceived,
                   info.bytes_rcvd);
  report->AddValue(StatsReport::kStatsValueNamePacketsReceived,
                   info.packets_rcvd);
  report->AddValue(StatsReport::kStatsValueNamePacketsLost,
                   info.packets_lost);

  report->AddValue(StatsReport::kStatsValueNameFirsSent,
                   info.firs_sent);
  report->AddValue(StatsReport::kStatsValueNameNacksSent,
                   info.nacks_sent);
  report->AddValue(StatsReport::kStatsValueNameFrameWidthReceived,
                   info.frame_width);
  report->AddValue(StatsReport::kStatsValueNameFrameHeightReceived,
                   info.frame_height);
  report->AddValue(StatsReport::kStatsValueNameFrameRateReceived,
                   info.framerate_rcvd);
  report->AddValue(StatsReport::kStatsValueNameFrameRateDecoded,
                   info.framerate_decoded);
  report->AddValue(StatsReport::kStatsValueNameFrameRateOutput,
                   info.framerate_output);
}

// Copies send-side video stats into an SSRC report.  FIR/NACK counts here are
// the feedback messages *received* from the remote end.
void ExtractStats(const cricket::VideoSenderInfo& info, StatsReport* report) {
  report->AddValue(StatsReport::kStatsValueNameBytesSent,
                   info.bytes_sent);
  report->AddValue(StatsReport::kStatsValueNamePacketsSent,
                   info.packets_sent);

  report->AddValue(StatsReport::kStatsValueNameFirsReceived,
                   info.firs_rcvd);
  report->AddValue(StatsReport::kStatsValueNameNacksReceived,
                   info.nacks_rcvd);
  report->AddValue(StatsReport::kStatsValueNameFrameWidthSent,
                   info.frame_width);
  report->AddValue(StatsReport::kStatsValueNameFrameHeightSent,
                   info.frame_height);
  report->AddValue(StatsReport::kStatsValueNameFrameRateInput,
                   info.framerate_input);
  report->AddValue(StatsReport::kStatsValueNameFrameRateSent,
                   info.framerate_sent);
  report->AddValue(StatsReport::kStatsValueNameRtt, info.rtt_ms);
  report->AddValue(StatsReport::kStatsValueNameCodecName, info.codec_name);
}
+
// Fills the singleton video bandwidth-estimation report ("bweforvideo").
// Unlike the other ExtractStats overloads, this one also sets the report's
// id/type and handles the timestamp/clear bookkeeping itself, because the
// report is not created through StatsCollector::PrepareReport.
void ExtractStats(const cricket::BandwidthEstimationInfo& info,
                  double stats_gathering_started,
                  StatsReport* report) {
  report->id = StatsReport::kStatsReportVideoBweId;
  report->type = StatsReport::kStatsReportTypeBwe;

  // Clear out stats from previous GatherStats calls if any.
  if (report->timestamp != stats_gathering_started) {
    report->values.clear();
    report->timestamp = stats_gathering_started;
  }

  report->AddValue(StatsReport::kStatsValueNameAvailableSendBandwidth,
                   info.available_send_bandwidth);
  report->AddValue(StatsReport::kStatsValueNameAvailableReceiveBandwidth,
                   info.available_recv_bandwidth);
  report->AddValue(StatsReport::kStatsValueNameTargetEncBitrate,
                   info.target_enc_bitrate);
  report->AddValue(StatsReport::kStatsValueNameActualEncBitrate,
                   info.actual_enc_bitrate);
  report->AddValue(StatsReport::kStatsValueNameRetransmitBitrate,
                   info.retransmit_bitrate);
  report->AddValue(StatsReport::kStatsValueNameTransmitBitrate,
                   info.transmit_bitrate);
  report->AddValue(StatsReport::kStatsValueNameBucketDelay,
                   info.bucket_delay);
}
+
// ExtractSsrc overloads: return the SSRC identifying an info struct, used by
// ExtractStatsFromList to key the per-SSRC report.  The video variants take
// the first entry of an ssrcs vector — NOTE(review): this assumes the vector
// is non-empty for every reported stream; verify upstream guarantees that.
uint32 ExtractSsrc(const cricket::VoiceReceiverInfo& info) {
  return info.ssrc;
}

uint32 ExtractSsrc(const cricket::VoiceSenderInfo& info) {
  return info.ssrc;
}

uint32 ExtractSsrc(const cricket::VideoReceiverInfo& info) {
  return info.ssrcs[0];
}

uint32 ExtractSsrc(const cricket::VideoSenderInfo& info) {
  return info.ssrcs[0];
}
+
+// Template to extract stats from a data vector.
+// ExtractSsrc and ExtractStats must be defined and overloaded for each type.
+template<typename T>
+void ExtractStatsFromList(const std::vector<T>& data,
+ const std::string& transport_id,
+ StatsCollector* collector) {
+ typename std::vector<T>::const_iterator it = data.begin();
+ for (; it != data.end(); ++it) {
+ std::string id;
+ uint32 ssrc = ExtractSsrc(*it);
+ StatsReport* report = collector->PrepareReport(ssrc, transport_id);
+ if (!report) {
+ continue;
+ }
+ ExtractStats(*it, report);
+ }
+};
+
+} // namespace
+
// Starts with no session attached and a zero gathering timestamp, so the
// first UpdateStats() call always gathers (see kMinGatherStatsPeriod).
StatsCollector::StatsCollector()
    : session_(NULL), stats_gathering_started_(0) {
}
+
// Adds a MediaStream with tracks that can be used as a |selector| in a call
// to GetStats.  Registers one empty "googTrack" report per audio and video
// track in |stream|; the reports are filled in during later UpdateStats()
// passes.
void StatsCollector::AddStream(MediaStreamInterface* stream) {
  ASSERT(stream != NULL);

  CreateTrackReports<AudioTrackVector>(stream->GetAudioTracks(),
                                       &reports_);
  CreateTrackReports<VideoTrackVector>(stream->GetVideoTracks(),
                                       &reports_);
}
+
// Copies previously gathered reports into |*reports| (always cleared first).
// With |track| == NULL, every stored report is returned.  Otherwise the
// selection is: the session report (if present), the track's own "googTrack"
// report, and every "ssrc" report whose googTrackId value matches the track.
// Returns false only when no track report exists for |track|.
// NOTE(review): the non-NULL path dereferences session_ — callers appear to
// be expected to have set a session first; confirm.
bool StatsCollector::GetStats(MediaStreamTrackInterface* track,
                              StatsReports* reports) {
  ASSERT(reports != NULL);
  reports->clear();

  StatsMap::iterator it;
  if (!track) {
    // No selector: return everything.
    for (it = reports_.begin(); it != reports_.end(); ++it) {
      reports->push_back(it->second);
    }
    return true;
  }

  // Always include the session-level report when it exists.
  it = reports_.find(StatsId(StatsReport::kStatsReportTypeSession,
                             session_->id()));
  if (it != reports_.end()) {
    reports->push_back(it->second);
  }

  it = reports_.find(StatsId(StatsReport::kStatsReportTypeTrack, track->id()));

  if (it == reports_.end()) {
    LOG(LS_WARNING) << "No StatsReport is available for "<< track->id();
    return false;
  }

  reports->push_back(it->second);

  // Include every SSRC report that names this track in its googTrackId value.
  std::string track_id;
  for (it = reports_.begin(); it != reports_.end(); ++it) {
    if (it->second.type != StatsReport::kStatsReportTypeSsrc) {
      continue;
    }
    if (ExtractValueFromReport(it->second,
                               StatsReport::kStatsValueNameTrackId,
                               &track_id)) {
      if (track_id == track->id()) {
        reports->push_back(it->second);
      }
    }
  }

  return true;
}
+
// Gathers fresh session/voice/video stats into reports_.  Throttled: calls
// arriving within kMinGatherStatsPeriod ms of the previous gather are no-ops.
// Does nothing (beyond updating the timestamp) when no session is attached.
void StatsCollector::UpdateStats() {
  double time_now = GetTimeNow();
  // Calls to UpdateStats() that occur less than kMinGatherStatsPeriod number of
  // ms apart will be ignored.
  const double kMinGatherStatsPeriod = 50;
  if (stats_gathering_started_ + kMinGatherStatsPeriod > time_now) {
    return;
  }
  // stats_gathering_started_ doubles as the timestamp written into reports;
  // PrepareReport/ExtractStats use it to detect and clear stale values.
  stats_gathering_started_ = time_now;

  if (session_) {
    ExtractSessionInfo();
    ExtractVoiceInfo();
    ExtractVideoInfo();
  }
}
+
// Returns the (created-or-reused) "ssrc" report for |ssrc|, or NULL when the
// SSRC is unknown to the session and no previous report exists for it.
// The returned pointer refers into reports_ and is only valid until the map
// is next modified.
StatsReport* StatsCollector::PrepareReport(uint32 ssrc,
                                           const std::string& transport_id) {
  std::string ssrc_id = talk_base::ToString<uint32>(ssrc);
  StatsMap::iterator it = reports_.find(StatsId(
      StatsReport::kStatsReportTypeSsrc, ssrc_id));

  // Resolve the track id: from the session for a new report, or carried over
  // from the existing report for an SSRC the session no longer knows.
  std::string track_id;
  if (it == reports_.end()) {
    if (!session()->GetTrackIdBySsrc(ssrc, &track_id)) {
      LOG(LS_ERROR) << "The SSRC " << ssrc
                    << " is not associated with a track";
      return NULL;
    }
  } else {
    // Keeps the old track id since we want to report the stats for inactive
    // tracks.
    ExtractValueFromReport(it->second,
                           StatsReport::kStatsValueNameTrackId,
                           &track_id);
  }

  // operator[] creates the report if it did not exist yet.
  StatsReport* report = &reports_[
      StatsId(StatsReport::kStatsReportTypeSsrc, ssrc_id)];
  report->id = StatsId(StatsReport::kStatsReportTypeSsrc, ssrc_id);
  report->type = StatsReport::kStatsReportTypeSsrc;

  // Clear out stats from previous GatherStats calls if any.
  if (report->timestamp != stats_gathering_started_) {
    report->values.clear();
    report->timestamp = stats_gathering_started_;
  }

  report->AddValue(StatsReport::kStatsValueNameSsrc, ssrc_id);
  report->AddValue(StatsReport::kStatsValueNameTrackId, track_id);
  // Add the mapping of SSRC to transport.
  report->AddValue(StatsReport::kStatsValueNameTransportId,
                   transport_id);
  return report;
}
+
// Builds the session-level reports: one "googLibjingleSession" report, one
// "googComponent" report per transport channel (id "Channel-<content>-<n>"),
// and one "googCandidatePair" report per connection on each channel
// (id "Conn-<transport>-<component>-<index>").  Also caches the session's
// proxy-to-transport map for GetTransportIdFromProxy().
void StatsCollector::ExtractSessionInfo() {
  // Extract information from the base session.
  StatsReport report;
  report.id = StatsId(StatsReport::kStatsReportTypeSession, session_->id());
  report.type = StatsReport::kStatsReportTypeSession;
  report.timestamp = stats_gathering_started_;
  report.values.clear();
  report.AddBoolean(StatsReport::kStatsValueNameInitiator,
                    session_->initiator());

  reports_[report.id] = report;

  cricket::SessionStats stats;
  if (session_->GetStats(&stats)) {
    // Store the proxy map away for use in SSRC reporting.
    proxy_to_transport_ = stats.proxy_to_transport;

    for (cricket::TransportStatsMap::iterator transport_iter
             = stats.transport_stats.begin();
         transport_iter != stats.transport_stats.end(); ++transport_iter) {
      for (cricket::TransportChannelStatsList::iterator channel_iter
               = transport_iter->second.channel_stats.begin();
           channel_iter != transport_iter->second.channel_stats.end();
           ++channel_iter) {
        // One component report per channel; its id format must stay in sync
        // with the "Channel-<name>-<component>" ids built in
        // GetTransportIdFromProxy().
        StatsReport channel_report;
        std::ostringstream ostc;
        ostc << "Channel-" << transport_iter->second.content_name
             << "-" << channel_iter->component;
        channel_report.id = ostc.str();
        channel_report.type = StatsReport::kStatsReportTypeComponent;
        channel_report.timestamp = stats_gathering_started_;
        channel_report.AddValue(StatsReport::kStatsValueNameComponent,
                                channel_iter->component);
        reports_[channel_report.id] = channel_report;
        for (size_t i = 0;
             i < channel_iter->connection_infos.size();
             ++i) {
          // One candidate-pair report per connection, linked back to its
          // channel via googChannelId.
          StatsReport report;
          const cricket::ConnectionInfo& info
              = channel_iter->connection_infos[i];
          std::ostringstream ost;
          ost << "Conn-" << transport_iter->first << "-"
              << channel_iter->component << "-" << i;
          report.id = ost.str();
          report.type = StatsReport::kStatsReportTypeCandidatePair;
          report.timestamp = stats_gathering_started_;
          // Link from connection to its containing channel.
          report.AddValue(StatsReport::kStatsValueNameChannelId,
                          channel_report.id);
          report.AddValue(StatsReport::kStatsValueNameBytesSent,
                          info.sent_total_bytes);
          report.AddValue(StatsReport::kStatsValueNameBytesReceived,
                          info.recv_total_bytes);
          report.AddBoolean(StatsReport::kStatsValueNameWritable,
                            info.writable);
          report.AddBoolean(StatsReport::kStatsValueNameReadable,
                            info.readable);
          report.AddBoolean(StatsReport::kStatsValueNameActiveConnection,
                            info.best_connection);
          report.AddValue(StatsReport::kStatsValueNameLocalAddress,
                          info.local_candidate.address().ToString());
          report.AddValue(StatsReport::kStatsValueNameRemoteAddress,
                          info.remote_candidate.address().ToString());
          reports_[report.id] = report;
        }
      }
    }
  }
}
+
// Gathers per-SSRC reports for the voice channel's receivers and senders.
// Silently returns when there is no voice channel; logs and returns on stats
// or transport-id lookup failure.
void StatsCollector::ExtractVoiceInfo() {
  if (!session_->voice_channel()) {
    return;
  }
  cricket::VoiceMediaInfo voice_info;
  if (!session_->voice_channel()->GetStats(&voice_info)) {
    LOG(LS_ERROR) << "Failed to get voice channel stats.";
    return;
  }
  std::string transport_id;
  if (!GetTransportIdFromProxy(session_->voice_channel()->content_name(),
                               &transport_id)) {
    LOG(LS_ERROR) << "Failed to get transport name for proxy "
                  << session_->voice_channel()->content_name();
    return;
  }
  ExtractStatsFromList(voice_info.receivers, transport_id, this);
  ExtractStatsFromList(voice_info.senders, transport_id, this);
}
+
// Gathers per-SSRC reports for the video channel's receivers and senders,
// plus the single bandwidth-estimation ("bweforvideo") report.  Mirrors
// ExtractVoiceInfo's early-return/error-log structure.
void StatsCollector::ExtractVideoInfo() {
  if (!session_->video_channel()) {
    return;
  }
  cricket::VideoMediaInfo video_info;
  if (!session_->video_channel()->GetStats(&video_info)) {
    LOG(LS_ERROR) << "Failed to get video channel stats.";
    return;
  }
  std::string transport_id;
  if (!GetTransportIdFromProxy(session_->video_channel()->content_name(),
                               &transport_id)) {
    LOG(LS_ERROR) << "Failed to get transport name for proxy "
                  << session_->video_channel()->content_name();
    return;
  }
  ExtractStatsFromList(video_info.receivers, transport_id, this);
  ExtractStatsFromList(video_info.senders, transport_id, this);
  // Exactly one BWE entry is expected; anything else is logged and dropped.
  if (video_info.bw_estimations.size() != 1) {
    LOG(LS_ERROR) << "BWEs count: " << video_info.bw_estimations.size();
  } else {
    StatsReport* report = &reports_[StatsReport::kStatsReportVideoBweId];
    ExtractStats(
        video_info.bw_estimations[0], stats_gathering_started_, report);
  }
}
+
// Returns the current wall-clock time in milliseconds (WallTimeNow()
// presumably returns seconds, scaled here by kNumMillisecsPerSec — the same
// unit kMinGatherStatsPeriod in UpdateStats() is expressed in).
double StatsCollector::GetTimeNow() {
  return timing_.WallTimeNow() * talk_base::kNumMillisecsPerSec;
}
+
+bool StatsCollector::GetTransportIdFromProxy(const std::string& proxy,
+ std::string* transport) {
+ // TODO(hta): Remove handling of empty proxy name once tests do not use it.
+ if (proxy.empty()) {
+ transport->clear();
+ return true;
+ }
+ if (proxy_to_transport_.find(proxy) == proxy_to_transport_.end()) {
+ LOG(LS_ERROR) << "No transport ID mapping for " << proxy;
+ return false;
+ }
+ std::ostringstream ost;
+ // Component 1 is always used for RTP.
+ ost << "Channel-" << proxy_to_transport_[proxy] << "-1";
+ *transport = ost.str();
+ return true;
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/statscollector.h b/talk/app/webrtc/statscollector.h
new file mode 100644
index 0000000..03a32c4
--- /dev/null
+++ b/talk/app/webrtc/statscollector.h
@@ -0,0 +1,95 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file contains a class used for gathering statistics from an ongoing
+// libjingle PeerConnection.
+
+#ifndef TALK_APP_WEBRTC_STATSCOLLECTOR_H_
+#define TALK_APP_WEBRTC_STATSCOLLECTOR_H_
+
+#include <string>
+#include <map>
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/app/webrtc/statstypes.h"
+#include "talk/app/webrtc/webrtcsession.h"
+
+#include "talk/base/timing.h"
+
+namespace webrtc {
+
// Gathers statistics from an ongoing WebRtcSession and caches them as
// StatsReport objects keyed by report id.  Typical use: set_session(),
// AddStream() for each stream, then UpdateStats() followed by GetStats().
// Not reference counted; the owner controls its lifetime.
class StatsCollector {
 public:
  StatsCollector();

  // Register the session Stats should operate on.
  // Set to NULL if the session has ended.  The pointer is stored raw; the
  // session must outlive this collector or be cleared first.
  void set_session(WebRtcSession* session) {
    session_ = session;
  }

  // Adds a MediaStream with tracks that can be used as a |selector| in a call
  // to GetStats.
  void AddStream(MediaStreamInterface* stream);

  // Gather statistics from the session and store them for future use.
  // Throttled internally; calls closer together than ~50ms are ignored.
  void UpdateStats();

  // Gets a StatsReports of the last collected stats. Note that UpdateStats must
  // be called before this function to get the most recent stats. |track| may
  // be NULL, in which case all stored reports are returned; otherwise only
  // the session report and the reports related to |track| are copied into
  // |reports|.
  bool GetStats(MediaStreamTrackInterface* track, StatsReports* reports);

  WebRtcSession* session() { return session_; }
  // Prepare an SSRC report for the given ssrc. Used internally.
  // Returns NULL when the SSRC cannot be associated with a track.
  StatsReport* PrepareReport(uint32 ssrc, const std::string& transport);
  // Extracts the ID of a Transport belonging to an SSRC. Used internally.
  bool GetTransportIdFromProxy(const std::string& proxy,
                               std::string* transport_id);

 private:
  // NOTE(review): CopySelectedReports and BuildSsrcToTransportId are declared
  // here but have no definition in statscollector.cc as added by this change
  // — verify they are defined elsewhere or remove the declarations.
  bool CopySelectedReports(const std::string& selector, StatsReports* reports);

  void ExtractSessionInfo();
  void ExtractVoiceInfo();
  void ExtractVideoInfo();
  // Current wall-clock time in milliseconds.
  double GetTimeNow();
  void BuildSsrcToTransportId();

  // A map from the report id to the report.
  std::map<std::string, webrtc::StatsReport> reports_;
  // Raw pointer to the session the statistics are gathered from.
  WebRtcSession* session_;
  // Timestamp (ms) of the last gathering pass; also written into reports.
  double stats_gathering_started_;
  talk_base::Timing timing_;
  // Channel content-name -> transport-name map, cached from session stats.
  cricket::ProxyTransportMap proxy_to_transport_;
};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_STATSCOLLECTOR_H_
diff --git a/talk/app/webrtc/statscollector_unittest.cc b/talk/app/webrtc/statscollector_unittest.cc
new file mode 100644
index 0000000..cce1645bc
--- /dev/null
+++ b/talk/app/webrtc/statscollector_unittest.cc
@@ -0,0 +1,442 @@
+/*
+ * libjingle
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+
+#include "talk/app/webrtc/statscollector.h"
+
+#include "talk/app/webrtc/mediastream.h"
+#include "talk/app/webrtc/videotrack.h"
+#include "talk/base/gunit.h"
+#include "talk/media/base/fakemediaengine.h"
+#include "talk/media/devices/fakedevicemanager.h"
+#include "talk/p2p/base/fakesession.h"
+#include "talk/session/media/channelmanager.h"
+#include "testing/base/public/gmock.h"
+
+using testing::_;
+using testing::DoAll;
+using testing::Return;
+using testing::ReturnNull;
+using testing::SetArgPointee;
+
+namespace cricket {
+
+class ChannelManager;
+class FakeDeviceManager;
+
+} // namespace cricket
+
+namespace {
+
+// Error return values
+const char kNotFound[] = "NOT FOUND";
+const char kNoReports[] = "NO REPORTS";
+
+// Mock of WebRtcSession that lets each test control what the StatsCollector
+// sees: the video channel, the ssrc-to-track-id mapping, and the
+// session-level transport stats.
+class MockWebRtcSession : public webrtc::WebRtcSession {
+ public:
+  explicit MockWebRtcSession(cricket::ChannelManager* channel_manager)
+      : WebRtcSession(channel_manager, talk_base::Thread::Current(),
+                      NULL, NULL, NULL) {
+  }
+  MOCK_METHOD0(video_channel, cricket::VideoChannel*());
+  MOCK_METHOD2(GetTrackIdBySsrc, bool(uint32, std::string*));
+  MOCK_METHOD1(GetStats, bool(cricket::SessionStats*));
+};
+
+// Fake video media channel with a mockable GetStats so tests can inject an
+// arbitrary VideoMediaInfo into the StatsCollector.
+class MockVideoMediaChannel : public cricket::FakeVideoMediaChannel {
+ public:
+  MockVideoMediaChannel()
+      : cricket::FakeVideoMediaChannel(NULL) {
+  }
+  // MOCK_METHOD0(transport_channel, cricket::TransportChannel*());
+  MOCK_METHOD1(GetStats, bool(cricket::VideoMediaInfo*));
+};
+
+// Returns the value named |name| from the first report in |reports| whose
+// type matches |type|. Returns kNoReports if |reports| is empty and kNotFound
+// if no matching type/name pair exists.
+// |reports| and |name| are taken by const reference: the original signature
+// copied the whole report vector (and the name string) on every call.
+std::string ExtractStatsValue(const std::string& type,
+                              const webrtc::StatsReports& reports,
+                              const std::string& name) {
+  if (reports.empty()) {
+    return kNoReports;
+  }
+  for (size_t i = 0; i < reports.size(); ++i) {
+    if (reports[i].type != type)
+      continue;
+    webrtc::StatsReport::Values::const_iterator it =
+        reports[i].values.begin();
+    for (; it != reports[i].values.end(); ++it) {
+      if (it->name == name) {
+        return it->value;
+      }
+    }
+  }
+
+  return kNotFound;
+}
+
+// Finds the |n|-th report of type |type| in |reports|.
+// |n| starts from 1 for finding the first report.
+// Returns NULL if fewer than |n| reports of that type exist.
+// |reports| MUST be taken by reference: the original pass-by-value version
+// returned a pointer into the local copy, which dangles as soon as the
+// function returns.
+const webrtc::StatsReport* FindNthReportByType(
+    const webrtc::StatsReports& reports, const std::string& type, int n) {
+  for (size_t i = 0; i < reports.size(); ++i) {
+    if (reports[i].type == type) {
+      n--;
+      if (n == 0)
+        return &reports[i];
+    }
+  }
+  return NULL;
+}
+
+// Returns a pointer to the report in |reports| whose id equals |id|, or NULL.
+// |reports| MUST be taken by reference: the original pass-by-value version
+// returned a pointer into the local copy, leaving the caller with a dangling
+// pointer.
+const webrtc::StatsReport* FindReportById(const webrtc::StatsReports& reports,
+                                          const std::string& id) {
+  for (size_t i = 0; i < reports.size(); ++i) {
+    if (reports[i].id == id) {
+      return &reports[i];
+    }
+  }
+  return NULL;
+}
+
+// Convenience wrapper: extracts |name| from the first "ssrc" report.
+// |reports| by const reference to avoid copying the vector per call.
+std::string ExtractSsrcStatsValue(const webrtc::StatsReports& reports,
+                                  const std::string& name) {
+  return ExtractStatsValue(
+      webrtc::StatsReport::kStatsReportTypeSsrc, reports, name);
+}
+
+// Convenience wrapper: extracts |name| from the first "VideoBWE" report.
+// |reports| by const reference to avoid copying the vector per call.
+std::string ExtractBweStatsValue(const webrtc::StatsReports& reports,
+                                 const std::string& name) {
+  return ExtractStatsValue(
+      webrtc::StatsReport::kStatsReportTypeBwe, reports, name);
+}
+
+// Common fixture: builds a fake media engine plus channel manager and a mock
+// session for the StatsCollector under test to query.
+class StatsCollectorTest : public testing::Test {
+ protected:
+  StatsCollectorTest()
+      : media_engine_(new cricket::FakeMediaEngine),
+        channel_manager_(
+            new cricket::ChannelManager(media_engine_,
+                                        new cricket::FakeDeviceManager(),
+                                        talk_base::Thread::Current())),
+        session_(channel_manager_.get()) {
+    // By default, we ignore session GetStats calls.
+    EXPECT_CALL(session_, GetStats(_)).WillRepeatedly(Return(false));
+  }
+
+  // Raw pointer kept so tests can hand the engine to VideoChannel;
+  // presumably owned by |channel_manager_| — verify against ChannelManager.
+  cricket::FakeMediaEngine* media_engine_;
+  talk_base::scoped_ptr<cricket::ChannelManager> channel_manager_;
+  MockWebRtcSession session_;
+};
+
+// This test verifies that 64-bit counters are passed successfully.
+TEST_F(StatsCollectorTest, BytesCounterHandles64Bits) {
+  webrtc::StatsCollector stats;  // Implementation under test.
+  MockVideoMediaChannel* media_channel = new MockVideoMediaChannel;
+  cricket::VideoChannel video_channel(talk_base::Thread::Current(),
+      media_engine_, media_channel, &session_, "", false, NULL);
+  webrtc::StatsReports reports;  // returned values.
+  cricket::VideoSenderInfo video_sender_info;
+  cricket::VideoMediaInfo stats_read;
+  const uint32 kSsrcOfTrack = 1234;
+  const std::string kNameOfTrack("somename");
+  // The number of bytes must be larger than 0xFFFFFFFF for this test.
+  const int64 kBytesSent = 12345678901234LL;
+  const std::string kBytesSentString("12345678901234");
+
+  stats.set_session(&session_);
+  talk_base::scoped_refptr<webrtc::MediaStream> stream(
+      webrtc::MediaStream::Create("streamlabel"));
+  stream->AddTrack(webrtc::VideoTrack::Create(kNameOfTrack, NULL));
+  stats.AddStream(stream);
+
+  // Construct a stats value to read. Use kSsrcOfTrack (not a repeated magic
+  // number) so the GetTrackIdBySsrc expectation below stays in sync.
+  video_sender_info.ssrcs.push_back(kSsrcOfTrack);
+  video_sender_info.bytes_sent = kBytesSent;
+  stats_read.senders.push_back(video_sender_info);
+
+  EXPECT_CALL(session_, video_channel())
+      .WillRepeatedly(Return(&video_channel));
+  EXPECT_CALL(*media_channel, GetStats(_))
+      .WillOnce(DoAll(SetArgPointee<0>(stats_read),
+                      Return(true)));
+  EXPECT_CALL(session_, GetTrackIdBySsrc(kSsrcOfTrack, _))
+      .WillOnce(DoAll(SetArgPointee<1>(kNameOfTrack),
+                      Return(true)));
+  stats.UpdateStats();
+  stats.GetStats(NULL, &reports);
+  // The 64-bit counter must round-trip without truncation.
+  std::string result = ExtractSsrcStatsValue(reports, "bytesSent");
+  EXPECT_EQ(kBytesSentString, result);
+}
+
+// Test that BWE information is reported via stats.
+TEST_F(StatsCollectorTest, BandwidthEstimationInfoIsReported) {
+  webrtc::StatsCollector stats;  // Implementation under test.
+  MockVideoMediaChannel* media_channel = new MockVideoMediaChannel;
+  cricket::VideoChannel video_channel(talk_base::Thread::Current(),
+      media_engine_, media_channel, &session_, "", false, NULL);
+  webrtc::StatsReports reports;  // returned values.
+  cricket::VideoSenderInfo video_sender_info;
+  cricket::VideoMediaInfo stats_read;
+  // Set up an SSRC just to test that we get both kinds of stats back: SSRC and
+  // BWE.
+  const uint32 kSsrcOfTrack = 1234;
+  const std::string kNameOfTrack("somename");
+  const int64 kBytesSent = 12345678901234LL;
+  const std::string kBytesSentString("12345678901234");
+
+  stats.set_session(&session_);
+  talk_base::scoped_refptr<webrtc::MediaStream> stream(
+      webrtc::MediaStream::Create("streamlabel"));
+  stream->AddTrack(webrtc::VideoTrack::Create(kNameOfTrack, NULL));
+  stats.AddStream(stream);
+
+  // Construct a stats value to read. Use kSsrcOfTrack (not a repeated magic
+  // number) so the GetTrackIdBySsrc expectation below stays in sync.
+  video_sender_info.ssrcs.push_back(kSsrcOfTrack);
+  video_sender_info.bytes_sent = kBytesSent;
+  stats_read.senders.push_back(video_sender_info);
+  cricket::BandwidthEstimationInfo bwe;
+  const int kTargetEncBitrate = 123456;
+  const std::string kTargetEncBitrateString("123456");
+  bwe.target_enc_bitrate = kTargetEncBitrate;
+  stats_read.bw_estimations.push_back(bwe);
+
+  EXPECT_CALL(session_, video_channel())
+      .WillRepeatedly(Return(&video_channel));
+  EXPECT_CALL(*media_channel, GetStats(_))
+      .WillOnce(DoAll(SetArgPointee<0>(stats_read),
+                      Return(true)));
+  EXPECT_CALL(session_, GetTrackIdBySsrc(kSsrcOfTrack, _))
+      .WillOnce(DoAll(SetArgPointee<1>(kNameOfTrack),
+                      Return(true)));
+  stats.UpdateStats();
+  stats.GetStats(NULL, &reports);
+  // Both the per-ssrc counter and the global BWE value must be present.
+  std::string result = ExtractSsrcStatsValue(reports, "bytesSent");
+  EXPECT_EQ(kBytesSentString, result);
+  result = ExtractBweStatsValue(reports, "googTargetEncBitrate");
+  EXPECT_EQ(kTargetEncBitrateString, result);
+}
+
+// This test verifies that an object of type "googSession" always
+// exists in the returned stats.
+TEST_F(StatsCollectorTest, SessionObjectExists) {
+  webrtc::StatsCollector stats;  // Implementation under test.
+  webrtc::StatsReports reports;  // returned values.
+  stats.set_session(&session_);
+  // No video channel is present; only session-level stats are expected.
+  EXPECT_CALL(session_, video_channel())
+      .WillRepeatedly(ReturnNull());
+  stats.UpdateStats();
+  stats.GetStats(NULL, &reports);
+  const webrtc::StatsReport* session_report = FindNthReportByType(
+      reports, webrtc::StatsReport::kStatsReportTypeSession, 1);
+  EXPECT_FALSE(session_report == NULL);
+}
+
+// This test verifies that only one object of type "googSession" exists
+// in the returned stats.
+TEST_F(StatsCollectorTest, OnlyOneSessionObjectExists) {
+  webrtc::StatsCollector stats;  // Implementation under test.
+  webrtc::StatsReports reports;  // returned values.
+  stats.set_session(&session_);
+  EXPECT_CALL(session_, video_channel())
+      .WillRepeatedly(ReturnNull());
+  // Updating twice must not produce a duplicate session report.
+  stats.UpdateStats();
+  stats.UpdateStats();
+  stats.GetStats(NULL, &reports);
+  const webrtc::StatsReport* session_report = FindNthReportByType(
+      reports, webrtc::StatsReport::kStatsReportTypeSession, 1);
+  EXPECT_FALSE(session_report == NULL);
+  // There must be no second session report.
+  session_report = FindNthReportByType(
+      reports, webrtc::StatsReport::kStatsReportTypeSession, 2);
+  EXPECT_EQ(NULL, session_report);
+}
+
+// This test verifies that the empty track report exists in the returned stats
+// without calling StatsCollector::UpdateStats.
+TEST_F(StatsCollectorTest, TrackObjectExistsWithoutUpdateStats) {
+  webrtc::StatsCollector stats;  // Implementation under test.
+  MockVideoMediaChannel* media_channel = new MockVideoMediaChannel;
+  cricket::VideoChannel video_channel(talk_base::Thread::Current(),
+      media_engine_, media_channel, &session_, "", false, NULL);
+  const std::string kTrackId("somename");
+  talk_base::scoped_refptr<webrtc::MediaStream> stream(
+      webrtc::MediaStream::Create("streamlabel"));
+  talk_base::scoped_refptr<webrtc::VideoTrack> track =
+      webrtc::VideoTrack::Create(kTrackId, NULL);
+  stream->AddTrack(track);
+  stats.AddStream(stream);
+
+  stats.set_session(&session_);
+
+  webrtc::StatsReports reports;
+
+  // Verifies the existence of the track report.
+  stats.GetStats(NULL, &reports);
+  EXPECT_EQ(static_cast<size_t>(1), reports.size());
+  EXPECT_EQ(std::string(webrtc::StatsReport::kStatsReportTypeTrack),
+            reports[0].type);
+
+  // The report must carry the track id registered via AddStream.
+  std::string track_value =
+      ExtractStatsValue(webrtc::StatsReport::kStatsReportTypeTrack,
+                        reports,
+                        webrtc::StatsReport::kStatsValueNameTrackId);
+  EXPECT_EQ(kTrackId, track_value);
+}
+
+// This test verifies that the empty track report exists in the returned stats
+// when StatsCollector::UpdateStats is called with ssrc stats.
+TEST_F(StatsCollectorTest, TrackAndSsrcObjectExistAfterUpdateSsrcStats) {
+  webrtc::StatsCollector stats;  // Implementation under test.
+  MockVideoMediaChannel* media_channel = new MockVideoMediaChannel;
+  cricket::VideoChannel video_channel(talk_base::Thread::Current(),
+      media_engine_, media_channel, &session_, "", false, NULL);
+  const std::string kTrackId("somename");
+  talk_base::scoped_refptr<webrtc::MediaStream> stream(
+      webrtc::MediaStream::Create("streamlabel"));
+  talk_base::scoped_refptr<webrtc::VideoTrack> track =
+      webrtc::VideoTrack::Create(kTrackId, NULL);
+  stream->AddTrack(track);
+  stats.AddStream(stream);
+
+  stats.set_session(&session_);
+
+  webrtc::StatsReports reports;
+
+  // Constructs an ssrc stats update.
+  cricket::VideoSenderInfo video_sender_info;
+  cricket::VideoMediaInfo stats_read;
+  const uint32 kSsrcOfTrack = 1234;
+  const int64 kBytesSent = 12345678901234LL;
+
+  // Construct a stats value to read. Use kSsrcOfTrack (not a repeated magic
+  // number) so the GetTrackIdBySsrc expectation below stays in sync.
+  video_sender_info.ssrcs.push_back(kSsrcOfTrack);
+  video_sender_info.bytes_sent = kBytesSent;
+  stats_read.senders.push_back(video_sender_info);
+
+  EXPECT_CALL(session_, video_channel())
+      .WillRepeatedly(Return(&video_channel));
+  EXPECT_CALL(*media_channel, GetStats(_))
+      .WillOnce(DoAll(SetArgPointee<0>(stats_read),
+                      Return(true)));
+  EXPECT_CALL(session_, GetTrackIdBySsrc(kSsrcOfTrack, _))
+      .WillOnce(DoAll(SetArgPointee<1>(kTrackId),
+                      Return(true)));
+
+  stats.UpdateStats();
+  stats.GetStats(NULL, &reports);
+  // |reports| should contain one session report, one track report, and one
+  // ssrc report.
+  EXPECT_EQ(static_cast<size_t>(3), reports.size());
+  const webrtc::StatsReport* track_report = FindNthReportByType(
+      reports, webrtc::StatsReport::kStatsReportTypeTrack, 1);
+  EXPECT_FALSE(track_report == NULL);
+
+  // Selecting by |track| should yield the same three reports.
+  stats.GetStats(track, &reports);
+  EXPECT_EQ(static_cast<size_t>(3), reports.size());
+  track_report = FindNthReportByType(
+      reports, webrtc::StatsReport::kStatsReportTypeTrack, 1);
+  EXPECT_FALSE(track_report == NULL);
+
+  std::string ssrc_id = ExtractSsrcStatsValue(
+      reports, webrtc::StatsReport::kStatsValueNameSsrc);
+  EXPECT_EQ(talk_base::ToString<uint32>(kSsrcOfTrack), ssrc_id);
+
+  std::string track_id = ExtractSsrcStatsValue(
+      reports, webrtc::StatsReport::kStatsValueNameTrackId);
+  EXPECT_EQ(kTrackId, track_id);
+}
+
+// This test verifies that an SSRC object has the identifier of a Transport
+// stats object, and that this transport stats object exists in stats.
+TEST_F(StatsCollectorTest, TransportObjectLinkedFromSsrcObject) {
+  webrtc::StatsCollector stats;  // Implementation under test.
+  MockVideoMediaChannel* media_channel = new MockVideoMediaChannel;
+  // The content_name known by the video channel.
+  const std::string kVcName("vcname");
+  cricket::VideoChannel video_channel(talk_base::Thread::Current(),
+      media_engine_, media_channel, &session_, kVcName, false, NULL);
+  const std::string kTrackId("somename");
+  talk_base::scoped_refptr<webrtc::MediaStream> stream(
+      webrtc::MediaStream::Create("streamlabel"));
+  talk_base::scoped_refptr<webrtc::VideoTrack> track =
+      webrtc::VideoTrack::Create(kTrackId, NULL);
+  stream->AddTrack(track);
+  stats.AddStream(stream);
+
+  stats.set_session(&session_);
+
+  webrtc::StatsReports reports;
+
+  // Constructs an ssrc stats update.
+  cricket::VideoSenderInfo video_sender_info;
+  cricket::VideoMediaInfo stats_read;
+  const uint32 kSsrcOfTrack = 1234;
+  const int64 kBytesSent = 12345678901234LL;
+
+  // Construct a stats value to read. Use kSsrcOfTrack (not a repeated magic
+  // number) so the GetTrackIdBySsrc expectation below stays in sync.
+  video_sender_info.ssrcs.push_back(kSsrcOfTrack);
+  video_sender_info.bytes_sent = kBytesSent;
+  stats_read.senders.push_back(video_sender_info);
+
+  EXPECT_CALL(session_, video_channel())
+      .WillRepeatedly(Return(&video_channel));
+  EXPECT_CALL(*media_channel, GetStats(_))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(stats_read),
+                            Return(true)));
+  EXPECT_CALL(session_, GetTrackIdBySsrc(kSsrcOfTrack, _))
+      .WillOnce(DoAll(SetArgPointee<1>(kTrackId),
+                      Return(true)));
+
+  // Instruct the session to return stats containing the transport channel.
+  const std::string kTransportName("trspname");
+  cricket::SessionStats session_stats;
+  cricket::TransportStats transport_stats;
+  cricket::TransportChannelStats channel_stats;
+  channel_stats.component = 1;
+  transport_stats.content_name = kTransportName;
+  transport_stats.channel_stats.push_back(channel_stats);
+
+  session_stats.transport_stats[kTransportName] = transport_stats;
+  session_stats.proxy_to_transport[kVcName] = kTransportName;
+  EXPECT_CALL(session_, GetStats(_))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(session_stats),
+                            Return(true)));
+
+  stats.UpdateStats();
+  stats.GetStats(NULL, &reports);
+  // The ssrc report must name a transport report by id...
+  std::string transport_id = ExtractStatsValue(
+      webrtc::StatsReport::kStatsReportTypeSsrc,
+      reports,
+      webrtc::StatsReport::kStatsValueNameTransportId);
+  ASSERT_NE(kNotFound, transport_id);
+  // ...and that transport report must actually exist in |reports|.
+  const webrtc::StatsReport* transport_report = FindReportById(reports,
+                                                               transport_id);
+  ASSERT_FALSE(transport_report == NULL);
+}
+
+} // namespace
diff --git a/talk/app/webrtc/statstypes.h b/talk/app/webrtc/statstypes.h
new file mode 100644
index 0000000..62f8781
--- /dev/null
+++ b/talk/app/webrtc/statstypes.h
@@ -0,0 +1,160 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file contains structures used for retrieving statistics from an ongoing
+// libjingle session.
+
+#ifndef TALK_APP_WEBRTC_STATSTYPES_H_
+#define TALK_APP_WEBRTC_STATSTYPES_H_
+
+#include <string>
+#include <vector>
+
+#include "talk/base/basictypes.h"
+#include "talk/base/stringencode.h"
+
+namespace webrtc {
+
+// A single statistics report: an id, a type, a timestamp and a flat list of
+// name/value string pairs. Produced by StatsCollector; see the type and value
+// name constants below for the report vocabulary.
+class StatsReport {
+ public:
+  StatsReport() : timestamp(0) { }
+
+  std::string id;    // See below for contents.
+  std::string type;  // See below for contents.
+
+  // One name/value pair; both sides are strings regardless of the value's
+  // underlying type.
+  struct Value {
+    std::string name;
+    std::string value;
+  };
+
+  void AddValue(const std::string& name, const std::string& value);
+  void AddValue(const std::string& name, int64 value);
+  void AddBoolean(const std::string& name, bool value);
+
+  double timestamp;  // Time since 1970-01-01T00:00:00Z in milliseconds.
+  typedef std::vector<Value> Values;
+  Values values;
+
+  // StatsReport types.
+  // A StatsReport of |type| = "googSession" contains overall information
+  // about the thing libjingle calls a session (which may contain one
+  // or more RTP sessions).
+  static const char kStatsReportTypeSession[];
+
+  // A StatsReport of |type| = "googTransport" contains information
+  // about a libjingle "transport".
+  static const char kStatsReportTypeTransport[];
+
+  // A StatsReport of |type| = "googComponent" contains information
+  // about a libjingle "channel" (typically, RTP or RTCP for a transport).
+  // This is intended to be the same thing as an ICE "Component".
+  static const char kStatsReportTypeComponent[];
+
+  // A StatsReport of |type| = "googCandidatePair" contains information
+  // about a libjingle "connection" - a single source/destination port pair.
+  // This is intended to be the same thing as an ICE "candidate pair".
+  static const char kStatsReportTypeCandidatePair[];
+
+  // StatsReport of |type| = "VideoBWE" is statistics for video Bandwidth
+  // Estimation, which is global per-session. The |id| field is "bweforvideo"
+  // (will probably change in the future).
+  static const char kStatsReportTypeBwe[];
+
+  // StatsReport of |type| = "ssrc" is statistics for a specific rtp stream.
+  // The |id| field is the SSRC in decimal form of the rtp stream.
+  static const char kStatsReportTypeSsrc[];
+
+  // StatsReport of |type| = "googTrack" is statistics for a specific media
+  // track. The |id| field is the track id.
+  static const char kStatsReportTypeTrack[];
+
+  // StatsReport of |type| = "iceCandidate" is statistics on a specific
+  // ICE Candidate. It links to its transport.
+  static const char kStatsReportTypeIceCandidate[];
+
+  // The id of StatsReport of type VideoBWE.
+  static const char kStatsReportVideoBweId[];
+
+  // StatsValue names exposed to the public stats API.
+  static const char kStatsValueNameAudioOutputLevel[];
+  static const char kStatsValueNameAudioInputLevel[];
+  static const char kStatsValueNameBytesSent[];
+  static const char kStatsValueNamePacketsSent[];
+  static const char kStatsValueNameBytesReceived[];
+  static const char kStatsValueNamePacketsReceived[];
+  static const char kStatsValueNamePacketsLost[];
+  static const char kStatsValueNameTransportId[];
+  static const char kStatsValueNameLocalAddress[];
+  static const char kStatsValueNameRemoteAddress[];
+  static const char kStatsValueNameWritable[];
+  static const char kStatsValueNameReadable[];
+  static const char kStatsValueNameActiveConnection[];
+
+
+  // Internal StatsValue names.
+  static const char kStatsValueNameCodecName[];
+  static const char kStatsValueNameEchoCancellationQualityMin[];
+  static const char kStatsValueNameEchoDelayMedian[];
+  static const char kStatsValueNameEchoDelayStdDev[];
+  static const char kStatsValueNameEchoReturnLoss[];
+  static const char kStatsValueNameEchoReturnLossEnhancement[];
+  static const char kStatsValueNameFirsReceived[];
+  static const char kStatsValueNameFirsSent[];
+  static const char kStatsValueNameFrameHeightReceived[];
+  static const char kStatsValueNameFrameHeightSent[];
+  static const char kStatsValueNameFrameRateReceived[];
+  static const char kStatsValueNameFrameRateDecoded[];
+  static const char kStatsValueNameFrameRateOutput[];
+  static const char kStatsValueNameFrameRateInput[];
+  static const char kStatsValueNameFrameRateSent[];
+  static const char kStatsValueNameFrameWidthReceived[];
+  static const char kStatsValueNameFrameWidthSent[];
+  static const char kStatsValueNameJitterReceived[];
+  static const char kStatsValueNameNacksReceived[];
+  static const char kStatsValueNameNacksSent[];
+  static const char kStatsValueNameRtt[];
+  static const char kStatsValueNameAvailableSendBandwidth[];
+  static const char kStatsValueNameAvailableReceiveBandwidth[];
+  static const char kStatsValueNameTargetEncBitrate[];
+  static const char kStatsValueNameActualEncBitrate[];
+  static const char kStatsValueNameRetransmitBitrate[];
+  static const char kStatsValueNameTransmitBitrate[];
+  static const char kStatsValueNameBucketDelay[];
+  static const char kStatsValueNameInitiator[];
+  static const char kStatsValueNameTransportType[];
+  static const char kStatsValueNameContentName[];
+  static const char kStatsValueNameComponent[];
+  static const char kStatsValueNameChannelId[];
+  static const char kStatsValueNameTrackId[];
+  static const char kStatsValueNameSsrc[];
+};
+
+// The container type returned by StatsCollector::GetStats.
+typedef std::vector<StatsReport> StatsReports;
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_STATSTYPES_H_
diff --git a/talk/app/webrtc/streamcollection.h b/talk/app/webrtc/streamcollection.h
new file mode 100644
index 0000000..7796b42
--- /dev/null
+++ b/talk/app/webrtc/streamcollection.h
@@ -0,0 +1,125 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_STREAMCOLLECTION_H_
+#define TALK_APP_WEBRTC_STREAMCOLLECTION_H_
+
+#include <string>
+#include <vector>
+
+#include "talk/app/webrtc/peerconnectioninterface.h"
+
+namespace webrtc {
+
+// Implementation of StreamCollectionInterface backed by a vector of
+// ref-counted media streams. Stream labels act as the collection key.
+class StreamCollection : public StreamCollectionInterface {
+ public:
+  // Creates an empty collection.
+  static talk_base::scoped_refptr<StreamCollection> Create() {
+    talk_base::RefCountedObject<StreamCollection>* collection =
+        new talk_base::RefCountedObject<StreamCollection>();
+    return collection;
+  }
+
+  // Creates a collection containing the same streams as |streams|.
+  static talk_base::scoped_refptr<StreamCollection> Create(
+      StreamCollection* streams) {
+    talk_base::RefCountedObject<StreamCollection>* collection =
+        new talk_base::RefCountedObject<StreamCollection>(streams);
+    return collection;
+  }
+
+  virtual size_t count() {
+    return media_streams_.size();
+  }
+
+  virtual MediaStreamInterface* at(size_t index) {
+    return media_streams_.at(index);
+  }
+
+  // Returns the stream whose label equals |label|, or NULL if none matches.
+  virtual MediaStreamInterface* find(const std::string& label) {
+    for (size_t i = 0; i < media_streams_.size(); ++i) {
+      if (media_streams_[i]->label() == label) {
+        return media_streams_[i];
+      }
+    }
+    return NULL;
+  }
+
+  // Searches every stream, in order, for an audio track with id |id|.
+  virtual MediaStreamTrackInterface* FindAudioTrack(
+      const std::string& id) {
+    for (StreamVector::iterator it = media_streams_.begin();
+         it != media_streams_.end(); ++it) {
+      MediaStreamTrackInterface* track = (*it)->FindAudioTrack(id);
+      if (track != NULL) {
+        return track;
+      }
+    }
+    return NULL;
+  }
+
+  // Searches every stream, in order, for a video track with id |id|.
+  virtual MediaStreamTrackInterface* FindVideoTrack(
+      const std::string& id) {
+    for (StreamVector::iterator it = media_streams_.begin();
+         it != media_streams_.end(); ++it) {
+      MediaStreamTrackInterface* track = (*it)->FindVideoTrack(id);
+      if (track != NULL) {
+        return track;
+      }
+    }
+    return NULL;
+  }
+
+  // Appends |stream| unless a stream with the same label is already present.
+  void AddStream(MediaStreamInterface* stream) {
+    for (size_t i = 0; i < media_streams_.size(); ++i) {
+      if (media_streams_[i]->label() == stream->label()) {
+        return;
+      }
+    }
+    media_streams_.push_back(stream);
+  }
+
+  // Removes the first stream whose label matches |remove_stream|'s label.
+  void RemoveStream(MediaStreamInterface* remove_stream) {
+    for (size_t i = 0; i < media_streams_.size(); ++i) {
+      if (media_streams_[i]->label() == remove_stream->label()) {
+        media_streams_.erase(media_streams_.begin() + i);
+        break;
+      }
+    }
+  }
+
+ protected:
+  StreamCollection() {}
+  explicit StreamCollection(StreamCollection* original)
+      : media_streams_(original->media_streams_) {
+  }
+  typedef std::vector<talk_base::scoped_refptr<MediaStreamInterface> >
+      StreamVector;
+  StreamVector media_streams_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_STREAMCOLLECTION_H_
diff --git a/talk/app/webrtc/test/fakeaudiocapturemodule.cc b/talk/app/webrtc/test/fakeaudiocapturemodule.cc
new file mode 100644
index 0000000..4bdaf89
--- /dev/null
+++ b/talk/app/webrtc/test/fakeaudiocapturemodule.cc
@@ -0,0 +1,716 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/test/fakeaudiocapturemodule.h"
+
+#include "talk/base/common.h"
+#include "talk/base/refcount.h"
+#include "talk/base/thread.h"
+#include "talk/base/timeutils.h"
+
+// Audio sample value that is high enough that it doesn't occur naturally when
+// frames are being faked. E.g. NetEq will not generate this large sample value
+// unless it has received an audio frame containing a sample of this value.
+// Even simpler buffers would likely just contain audio sample values of 0.
+static const int kHighSampleValue = 10000;
+
+// Same value as src/modules/audio_device/main/source/audio_device_config.h in
+// https://code.google.com/p/webrtc/
+// Maximum idle time (ms) reported by TimeUntilNextProcess() between two
+// Process() calls.
+static const uint32 kAdmMaxIdleTimeProcess = 1000;
+
+// Constants here are derived by running VoE using a real ADM.
+// The constants correspond to 10ms of mono audio at 44kHz.
+static const int kTimePerFrameMs = 10;
+static const int kNumberOfChannels = 1;
+static const int kSamplesPerSecond = 44000;
+static const int kTotalDelayMs = 0;
+static const int kClockDriftMs = 0;
+static const uint32_t kMaxVolume = 14392;
+
+// Thread messages used to start/stop the periodic frame pump on the process
+// thread (see FakeAudioCaptureModule::OnMessage()).
+enum {
+  MSG_RUN_PROCESS,
+  MSG_STOP_PROCESS,
+};
+
+FakeAudioCaptureModule::FakeAudioCaptureModule(
+    talk_base::Thread* process_thread)
+    : last_process_time_ms_(0),
+      audio_callback_(NULL),
+      recording_(false),
+      playing_(false),
+      play_is_initialized_(false),
+      rec_is_initialized_(false),
+      current_mic_level_(kMaxVolume),
+      started_(false),
+      next_frame_time_(0),
+      process_thread_(process_thread),
+      frames_received_(0) {
+}
+
+FakeAudioCaptureModule::~FakeAudioCaptureModule() {
+  // Ensure that thread stops calling ProcessFrame().
+  // Send() blocks until the process thread has handled the message, so the
+  // pump is guaranteed stopped before destruction continues.
+  process_thread_->Send(this, MSG_STOP_PROCESS);
+}
+
+// Factory. Returns NULL if |process_thread| is NULL or initialization fails;
+// otherwise returns a ref-counted, fully initialized module.
+talk_base::scoped_refptr<FakeAudioCaptureModule> FakeAudioCaptureModule::Create(
+    talk_base::Thread* process_thread) {
+  if (process_thread == NULL) return NULL;
+
+  talk_base::scoped_refptr<FakeAudioCaptureModule> capture_module(
+      new talk_base::RefCountedObject<FakeAudioCaptureModule>(process_thread));
+  if (!capture_module->Initialize()) {
+    return NULL;
+  }
+  return capture_module;
+}
+
+// Number of pulled frames so far that contained a high-magnitude sample; see
+// ReceiveFrameP().
+int FakeAudioCaptureModule::frames_received() const {
+  return frames_received_;
+}
+
+// Not used by PeerConnection; asserts if called.
+int32_t FakeAudioCaptureModule::Version(char* /*version*/,
+                                        uint32_t& /*remaining_buffer_in_bytes*/,
+                                        uint32_t& /*position*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+// Returns the number of ms until Process() should be called again, i.e.
+// kAdmMaxIdleTimeProcess ms after the previous Process() call.
+int32_t FakeAudioCaptureModule::TimeUntilNextProcess() {
+  const uint32 current_time = talk_base::Time();
+  if (current_time < last_process_time_ms_) {
+    // TODO: wraparound could be handled more gracefully.
+    return 0;
+  }
+  const uint32 elapsed_time = current_time - last_process_time_ms_;
+  if (kAdmMaxIdleTimeProcess < elapsed_time) {
+    return 0;
+  }
+  return kAdmMaxIdleTimeProcess - elapsed_time;
+}
+
+// Records the wall-clock time of this call; used by TimeUntilNextProcess().
+int32_t FakeAudioCaptureModule::Process() {
+  last_process_time_ms_ = talk_base::Time();
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called.
+int32_t FakeAudioCaptureModule::ChangeUniqueId(const int32_t /*id*/) {
+  ASSERT(false);
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called.
+int32_t FakeAudioCaptureModule::ActiveAudioLayer(
+    AudioLayer* /*audio_layer*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called. Returns kAdmErrNone only to
+// satisfy the signature.
+webrtc::AudioDeviceModule::ErrorCode FakeAudioCaptureModule::LastError() const {
+  ASSERT(false);
+  return webrtc::AudioDeviceModule::kAdmErrNone;
+}
+
+int32_t FakeAudioCaptureModule::RegisterEventObserver(
+    webrtc::AudioDeviceObserver* /*event_callback*/) {
+  // Only used to report warnings and errors. This fake implementation won't
+  // generate any so discard this callback.
+  return 0;
+}
+
+// Stores the callback used to pull (playout) and push (record) audio frames.
+// A NULL callback disables frame exchange in ProcessFrameP().
+int32_t FakeAudioCaptureModule::RegisterAudioCallback(
+    webrtc::AudioTransport* audio_callback) {
+  audio_callback_ = audio_callback;
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::Init() {
+  // Initialize is called by the factory method. Safe to ignore this Init call.
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::Terminate() {
+  // Clean up in the destructor. No action here, just success.
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called.
+bool FakeAudioCaptureModule::Initialized() const {
+  ASSERT(false);
+  return 0;
+}
+
+// Device enumeration: there are no real devices, so the query functions are
+// not used by PeerConnection and assert if called.
+int16_t FakeAudioCaptureModule::PlayoutDevices() {
+  ASSERT(false);
+  return 0;
+}
+
+int16_t FakeAudioCaptureModule::RecordingDevices() {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::PlayoutDeviceName(
+    uint16_t /*index*/,
+    char /*name*/[webrtc::kAdmMaxDeviceNameSize],
+    char /*guid*/[webrtc::kAdmMaxGuidSize]) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::RecordingDeviceName(
+    uint16_t /*index*/,
+    char /*name*/[webrtc::kAdmMaxDeviceNameSize],
+    char /*guid*/[webrtc::kAdmMaxGuidSize]) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetPlayoutDevice(uint16_t /*index*/) {
+  // No playout device, just playing from file. Return success.
+  return 0;
+}
+
+// Fails (-1) if playout has already been initialized.
+int32_t FakeAudioCaptureModule::SetPlayoutDevice(WindowsDeviceType /*device*/) {
+  if (play_is_initialized_) {
+    return -1;
+  }
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetRecordingDevice(uint16_t /*index*/) {
+  // No recording device, just dropping audio. Return success.
+  return 0;
+}
+
+// Fails (-1) if recording has already been initialized.
+int32_t FakeAudioCaptureModule::SetRecordingDevice(
+    WindowsDeviceType /*device*/) {
+  if (rec_is_initialized_) {
+    return -1;
+  }
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called.
+int32_t FakeAudioCaptureModule::PlayoutIsAvailable(bool* /*available*/) {
+  ASSERT(false);
+  return 0;
+}
+
+// Marks playout as initialized; a precondition for StartPlayout().
+int32_t FakeAudioCaptureModule::InitPlayout() {
+  play_is_initialized_ = true;
+  return 0;
+}
+
+bool FakeAudioCaptureModule::PlayoutIsInitialized() const {
+  return play_is_initialized_;
+}
+
+// Not used by PeerConnection; asserts if called.
+int32_t FakeAudioCaptureModule::RecordingIsAvailable(bool* /*available*/) {
+  ASSERT(false);
+  return 0;
+}
+
+// Marks recording as initialized; a precondition for StartRecording().
+int32_t FakeAudioCaptureModule::InitRecording() {
+  rec_is_initialized_ = true;
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called.
+bool FakeAudioCaptureModule::RecordingIsInitialized() const {
+  ASSERT(false);
+  return 0;
+}
+
+// Starts pulling audio frames. Fails (-1) unless InitPlayout() was called.
+int32_t FakeAudioCaptureModule::StartPlayout() {
+  if (!play_is_initialized_) {
+    return -1;
+  }
+  playing_ = true;
+  UpdateProcessing();
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::StopPlayout() {
+  playing_ = false;
+  UpdateProcessing();
+  return 0;
+}
+
+bool FakeAudioCaptureModule::Playing() const {
+  return playing_;
+}
+
+// Starts pushing audio frames. Fails (-1) unless InitRecording() was called.
+int32_t FakeAudioCaptureModule::StartRecording() {
+  if (!rec_is_initialized_) {
+    return -1;
+  }
+  recording_ = true;
+  UpdateProcessing();
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::StopRecording() {
+  recording_ = false;
+  UpdateProcessing();
+  return 0;
+}
+
+bool FakeAudioCaptureModule::Recording() const {
+  return recording_;
+}
+
+int32_t FakeAudioCaptureModule::SetAGC(bool /*enable*/) {
+  // No AGC but not needed since audio is pregenerated. Return success.
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called.
+bool FakeAudioCaptureModule::AGC() const {
+  ASSERT(false);
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called.
+int32_t FakeAudioCaptureModule::SetWaveOutVolume(uint16_t /*volume_left*/,
+                                                 uint16_t /*volume_right*/) {
+  ASSERT(false);
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called.
+int32_t FakeAudioCaptureModule::WaveOutVolume(
+    uint16_t* /*volume_left*/,
+    uint16_t* /*volume_right*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SpeakerIsAvailable(bool* available) {
+  // No speaker, just dropping audio. Return success.
+  *available = true;
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::InitSpeaker() {
+  // No speaker, just playing from file. Return success.
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called.
+bool FakeAudioCaptureModule::SpeakerIsInitialized() const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::MicrophoneIsAvailable(bool* available) {
+  // No microphone, just playing from file. Return success.
+  *available = true;
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::InitMicrophone() {
+  // No microphone, just playing from file. Return success.
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called.
+bool FakeAudioCaptureModule::MicrophoneIsInitialized() const {
+  ASSERT(false);
+  return 0;
+}
+
+// Speaker/microphone volume, mute and boost control. Apart from
+// MicrophoneVolume() and MaxMicrophoneVolume() below, none of these are used
+// by PeerConnection; they assert if called.
+int32_t FakeAudioCaptureModule::SpeakerVolumeIsAvailable(bool* /*available*/) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetSpeakerVolume(uint32_t /*volume*/) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SpeakerVolume(uint32_t* /*volume*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::MaxSpeakerVolume(
+    uint32_t* /*max_volume*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::MinSpeakerVolume(
+    uint32_t* /*min_volume*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SpeakerVolumeStepSize(
+    uint16_t* /*step_size*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::MicrophoneVolumeIsAvailable(
+    bool* /*available*/) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetMicrophoneVolume(uint32_t /*volume*/) {
+  ASSERT(false);
+  return 0;
+}
+
+// Reports the current (fed-back) mic level; see current_mic_level_ and
+// SendFrameP().
+int32_t FakeAudioCaptureModule::MicrophoneVolume(uint32_t* volume) const {
+  *volume = current_mic_level_;
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::MaxMicrophoneVolume(
+    uint32_t* max_volume) const {
+  *max_volume = kMaxVolume;
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::MinMicrophoneVolume(
+    uint32_t* /*min_volume*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::MicrophoneVolumeStepSize(
+    uint16_t* /*step_size*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SpeakerMuteIsAvailable(bool* /*available*/) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetSpeakerMute(bool /*enable*/) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SpeakerMute(bool* /*enabled*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::MicrophoneMuteIsAvailable(bool* /*available*/) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetMicrophoneMute(bool /*enable*/) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::MicrophoneMute(bool* /*enabled*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::MicrophoneBoostIsAvailable(
+    bool* /*available*/) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetMicrophoneBoost(bool /*enable*/) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::MicrophoneBoost(bool* /*enabled*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::StereoPlayoutIsAvailable(
+    bool* available) const {
+  // No recording device, just dropping audio. Stereo can be dropped just
+  // as easily as mono.
+  *available = true;
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetStereoPlayout(bool /*enable*/) {
+  // No recording device, just dropping audio. Stereo can be dropped just
+  // as easily as mono.
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called.
+int32_t FakeAudioCaptureModule::StereoPlayout(bool* /*enabled*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::StereoRecordingIsAvailable(
+    bool* available) const {
+  // Keep things simple. No stereo recording.
+  *available = false;
+  return 0;
+}
+
+// Succeeds only when disabling stereo recording; mono is all that is
+// supported.
+int32_t FakeAudioCaptureModule::SetStereoRecording(bool enable) {
+  if (!enable) {
+    return 0;
+  }
+  return -1;
+}
+
+// Not used by PeerConnection; asserts if called.
+int32_t FakeAudioCaptureModule::StereoRecording(bool* /*enabled*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetRecordingChannel(
+    const ChannelType channel) {
+  if (channel != AudioDeviceModule::kChannelBoth) {
+    // There is no right or left in mono. I.e. kChannelBoth should be used for
+    // mono.
+    ASSERT(false);
+    return -1;
+  }
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::RecordingChannel(ChannelType* channel) const {
+  // Stereo recording not supported. However, WebRTC ADM returns kChannelBoth
+  // in that case. Do the same here.
+  *channel = AudioDeviceModule::kChannelBoth;
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called.
+int32_t FakeAudioCaptureModule::SetPlayoutBuffer(const BufferType /*type*/,
+                                                 uint16_t /*size_ms*/) {
+  ASSERT(false);
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called.
+int32_t FakeAudioCaptureModule::PlayoutBuffer(BufferType* /*type*/,
+                                              uint16_t* /*size_ms*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::PlayoutDelay(uint16_t* delay_ms) const {
+  // No delay since audio frames are dropped.
+  *delay_ms = 0;
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called.
+int32_t FakeAudioCaptureModule::RecordingDelay(uint16_t* /*delay_ms*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+// Not used by PeerConnection; asserts if called.
+int32_t FakeAudioCaptureModule::CPULoad(uint16_t* /*load*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+// Raw-file recording, sample-rate overrides, device reset and loudspeaker
+// control are not used by PeerConnection; all of the functions below assert
+// if called.
+int32_t FakeAudioCaptureModule::StartRawOutputFileRecording(
+    const char /*pcm_file_name_utf8*/[webrtc::kAdmMaxFileNameSize]) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::StopRawOutputFileRecording() {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::StartRawInputFileRecording(
+    const char /*pcm_file_name_utf8*/[webrtc::kAdmMaxFileNameSize]) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::StopRawInputFileRecording() {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetRecordingSampleRate(
+    const uint32_t /*samples_per_sec*/) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::RecordingSampleRate(
+    uint32_t* /*samples_per_sec*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetPlayoutSampleRate(
+    const uint32_t /*samples_per_sec*/) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::PlayoutSampleRate(
+    uint32_t* /*samples_per_sec*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::ResetAudioDevice() {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::SetLoudspeakerStatus(bool /*enable*/) {
+  ASSERT(false);
+  return 0;
+}
+
+int32_t FakeAudioCaptureModule::GetLoudspeakerStatus(bool* /*enabled*/) const {
+  ASSERT(false);
+  return 0;
+}
+
+// talk_base::MessageHandler implementation. Dispatches the two messages used
+// to drive frame processing; runs on the process thread.
+void FakeAudioCaptureModule::OnMessage(talk_base::Message* msg) {
+  switch (msg->message_id) {
+    case MSG_RUN_PROCESS:
+      ProcessFrameP();
+      break;
+    case MSG_STOP_PROCESS:
+      StopProcessP();
+      break;
+    default:
+      // All existing messages should be caught. Getting here should never
+      // happen.
+      ASSERT(false);
+  }
+}
+
+// Called once from Create(). Fills send_buffer_ with a recognizable sample
+// value and seeds the Process() timer.
+bool FakeAudioCaptureModule::Initialize() {
+  // Set the send buffer samples high enough that it would not occur on the
+  // remote side unless a packet containing a sample of that magnitude has been
+  // sent to it. Note that the audio processing pipeline will likely distort the
+  // original signal.
+  SetSendBuffer(kHighSampleValue);
+  last_process_time_ms_ = talk_base::Time();
+  return true;
+}
+
+// Sets every sample in send_buffer_ to |value| (implicitly narrowed to
+// Sample).
+void FakeAudioCaptureModule::SetSendBuffer(int value) {
+  Sample* buffer_ptr = reinterpret_cast<Sample*>(send_buffer_);
+  const int buffer_size_in_samples = sizeof(send_buffer_) /
+      kNumberBytesPerSample;
+  for (int i = 0; i < buffer_size_in_samples; ++i) {
+    buffer_ptr[i] = value;
+  }
+}
+
+// Zeroes rec_buffer_ before a new frame is pulled into it.
+void FakeAudioCaptureModule::ResetRecBuffer() {
+  memset(rec_buffer_, 0, sizeof(rec_buffer_));
+}
+
+// Returns true if any sample in rec_buffer_ is >= |value|.
+// NOTE(review): Sample is unsigned (uint16), so a negative 16-bit sample in
+// the buffer reads back as a large positive value and would also satisfy the
+// comparison -- confirm this is acceptable for the detection heuristic.
+bool FakeAudioCaptureModule::CheckRecBuffer(int value) {
+  const Sample* buffer_ptr = reinterpret_cast<const Sample*>(rec_buffer_);
+  const int buffer_size_in_samples = sizeof(rec_buffer_) /
+      kNumberBytesPerSample;
+  for (int i = 0; i < buffer_size_in_samples; ++i) {
+    if (buffer_ptr[i] >= value) return true;
+  }
+  return false;
+}
+
+// Starts or stops the periodic ProcessFrameP() pump depending on whether
+// playout and/or recording is currently enabled.
+void FakeAudioCaptureModule::UpdateProcessing() {
+  const bool process = recording_ || playing_;
+  if (process) {
+    if (started_) {
+      // Already started.
+      return;
+    }
+    // NOTE(review): started_ is only set on the process thread in
+    // ProcessFrameP(), so two calls here in quick succession (e.g.
+    // StartPlayout() then StartRecording()) could post MSG_RUN_PROCESS twice
+    // -- confirm a duplicate processing loop cannot occur / is acceptable.
+    process_thread_->Post(this, MSG_RUN_PROCESS);
+  } else {
+    // Send() blocks, so processing is guaranteed stopped on return.
+    process_thread_->Send(this, MSG_STOP_PROCESS);
+  }
+}
+
+// Runs on the process thread. Exchanges one 10ms frame with the registered
+// callback and re-posts itself so frames are handled every kTimePerFrameMs.
+void FakeAudioCaptureModule::ProcessFrameP() {
+  ASSERT(talk_base::Thread::Current() == process_thread_);
+  if (!started_) {
+    next_frame_time_ = talk_base::Time();
+    started_ = true;
+  }
+  // Receive and send frames every kTimePerFrameMs.
+  if (audio_callback_ != NULL) {
+    if (playing_) {
+      ReceiveFrameP();
+    }
+    if (recording_) {
+      SendFrameP();
+    }
+  }
+
+  // Schedule the next tick relative to next_frame_time_ rather than "now" so
+  // that per-frame processing time does not accumulate as drift.
+  next_frame_time_ += kTimePerFrameMs;
+  const uint32 current_time = talk_base::Time();
+  const uint32 wait_time = (next_frame_time_ > current_time) ?
+      next_frame_time_ - current_time : 0;
+  process_thread_->PostDelayed(wait_time, this, MSG_RUN_PROCESS);
+}
+
+// Runs on the process thread. Pulls one frame of playout audio from the
+// registered callback into rec_buffer_ and counts it as received when it
+// contains a high-magnitude sample (see SetSendBuffer()).
+void FakeAudioCaptureModule::ReceiveFrameP() {
+  ASSERT(talk_base::Thread::Current() == process_thread_);
+  ResetRecBuffer();
+  uint32_t nSamplesOut = 0;
+  if (audio_callback_->NeedMorePlayData(kNumberSamples, kNumberBytesPerSample,
+                                        kNumberOfChannels, kSamplesPerSecond,
+                                        rec_buffer_, nSamplesOut) != 0) {
+    ASSERT(false);
+  }
+  ASSERT(nSamplesOut == kNumberSamples);
+  // The SetBuffer() function ensures that after decoding, the audio buffer
+  // should contain samples of similar magnitude (there is likely to be some
+  // distortion due to the audio pipeline). If one sample is detected to
+  // have the same or greater magnitude somewhere in the frame, an actual frame
+  // has been received from the remote side (i.e. faked frames are not being
+  // pulled).
+  if (CheckRecBuffer(kHighSampleValue)) ++frames_received_;
+}
+
+// Runs on the process thread. Pushes one pre-filled frame (send_buffer_) to
+// the registered callback. current_mic_level_ is passed both in and out: the
+// fake simply feeds back whatever level the callback reports.
+void FakeAudioCaptureModule::SendFrameP() {
+  ASSERT(talk_base::Thread::Current() == process_thread_);
+  bool key_pressed = false;
+  if (audio_callback_->RecordedDataIsAvailable(send_buffer_, kNumberSamples,
+                                               kNumberBytesPerSample,
+                                               kNumberOfChannels,
+                                               kSamplesPerSecond, kTotalDelayMs,
+                                               kClockDriftMs, current_mic_level_,
+                                               key_pressed,
+                                               current_mic_level_) != 0) {
+    ASSERT(false);
+  }
+}
+
+// Runs on the process thread. Stops the periodic pump and drops any messages
+// still queued for this handler.
+void FakeAudioCaptureModule::StopProcessP() {
+  ASSERT(talk_base::Thread::Current() == process_thread_);
+  started_ = false;
+  process_thread_->Clear(this);
+}
diff --git a/talk/app/webrtc/test/fakeaudiocapturemodule.h b/talk/app/webrtc/test/fakeaudiocapturemodule.h
new file mode 100644
index 0000000..c32fa1f
--- /dev/null
+++ b/talk/app/webrtc/test/fakeaudiocapturemodule.h
@@ -0,0 +1,280 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This class implements an AudioCaptureModule that can be used to detect if
+// audio is being received properly if it is fed by another AudioCaptureModule
+// in some arbitrary audio pipeline where they are connected. It does not play
+// out or record any audio so it does not need access to any hardware and can
+// therefore be used in the gtest testing framework.
+
+// Note: a "P" suffix on a function name indicates that the function must only
+// be called on the processing thread.
+
+#ifndef TALK_APP_WEBRTC_TEST_FAKEAUDIOCAPTUREMODULE_H_
+#define TALK_APP_WEBRTC_TEST_FAKEAUDIOCAPTUREMODULE_H_
+
+#include "talk/base/basictypes.h"
+#include "talk/base/messagehandler.h"
+#include "talk/base/scoped_ref_ptr.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_device/include/audio_device.h"
+
+// Forward declaration so this header need not pull in the full Thread header.
+namespace talk_base {
+
+class Thread;
+
+}  // namespace talk_base
+
+class FakeAudioCaptureModule
+    : public webrtc::AudioDeviceModule,
+      public talk_base::MessageHandler {
+ public:
+  // Type of a single audio sample in rec_buffer_/send_buffer_.
+  // NOTE(review): audio samples are conventionally signed 16-bit; because
+  // Sample is unsigned here, negative samples read back as large positive
+  // values in CheckRecBuffer() -- confirm this is intended.
+  typedef uint16 Sample;
+
+  // The value for the following constants have been derived by running VoE
+  // using a real ADM. The constants correspond to 10ms of mono audio at 44kHz.
+  enum{kNumberSamples = 440};
+  enum{kNumberBytesPerSample = sizeof(Sample)};
+
+  // Creates a FakeAudioCaptureModule or returns NULL on failure.
+  // |process_thread| is used to push and pull audio frames to and from the
+  // returned instance. Note: ownership of |process_thread| is not handed over.
+  static talk_base::scoped_refptr<FakeAudioCaptureModule> Create(
+      talk_base::Thread* process_thread);
+
+  // Returns the number of frames that have been successfully pulled by the
+  // instance. Note that correctly detecting success can only be done if the
+  // pulled frame was generated/pushed from a FakeAudioCaptureModule.
+  int frames_received() const;
+
+  // Following functions are inherited from webrtc::AudioDeviceModule.
+  // Only functions called by PeerConnection are implemented, the rest do
+  // nothing and return success. If a function is not expected to be called by
+  // PeerConnection an assertion is triggered if it is in fact called.
+  virtual int32_t Version(char* version,
+                          uint32_t& remaining_buffer_in_bytes,
+                          uint32_t& position) const;
+  virtual int32_t TimeUntilNextProcess();
+  virtual int32_t Process();
+  virtual int32_t ChangeUniqueId(const int32_t id);
+
+  virtual int32_t ActiveAudioLayer(AudioLayer* audio_layer) const;
+
+  virtual ErrorCode LastError() const;
+  virtual int32_t RegisterEventObserver(
+      webrtc::AudioDeviceObserver* event_callback);
+
+  virtual int32_t RegisterAudioCallback(webrtc::AudioTransport* audio_callback);
+
+  virtual int32_t Init();
+  virtual int32_t Terminate();
+  virtual bool Initialized() const;
+
+  virtual int16_t PlayoutDevices();
+  virtual int16_t RecordingDevices();
+  virtual int32_t PlayoutDeviceName(uint16_t index,
+                                    char name[webrtc::kAdmMaxDeviceNameSize],
+                                    char guid[webrtc::kAdmMaxGuidSize]);
+  virtual int32_t RecordingDeviceName(uint16_t index,
+                                      char name[webrtc::kAdmMaxDeviceNameSize],
+                                      char guid[webrtc::kAdmMaxGuidSize]);
+
+  virtual int32_t SetPlayoutDevice(uint16_t index);
+  virtual int32_t SetPlayoutDevice(WindowsDeviceType device);
+  virtual int32_t SetRecordingDevice(uint16_t index);
+  virtual int32_t SetRecordingDevice(WindowsDeviceType device);
+
+  virtual int32_t PlayoutIsAvailable(bool* available);
+  virtual int32_t InitPlayout();
+  virtual bool PlayoutIsInitialized() const;
+  virtual int32_t RecordingIsAvailable(bool* available);
+  virtual int32_t InitRecording();
+  virtual bool RecordingIsInitialized() const;
+
+  virtual int32_t StartPlayout();
+  virtual int32_t StopPlayout();
+  virtual bool Playing() const;
+  virtual int32_t StartRecording();
+  virtual int32_t StopRecording();
+  virtual bool Recording() const;
+
+  virtual int32_t SetAGC(bool enable);
+  virtual bool AGC() const;
+
+  virtual int32_t SetWaveOutVolume(uint16_t volume_left,
+                                   uint16_t volume_right);
+  virtual int32_t WaveOutVolume(uint16_t* volume_left,
+                                uint16_t* volume_right) const;
+
+  virtual int32_t SpeakerIsAvailable(bool* available);
+  virtual int32_t InitSpeaker();
+  virtual bool SpeakerIsInitialized() const;
+  virtual int32_t MicrophoneIsAvailable(bool* available);
+  virtual int32_t InitMicrophone();
+  virtual bool MicrophoneIsInitialized() const;
+
+  virtual int32_t SpeakerVolumeIsAvailable(bool* available);
+  virtual int32_t SetSpeakerVolume(uint32_t volume);
+  virtual int32_t SpeakerVolume(uint32_t* volume) const;
+  virtual int32_t MaxSpeakerVolume(uint32_t* max_volume) const;
+  virtual int32_t MinSpeakerVolume(uint32_t* min_volume) const;
+  virtual int32_t SpeakerVolumeStepSize(uint16_t* step_size) const;
+
+  virtual int32_t MicrophoneVolumeIsAvailable(bool* available);
+  virtual int32_t SetMicrophoneVolume(uint32_t volume);
+  virtual int32_t MicrophoneVolume(uint32_t* volume) const;
+  virtual int32_t MaxMicrophoneVolume(uint32_t* max_volume) const;
+
+  virtual int32_t MinMicrophoneVolume(uint32_t* min_volume) const;
+  virtual int32_t MicrophoneVolumeStepSize(uint16_t* step_size) const;
+
+  virtual int32_t SpeakerMuteIsAvailable(bool* available);
+  virtual int32_t SetSpeakerMute(bool enable);
+  virtual int32_t SpeakerMute(bool* enabled) const;
+
+  virtual int32_t MicrophoneMuteIsAvailable(bool* available);
+  virtual int32_t SetMicrophoneMute(bool enable);
+  virtual int32_t MicrophoneMute(bool* enabled) const;
+
+  virtual int32_t MicrophoneBoostIsAvailable(bool* available);
+  virtual int32_t SetMicrophoneBoost(bool enable);
+  virtual int32_t MicrophoneBoost(bool* enabled) const;
+
+  virtual int32_t StereoPlayoutIsAvailable(bool* available) const;
+  virtual int32_t SetStereoPlayout(bool enable);
+  virtual int32_t StereoPlayout(bool* enabled) const;
+  virtual int32_t StereoRecordingIsAvailable(bool* available) const;
+  virtual int32_t SetStereoRecording(bool enable);
+  virtual int32_t StereoRecording(bool* enabled) const;
+  virtual int32_t SetRecordingChannel(const ChannelType channel);
+  virtual int32_t RecordingChannel(ChannelType* channel) const;
+
+  virtual int32_t SetPlayoutBuffer(const BufferType type,
+                                   uint16_t size_ms = 0);
+  virtual int32_t PlayoutBuffer(BufferType* type,
+                                uint16_t* size_ms) const;
+  virtual int32_t PlayoutDelay(uint16_t* delay_ms) const;
+  virtual int32_t RecordingDelay(uint16_t* delay_ms) const;
+
+  virtual int32_t CPULoad(uint16_t* load) const;
+
+  virtual int32_t StartRawOutputFileRecording(
+      const char pcm_file_name_utf8[webrtc::kAdmMaxFileNameSize]);
+  virtual int32_t StopRawOutputFileRecording();
+  virtual int32_t StartRawInputFileRecording(
+      const char pcm_file_name_utf8[webrtc::kAdmMaxFileNameSize]);
+  virtual int32_t StopRawInputFileRecording();
+
+  virtual int32_t SetRecordingSampleRate(const uint32_t samples_per_sec);
+  virtual int32_t RecordingSampleRate(uint32_t* samples_per_sec) const;
+  virtual int32_t SetPlayoutSampleRate(const uint32_t samples_per_sec);
+  virtual int32_t PlayoutSampleRate(uint32_t* samples_per_sec) const;
+
+  virtual int32_t ResetAudioDevice();
+  virtual int32_t SetLoudspeakerStatus(bool enable);
+  virtual int32_t GetLoudspeakerStatus(bool* enabled) const;
+  // End of functions inherited from webrtc::AudioDeviceModule.
+
+  // The following function is inherited from talk_base::MessageHandler.
+  virtual void OnMessage(talk_base::Message* msg);
+
+ protected:
+  // The constructor is protected because the class needs to be created as a
+  // reference counted object (for memory management reasons). It could be
+  // exposed in which case the burden of proper instantiation would be put on
+  // the creator of a FakeAudioCaptureModule instance. To create an instance of
+  // this class use the Create(..) API.
+  explicit FakeAudioCaptureModule(talk_base::Thread* process_thread);
+  // The destructor is protected because it is reference counted and should not
+  // be deleted directly.
+  virtual ~FakeAudioCaptureModule();
+
+ private:
+  // Initializes the state of the FakeAudioCaptureModule. This API is called on
+  // creation by the Create() API.
+  bool Initialize();
+  // SetBuffer() sets all samples in send_buffer_ to |value|.
+  void SetSendBuffer(int value);
+  // Resets rec_buffer_. I.e., sets all rec_buffer_ samples to 0.
+  void ResetRecBuffer();
+  // Returns true if rec_buffer_ contains one or more sample greater than or
+  // equal to |value|.
+  bool CheckRecBuffer(int value);
+
+  // Starts or stops the pushing and pulling of audio frames depending on if
+  // recording or playback has been enabled/started.
+  void UpdateProcessing();
+
+  // Periodically called function that ensures that frames are pulled and
+  // pushed periodically if enabled/started.
+  void ProcessFrameP();
+  // Pulls frames from the registered webrtc::AudioTransport.
+  void ReceiveFrameP();
+  // Pushes frames to the registered webrtc::AudioTransport.
+  void SendFrameP();
+  // Stops the periodic calling of ProcessFrame() in a thread safe way.
+  void StopProcessP();
+
+  // The time in milliseconds when Process() was last called or 0 if no call
+  // has been made.
+  uint32 last_process_time_ms_;
+
+  // Callback for playout and recording.
+  webrtc::AudioTransport* audio_callback_;
+
+  bool recording_;  // True when audio is being pushed from the instance.
+  bool playing_;  // True when audio is being pulled by the instance.
+
+  bool play_is_initialized_;  // True when the instance is ready to pull audio.
+  bool rec_is_initialized_;  // True when the instance is ready to push audio.
+
+  // Input to and output from RecordedDataIsAvailable(..) makes it possible to
+  // modify the current mic level. The implementation does not care about the
+  // mic level so it just feeds back what it receives.
+  uint32_t current_mic_level_;
+
+  // next_frame_time_ is updated in a non-drifting manner to indicate the next
+  // wall clock time the next frame should be generated and received. started_
+  // ensures that next_frame_time_ can be initialized properly on first call.
+  bool started_;
+  uint32 next_frame_time_;
+
+  // User provided thread context.
+  talk_base::Thread* process_thread_;
+
+  // Buffer for storing samples received from the webrtc::AudioTransport.
+  char rec_buffer_[kNumberSamples * kNumberBytesPerSample];
+  // Buffer for samples to send to the webrtc::AudioTransport.
+  char send_buffer_[kNumberSamples * kNumberBytesPerSample];
+
+  // Counter of frames received that have samples of high enough amplitude to
+  // indicate that the frames are not faked somewhere in the audio pipeline
+  // (e.g. by a jitter buffer).
+  int frames_received_;
+};
+
+#endif // TALK_APP_WEBRTC_TEST_FAKEAUDIOCAPTUREMODULE_H_
diff --git a/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc b/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
new file mode 100644
index 0000000..5738955
--- /dev/null
+++ b/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
@@ -0,0 +1,212 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/test/fakeaudiocapturemodule.h"
+
+#include <algorithm>
+
+#include "talk/base/gunit.h"
+#include "talk/base/scoped_ref_ptr.h"
+#include "talk/base/thread.h"
+
+using std::min;
+
+// Test fixture that acts as the far end of FakeAudioCaptureModule's
+// webrtc::AudioTransport interface: it counts how often the ADM pushes
+// (recording) and pulls (playout) audio, and loops recorded data back on
+// playout requests.
+class FakeAdmTest : public testing::Test,
+                    public webrtc::AudioTransport {
+ protected:
+  static const int kMsInSecond = 1000;
+
+  FakeAdmTest()
+      : push_iterations_(0),
+        pull_iterations_(0),
+        rec_buffer_bytes_(0) {
+    memset(rec_buffer_, 0, sizeof(rec_buffer_));
+  }
+
+  virtual void SetUp() {
+    fake_audio_capture_module_ = FakeAudioCaptureModule::Create(
+        talk_base::Thread::Current());
+    EXPECT_TRUE(fake_audio_capture_module_.get() != NULL);
+  }
+
+  // Callbacks inherited from webrtc::AudioTransport.
+  // ADM is pushing data: store the frame for later loopback and echo the mic
+  // level back unchanged.
+  virtual int32_t RecordedDataIsAvailable(const void* audioSamples,
+                                          const uint32_t nSamples,
+                                          const uint8_t nBytesPerSample,
+                                          const uint8_t nChannels,
+                                          const uint32_t samplesPerSec,
+                                          const uint32_t totalDelayMS,
+                                          const int32_t clockDrift,
+                                          const uint32_t currentMicLevel,
+                                          const bool keyPressed,
+                                          uint32_t& newMicLevel) {
+    rec_buffer_bytes_ = nSamples * nBytesPerSample;
+    // rec_buffer_bytes_ is unsigned, so an empty frame is the only invalid
+    // "low" size; also reject frames larger than the local buffer.
+    if ((rec_buffer_bytes_ == 0) ||
+        (rec_buffer_bytes_ > FakeAudioCaptureModule::kNumberSamples *
+         FakeAudioCaptureModule::kNumberBytesPerSample)) {
+      ADD_FAILURE();
+      return -1;
+    }
+    memcpy(rec_buffer_, audioSamples, rec_buffer_bytes_);
+    ++push_iterations_;
+    newMicLevel = currentMicLevel;
+    return 0;
+  }
+
+  // ADM is pulling data: return the most recently recorded frame, or silence
+  // if nothing has been recorded yet.
+  virtual int32_t NeedMorePlayData(const uint32_t nSamples,
+                                   const uint8_t nBytesPerSample,
+                                   const uint8_t nChannels,
+                                   const uint32_t samplesPerSec,
+                                   void* audioSamples,
+                                   uint32_t& nSamplesOut) {
+    ++pull_iterations_;
+    const uint32_t audio_buffer_size = nSamples * nBytesPerSample;
+    const uint32_t bytes_out = RecordedDataReceived() ?
+        CopyFromRecBuffer(audioSamples, audio_buffer_size) :
+        GenerateZeroBuffer(audioSamples, audio_buffer_size);
+    nSamplesOut = bytes_out / nBytesPerSample;
+    return 0;
+  }
+
+  int push_iterations() const { return push_iterations_; }
+  int pull_iterations() const { return pull_iterations_; }
+
+  talk_base::scoped_refptr<FakeAudioCaptureModule> fake_audio_capture_module_;
+
+ private:
+  // True once at least one recorded frame has been delivered.
+  bool RecordedDataReceived() const {
+    return rec_buffer_bytes_ != 0;
+  }
+  // Fills |audio_buffer| with silence; returns the number of bytes written.
+  int32_t GenerateZeroBuffer(void* audio_buffer, uint32_t audio_buffer_size) {
+    memset(audio_buffer, 0, audio_buffer_size);
+    return audio_buffer_size;
+  }
+  // Loops back the last recorded frame; the two sizes are expected to match.
+  int32_t CopyFromRecBuffer(void* audio_buffer, uint32_t audio_buffer_size) {
+    EXPECT_EQ(audio_buffer_size, rec_buffer_bytes_);
+    const uint32_t min_buffer_size = min(audio_buffer_size, rec_buffer_bytes_);
+    memcpy(audio_buffer, rec_buffer_, min_buffer_size);
+    return min_buffer_size;
+  }
+
+  int push_iterations_;
+  int pull_iterations_;
+
+  // Holds the most recently pushed frame.
+  char rec_buffer_[FakeAudioCaptureModule::kNumberSamples *
+                   FakeAudioCaptureModule::kNumberBytesPerSample];
+  uint32_t rec_buffer_bytes_;
+};
+
+// Verifies the ProcessThread-facing API of the fake ADM.
+TEST_F(FakeAdmTest, TestProccess) {
+  // Next process call must be some time in the future (or now).
+  EXPECT_LE(0, fake_audio_capture_module_->TimeUntilNextProcess());
+  // A Process() call updates TimeUntilNextProcess(), but there are no
+  // guarantees on timing, so just check that Process() can be called
+  // successfully.
+  EXPECT_LE(0, fake_audio_capture_module_->Process());
+}
+
+// Only playout is started: audio must be pulled via NeedMorePlayData() but
+// never pushed via RecordedDataIsAvailable().
+TEST_F(FakeAdmTest, PlayoutTest) {
+  EXPECT_EQ(0, fake_audio_capture_module_->RegisterAudioCallback(this));
+
+  bool speaker_available = false;
+  EXPECT_EQ(0, fake_audio_capture_module_->SpeakerIsAvailable(
+      &speaker_available));
+  EXPECT_TRUE(speaker_available);
+
+  bool stereo_available = false;
+  EXPECT_EQ(0,
+            fake_audio_capture_module_->StereoPlayoutIsAvailable(
+                &stereo_available));
+  EXPECT_TRUE(stereo_available);
+
+  // Starting playout before it has been initialized must fail.
+  EXPECT_NE(0, fake_audio_capture_module_->StartPlayout());
+  EXPECT_FALSE(fake_audio_capture_module_->PlayoutIsInitialized());
+  EXPECT_FALSE(fake_audio_capture_module_->Playing());
+  EXPECT_EQ(0, fake_audio_capture_module_->StopPlayout());
+
+  EXPECT_EQ(0, fake_audio_capture_module_->InitPlayout());
+  EXPECT_TRUE(fake_audio_capture_module_->PlayoutIsInitialized());
+  EXPECT_FALSE(fake_audio_capture_module_->Playing());
+
+  EXPECT_EQ(0, fake_audio_capture_module_->StartPlayout());
+  EXPECT_TRUE(fake_audio_capture_module_->Playing());
+
+  // Initialize |delay_ms| to a non-zero value to verify that PlayoutDelay()
+  // actually writes the output parameter (the fake ADM reports zero delay).
+  uint16_t delay_ms = 10;
+  EXPECT_EQ(0, fake_audio_capture_module_->PlayoutDelay(&delay_ms));
+  EXPECT_EQ(0, delay_ms);
+
+  EXPECT_TRUE_WAIT(pull_iterations() > 0, kMsInSecond);
+  // Recording was never started, so no data may have been pushed.
+  EXPECT_EQ(0, push_iterations());
+
+  EXPECT_EQ(0, fake_audio_capture_module_->StopPlayout());
+  EXPECT_FALSE(fake_audio_capture_module_->Playing());
+}
+
+// Only recording is started: audio must be pushed via
+// RecordedDataIsAvailable() but never pulled via NeedMorePlayData().
+TEST_F(FakeAdmTest, RecordTest) {
+  EXPECT_EQ(0, fake_audio_capture_module_->RegisterAudioCallback(this));
+
+  bool microphone_available = false;
+  EXPECT_EQ(0, fake_audio_capture_module_->MicrophoneIsAvailable(
+      &microphone_available));
+  EXPECT_TRUE(microphone_available);
+
+  bool stereo_available = false;
+  EXPECT_EQ(0, fake_audio_capture_module_->StereoRecordingIsAvailable(
+      &stereo_available));
+  EXPECT_FALSE(stereo_available);
+
+  // Starting recording before it has been initialized must fail.
+  EXPECT_NE(0, fake_audio_capture_module_->StartRecording());
+  EXPECT_FALSE(fake_audio_capture_module_->Recording());
+  EXPECT_EQ(0, fake_audio_capture_module_->StopRecording());
+
+  EXPECT_EQ(0, fake_audio_capture_module_->InitRecording());
+  EXPECT_EQ(0, fake_audio_capture_module_->StartRecording());
+  EXPECT_TRUE(fake_audio_capture_module_->Recording());
+
+  EXPECT_TRUE_WAIT(push_iterations() > 0, kMsInSecond);
+  // Playout was never started, so no data may have been pulled.
+  EXPECT_EQ(0, pull_iterations());
+
+  EXPECT_EQ(0, fake_audio_capture_module_->StopRecording());
+  EXPECT_FALSE(fake_audio_capture_module_->Recording());
+}
+
+// Starts playout and recording simultaneously; both data paths (push via
+// RecordedDataIsAvailable() and pull via NeedMorePlayData()) must become
+// active.
+TEST_F(FakeAdmTest, DuplexTest) {
+  EXPECT_EQ(0, fake_audio_capture_module_->RegisterAudioCallback(this));
+
+  EXPECT_EQ(0, fake_audio_capture_module_->InitPlayout());
+  EXPECT_EQ(0, fake_audio_capture_module_->StartPlayout());
+
+  EXPECT_EQ(0, fake_audio_capture_module_->InitRecording());
+  EXPECT_EQ(0, fake_audio_capture_module_->StartRecording());
+
+  // Wait for each direction to report at least one iteration.
+  EXPECT_TRUE_WAIT(push_iterations() > 0, kMsInSecond);
+  EXPECT_TRUE_WAIT(pull_iterations() > 0, kMsInSecond);
+
+  EXPECT_EQ(0, fake_audio_capture_module_->StopPlayout());
+  EXPECT_EQ(0, fake_audio_capture_module_->StopRecording());
+}
diff --git a/talk/app/webrtc/test/fakeconstraints.h b/talk/app/webrtc/test/fakeconstraints.h
new file mode 100644
index 0000000..0299afa
--- /dev/null
+++ b/talk/app/webrtc/test/fakeconstraints.h
@@ -0,0 +1,118 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef TALK_APP_WEBRTC_TEST_FAKECONSTRAINTS_H_
+#define TALK_APP_WEBRTC_TEST_FAKECONSTRAINTS_H_
+
+#include <string>
+#include <vector>
+
+#include "talk/app/webrtc/mediaconstraintsinterface.h"
+#include "talk/base/stringencode.h"
+
+namespace webrtc {
+
+// Simple implementation of MediaConstraintsInterface for tests. Constraints
+// are accumulated in two vectors via the Add*/Set* helpers and handed out
+// unmodified through GetMandatory()/GetOptional().
+class FakeConstraints : public webrtc::MediaConstraintsInterface {
+ public:
+  FakeConstraints() { }
+  virtual ~FakeConstraints() { }
+
+  virtual const Constraints& GetMandatory() const {
+    return mandatory_;
+  }
+
+  virtual const Constraints& GetOptional() const {
+    return optional_;
+  }
+
+  // Adds a mandatory constraint; the value is converted to its string
+  // representation with talk_base::ToString.
+  template <class T>
+  void AddMandatory(const std::string& key, const T& value) {
+    mandatory_.push_back(Constraint(key, talk_base::ToString<T>(value)));
+  }
+
+  // Adds an optional constraint; the value is converted to its string
+  // representation with talk_base::ToString.
+  template <class T>
+  void AddOptional(const std::string& key, const T& value) {
+    optional_.push_back(Constraint(key, talk_base::ToString<T>(value)));
+  }
+
+  // Convenience setters for commonly used constraint keys. Note that
+  // repeated calls append new entries; earlier entries for the same key are
+  // not replaced.
+  void SetMandatoryMinAspectRatio(double ratio) {
+    AddMandatory(MediaConstraintsInterface::kMinAspectRatio, ratio);
+  }
+
+  void SetMandatoryMinWidth(int width) {
+    AddMandatory(MediaConstraintsInterface::kMinWidth, width);
+  }
+
+  void SetMandatoryMinHeight(int height) {
+    AddMandatory(MediaConstraintsInterface::kMinHeight, height);
+  }
+
+  void SetOptionalMaxWidth(int width) {
+    AddOptional(MediaConstraintsInterface::kMaxWidth, width);
+  }
+
+  void SetMandatoryMaxFrameRate(int frame_rate) {
+    AddMandatory(MediaConstraintsInterface::kMaxFrameRate, frame_rate);
+  }
+
+  void SetMandatoryReceiveAudio(bool enable) {
+    AddMandatory(MediaConstraintsInterface::kOfferToReceiveAudio, enable);
+  }
+
+  void SetMandatoryReceiveVideo(bool enable) {
+    AddMandatory(MediaConstraintsInterface::kOfferToReceiveVideo, enable);
+  }
+
+  void SetMandatoryUseRtpMux(bool enable) {
+    AddMandatory(MediaConstraintsInterface::kUseRtpMux, enable);
+  }
+
+  void SetMandatoryIceRestart(bool enable) {
+    AddMandatory(MediaConstraintsInterface::kIceRestart, enable);
+  }
+
+  void SetAllowRtpDataChannels() {
+    AddMandatory(MediaConstraintsInterface::kEnableRtpDataChannels, true);
+  }
+
+  void SetOptionalVAD(bool enable) {
+    AddOptional(MediaConstraintsInterface::kVoiceActivityDetection, enable);
+  }
+
+  // Enables SCTP data channels, which require DTLS-SRTP transport.
+  void SetAllowDtlsSctpDataChannels() {
+    AddMandatory(MediaConstraintsInterface::kEnableSctpDataChannels, true);
+    AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp, true);
+  }
+
+ private:
+  Constraints mandatory_;
+  Constraints optional_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_TEST_FAKECONSTRAINTS_H_
diff --git a/talk/app/webrtc/test/fakeperiodicvideocapturer.h b/talk/app/webrtc/test/fakeperiodicvideocapturer.h
new file mode 100644
index 0000000..7f70ae2
--- /dev/null
+++ b/talk/app/webrtc/test/fakeperiodicvideocapturer.h
@@ -0,0 +1,89 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// FakePeriodicVideoCapturer implements a fake cricket::VideoCapturer that
+// creates video frames periodically after it has been started.
+
+#ifndef TALK_APP_WEBRTC_TEST_FAKEPERIODICVIDEOCAPTURER_H_
+#define TALK_APP_WEBRTC_TEST_FAKEPERIODICVIDEOCAPTURER_H_
+
+#include "talk/base/thread.h"
+#include "talk/media/base/fakevideocapturer.h"
+
+namespace webrtc {
+
+// Generates frames periodically after Start() by posting MSG_CREATEFRAME
+// messages to the thread that called Start(); each handled message captures
+// one frame and re-posts itself one frame interval later.
+class FakePeriodicVideoCapturer : public cricket::FakeVideoCapturer {
+ public:
+  FakePeriodicVideoCapturer() {
+    // Advertise a fixed set of common 30 fps I420 formats.
+    std::vector<cricket::VideoFormat> formats;
+    formats.push_back(cricket::VideoFormat(1280, 720,
+        cricket::VideoFormat::FpsToInterval(30), cricket::FOURCC_I420));
+    formats.push_back(cricket::VideoFormat(640, 480,
+        cricket::VideoFormat::FpsToInterval(30), cricket::FOURCC_I420));
+    formats.push_back(cricket::VideoFormat(640, 360,
+        cricket::VideoFormat::FpsToInterval(30), cricket::FOURCC_I420));
+    formats.push_back(cricket::VideoFormat(320, 240,
+        cricket::VideoFormat::FpsToInterval(30), cricket::FOURCC_I420));
+    formats.push_back(cricket::VideoFormat(160, 120,
+        cricket::VideoFormat::FpsToInterval(30), cricket::FOURCC_I420));
+    ResetSupportedFormats(formats);
+  }
+
+  virtual cricket::CaptureState Start(const cricket::VideoFormat& format) {
+    cricket::CaptureState state = FakeVideoCapturer::Start(format);
+    if (state != cricket::CS_FAILED) {
+      // Kick off periodic frame generation on the current thread.
+      talk_base::Thread::Current()->Post(this, MSG_CREATEFRAME);
+    }
+    return state;
+  }
+  virtual void Stop() {
+    // Drop any pending MSG_CREATEFRAME so no further frames are generated.
+    talk_base::Thread::Current()->Clear(this);
+  }
+  // Inherited from MessageHandler.
+  virtual void OnMessage(talk_base::Message* msg) {
+    if (msg->message_id == MSG_CREATEFRAME) {
+      if (IsRunning()) {
+        CaptureFrame();
+        // Schedule the next frame one capture interval from now.
+        talk_base::Thread::Current()->PostDelayed(static_cast<int>(
+            GetCaptureFormat()->interval / talk_base::kNumNanosecsPerMillisec),
+            this, MSG_CREATEFRAME);
+      }
+    } else {
+      FakeVideoCapturer::OnMessage(msg);
+    }
+  }
+
+ private:
+  enum {
+    // Offset 0xFF to make sure this doesn't collide with base class messages.
+    MSG_CREATEFRAME = 0xFF
+  };
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_TEST_FAKEPERIODICVIDEOCAPTURER_H_
diff --git a/talk/app/webrtc/test/fakevideotrackrenderer.h b/talk/app/webrtc/test/fakevideotrackrenderer.h
new file mode 100644
index 0000000..0030a0c
--- /dev/null
+++ b/talk/app/webrtc/test/fakevideotrackrenderer.h
@@ -0,0 +1,70 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_TEST_FAKEVIDEOTRACKRENDERER_H_
+#define TALK_APP_WEBRTC_TEST_FAKEVIDEOTRACKRENDERER_H_
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/media/base/fakevideorenderer.h"
+
+namespace webrtc {
+
+// Test helper that attaches a cricket::FakeVideoRenderer to a video track
+// for its own lifetime and exposes the fake renderer's counters.
+class FakeVideoTrackRenderer : public VideoRendererInterface {
+ public:
+  // Registers this renderer with |video_track| and keeps a reference to the
+  // track so it can unregister itself in the destructor.
+  explicit FakeVideoTrackRenderer(VideoTrackInterface* video_track)
+      : video_track_(video_track) {
+    video_track_->AddRenderer(this);
+  }
+  ~FakeVideoTrackRenderer() {
+    video_track_->RemoveRenderer(this);
+  }
+
+  // Implements VideoRendererInterface; both calls are forwarded to the
+  // wrapped cricket::FakeVideoRenderer.
+  virtual void SetSize(int width, int height) {
+    fake_renderer_.SetSize(width, height, 0);
+  }
+
+  virtual void RenderFrame(const cricket::VideoFrame* frame) {
+    fake_renderer_.RenderFrame(frame);
+  }
+
+  // Accessors exposing the fake renderer's bookkeeping.
+  int errors() const { return fake_renderer_.errors(); }
+  int width() const { return fake_renderer_.width(); }
+  int height() const { return fake_renderer_.height(); }
+  int num_set_sizes() const { return fake_renderer_.num_set_sizes(); }
+  int num_rendered_frames() const {
+    return fake_renderer_.num_rendered_frames();
+  }
+
+ private:
+  cricket::FakeVideoRenderer fake_renderer_;
+  talk_base::scoped_refptr<VideoTrackInterface> video_track_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_TEST_FAKEVIDEOTRACKRENDERER_H_
diff --git a/talk/app/webrtc/test/mockpeerconnectionobservers.h b/talk/app/webrtc/test/mockpeerconnectionobservers.h
new file mode 100644
index 0000000..e2de379
--- /dev/null
+++ b/talk/app/webrtc/test/mockpeerconnectionobservers.h
@@ -0,0 +1,172 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file contains mock implementations of observers used in PeerConnection.
+
+#ifndef TALK_APP_WEBRTC_TEST_MOCKPEERCONNECTIONOBSERVERS_H_
+#define TALK_APP_WEBRTC_TEST_MOCKPEERCONNECTIONOBSERVERS_H_
+
+#include <string>
+#include <vector>
+
+#include "talk/app/webrtc/datachannelinterface.h"
+
+namespace webrtc {
+
+// Records the outcome of a CreateOffer/CreateAnswer call and takes ownership
+// of the created session description on success.
+class MockCreateSessionDescriptionObserver
+    : public webrtc::CreateSessionDescriptionObserver {
+ public:
+  MockCreateSessionDescriptionObserver()
+      : called_(false),
+        result_(false) {}
+  virtual ~MockCreateSessionDescriptionObserver() {}
+  virtual void OnSuccess(SessionDescriptionInterface* desc) {
+    called_ = true;
+    result_ = true;
+    desc_.reset(desc);  // Takes ownership of |desc|.
+  }
+  virtual void OnFailure(const std::string& error) {
+    called_ = true;
+    result_ = false;
+  }
+  // True once either callback has fired.
+  bool called() const { return called_; }
+  // True if the last callback was OnSuccess().
+  bool result() const { return result_; }
+  // Passes ownership of the received description to the caller; may return
+  // NULL if no success callback has fired.
+  SessionDescriptionInterface* release_desc() {
+    return desc_.release();
+  }
+
+ private:
+  bool called_;
+  bool result_;
+  talk_base::scoped_ptr<SessionDescriptionInterface> desc_;
+};
+
+// Records whether a SetLocalDescription/SetRemoteDescription call completed
+// and whether it succeeded.
+class MockSetSessionDescriptionObserver
+    : public webrtc::SetSessionDescriptionObserver {
+ public:
+  MockSetSessionDescriptionObserver()
+      : called_(false),
+        result_(false) {}
+  virtual ~MockSetSessionDescriptionObserver() {}
+  virtual void OnSuccess() {
+    called_ = true;
+    result_ = true;
+  }
+  virtual void OnFailure(const std::string& error) {
+    called_ = true;
+    result_ = false;
+  }
+  // True once either callback has fired.
+  bool called() const { return called_; }
+  // True if the last callback was OnSuccess().
+  bool result() const { return result_; }
+
+ private:
+  bool called_;
+  bool result_;
+};
+
+// Observer that tracks a data channel's state and remembers the payload of
+// the last received message.
+class MockDataChannelObserver : public webrtc::DataChannelObserver {
+ public:
+  // Registers itself with |channel| for its own lifetime and captures the
+  // channel's initial state.
+  explicit MockDataChannelObserver(webrtc::DataChannelInterface* channel)
+      : channel_(channel) {
+    channel_->RegisterObserver(this);
+    state_ = channel_->state();
+  }
+  virtual ~MockDataChannelObserver() {
+    channel_->UnregisterObserver();
+  }
+
+  virtual void OnStateChange() { state_ = channel_->state(); }
+  virtual void OnMessage(const DataBuffer& buffer) {
+    last_message_.assign(buffer.data.data(), buffer.data.length());
+  }
+
+  bool IsOpen() const { return state_ == DataChannelInterface::kOpen; }
+  const std::string& last_message() const { return last_message_; }
+
+ private:
+  talk_base::scoped_refptr<webrtc::DataChannelInterface> channel_;
+  DataChannelInterface::DataState state_;
+  std::string last_message_;
+};
+
+// Mock StatsObserver that stores the reports from the last OnComplete()
+// callback and provides helpers to extract common per-ssrc stats values.
+class MockStatsObserver : public webrtc::StatsObserver {
+ public:
+  MockStatsObserver()
+      : called_(false) {}
+  virtual ~MockStatsObserver() {}
+  virtual void OnComplete(const std::vector<webrtc::StatsReport>& reports) {
+    called_ = true;
+    reports_ = reports;
+  }
+
+  // True once OnComplete() has fired at least once.
+  bool called() const { return called_; }
+  size_t number_of_reports() const { return reports_.size(); }
+
+  int AudioOutputLevel() {
+    return GetSsrcStatsValue(
+        webrtc::StatsReport::kStatsValueNameAudioOutputLevel);
+  }
+
+  int AudioInputLevel() {
+    return GetSsrcStatsValue(
+        webrtc::StatsReport::kStatsValueNameAudioInputLevel);
+  }
+
+  int BytesReceived() {
+    return GetSsrcStatsValue(
+        webrtc::StatsReport::kStatsValueNameBytesReceived);
+  }
+
+  int BytesSent() {
+    return GetSsrcStatsValue(webrtc::StatsReport::kStatsValueNameBytesSent);
+  }
+
+ private:
+  // Returns the named value from the first ssrc-type report that contains
+  // it, or 0 if no such value has been reported. |name| is taken by const
+  // reference to avoid copying the string on every lookup.
+  int GetSsrcStatsValue(const std::string& name) {
+    if (reports_.empty()) {
+      return 0;
+    }
+    for (size_t i = 0; i < reports_.size(); ++i) {
+      if (reports_[i].type != StatsReport::kStatsReportTypeSsrc)
+        continue;
+      webrtc::StatsReport::Values::const_iterator it =
+          reports_[i].values.begin();
+      for (; it != reports_[i].values.end(); ++it) {
+        if (it->name == name) {
+          return talk_base::FromString<int>(it->value);
+        }
+      }
+    }
+    return 0;
+  }
+
+  bool called_;
+  std::vector<webrtc::StatsReport> reports_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_TEST_MOCKPEERCONNECTIONOBSERVERS_H_
diff --git a/talk/app/webrtc/test/testsdpstrings.h b/talk/app/webrtc/test/testsdpstrings.h
new file mode 100644
index 0000000..9f95d36
--- /dev/null
+++ b/talk/app/webrtc/test/testsdpstrings.h
@@ -0,0 +1,144 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file contains SDP strings used for testing.
+
+#ifndef TALK_APP_WEBRTC_TEST_TESTSDPSTRINGS_H_
+#define TALK_APP_WEBRTC_TEST_TESTSDPSTRINGS_H_
+
+namespace webrtc {
+
+// SDP offer string from a Nightly Firefox build.
+static const char kFireFoxSdpOffer[] =
+ "v=0\r\n"
+ "o=Mozilla-SIPUA 23551 0 IN IP4 0.0.0.0\r\n"
+ "s=SIP Call\r\n"
+ "t=0 0\r\n"
+ "a=ice-ufrag:e5785931\r\n"
+ "a=ice-pwd:36fb7878390db89481c1d46daa4278d8\r\n"
+ "a=fingerprint:sha-256 A7:24:72:CA:6E:02:55:39:BA:66:DF:6E:CC:4C:D8:B0:1A:"
+ "BF:1A:56:65:7D:F4:03:AD:7E:77:43:2A:29:EC:93\r\n"
+ "m=audio 36993 RTP/SAVPF 109 0 8 101\r\n"
+ "c=IN IP4 74.95.2.170\r\n"
+ "a=rtpmap:109 opus/48000/2\r\n"
+ "a=ptime:20\r\n"
+ "a=rtpmap:0 PCMU/8000\r\n"
+ "a=rtpmap:8 PCMA/8000\r\n"
+ "a=rtpmap:101 telephone-event/8000\r\n"
+ "a=fmtp:101 0-15\r\n"
+ "a=sendrecv\r\n"
+ "a=candidate:0 1 UDP 2112946431 172.16.191.1 61725 typ host\r\n"
+ "a=candidate:2 1 UDP 2112487679 172.16.131.1 58798 typ host\r\n"
+ "a=candidate:4 1 UDP 2113667327 10.0.254.2 58122 typ host\r\n"
+ "a=candidate:5 1 UDP 1694302207 74.95.2.170 36993 typ srflx raddr "
+ "10.0.254.2 rport 58122\r\n"
+ "a=candidate:0 2 UDP 2112946430 172.16.191.1 55025 typ host\r\n"
+ "a=candidate:2 2 UDP 2112487678 172.16.131.1 63576 typ host\r\n"
+ "a=candidate:4 2 UDP 2113667326 10.0.254.2 50962 typ host\r\n"
+ "a=candidate:5 2 UDP 1694302206 74.95.2.170 41028 typ srflx raddr"
+ " 10.0.254.2 rport 50962\r\n"
+ "m=video 38826 RTP/SAVPF 120\r\n"
+ "c=IN IP4 74.95.2.170\r\n"
+ "a=rtpmap:120 VP8/90000\r\n"
+ "a=sendrecv\r\n"
+ "a=candidate:0 1 UDP 2112946431 172.16.191.1 62017 typ host\r\n"
+ "a=candidate:2 1 UDP 2112487679 172.16.131.1 59741 typ host\r\n"
+ "a=candidate:4 1 UDP 2113667327 10.0.254.2 62652 typ host\r\n"
+ "a=candidate:5 1 UDP 1694302207 74.95.2.170 38826 typ srflx raddr"
+ " 10.0.254.2 rport 62652\r\n"
+ "a=candidate:0 2 UDP 2112946430 172.16.191.1 63440 typ host\r\n"
+ "a=candidate:2 2 UDP 2112487678 172.16.131.1 51847 typ host\r\n"
+ "a=candidate:4 2 UDP 2113667326 10.0.254.2 58890 typ host\r\n"
+ "a=candidate:5 2 UDP 1694302206 74.95.2.170 33611 typ srflx raddr"
+ " 10.0.254.2 rport 58890\r\n"
+ "m=application 45536 SCTP/DTLS 5000\r\n"
+ "c=IN IP4 74.95.2.170\r\n"
+ "a=fmtp:5000 protocol=webrtc-datachannel;streams=16\r\n"
+ "a=sendrecv\r\n"
+ "a=candidate:0 1 UDP 2112946431 172.16.191.1 60248 typ host\r\n"
+ "a=candidate:2 1 UDP 2112487679 172.16.131.1 55925 typ host\r\n"
+ "a=candidate:4 1 UDP 2113667327 10.0.254.2 65268 typ host\r\n"
+ "a=candidate:5 1 UDP 1694302207 74.95.2.170 45536 typ srflx raddr"
+ " 10.0.254.2 rport 65268\r\n"
+ "a=candidate:0 2 UDP 2112946430 172.16.191.1 49162 typ host\r\n"
+ "a=candidate:2 2 UDP 2112487678 172.16.131.1 59635 typ host\r\n"
+ "a=candidate:4 2 UDP 2113667326 10.0.254.2 61232 typ host\r\n"
+ "a=candidate:5 2 UDP 1694302206 74.95.2.170 45468 typ srflx raddr"
+ " 10.0.254.2 rport 61232\r\n";
+
+// Audio SDP with a limited set of audio codecs.
+static const char kAudioSdp[] =
+ "v=0\r\n"
+ "o=- 7859371131 2 IN IP4 192.168.30.208\r\n"
+ "s=-\r\n"
+ "c=IN IP4 192.168.30.208\r\n"
+ "t=0 0\r\n"
+ "m=audio 16000 RTP/SAVPF 0 8 126\r\n"
+ "a=rtpmap:0 PCMU/8000\r\n"
+ "a=rtpmap:8 PCMA/8000\r\n"
+ "a=rtpmap:126 telephone-event/8000\r\n"
+ "a=sendrecv\r\n"
+ "a=rtcp:16000 IN IP4 192.168.30.208\r\n"
+ "a=rtcp-mux\r\n"
+ "a=crypto:1 AES_CM_128_HMAC_SHA1_80 "
+ "inline:tvKIFjbMQ7W0/C2RzhwN0oQglj/7GJg+frdsNRxt\r\n"
+ "a=ice-ufrag:AI2sRT3r\r\n"
+ "a=ice-pwd:lByS9z2RSQlSE9XurlvjYmEm\r\n"
+ "a=ssrc:4227871655 cname:GeAAgb6XCPNLVMX5\r\n"
+ "a=ssrc:4227871655 msid:1NFAV3iD08ioO2339rQS9pfOI9mDf6GeG9F4 a0\r\n"
+ "a=ssrc:4227871655 mslabel:1NFAV3iD08ioO2339rQS9pfOI9mDf6GeG9F4\r\n"
+ "a=ssrc:4227871655 label:1NFAV3iD08ioO2339rQS9pfOI9mDf6GeG9F4a0\r\n"
+ "a=mid:audio\r\n";
+
+static const char kAudioSdpWithUnsupportedCodecs[] =
+ "v=0\r\n"
+ "o=- 6858750541 2 IN IP4 192.168.30.208\r\n"
+ "s=-\r\n"
+ "c=IN IP4 192.168.30.208\r\n"
+ "t=0 0\r\n"
+ "m=audio 16000 RTP/SAVPF 0 8 18 110 126\r\n"
+ "a=rtpmap:0 PCMU/8000\r\n"
+ "a=rtpmap:8 PCMA/8000\r\n"
+ "a=rtpmap:18 WeirdCodec1/8000\r\n"
+ "a=rtpmap:110 WeirdCodec2/8000\r\n"
+ "a=rtpmap:126 telephone-event/8000\r\n"
+ "a=sendonly\r\n"
+ "a=rtcp:16000 IN IP4 192.168.30.208\r\n"
+ "a=rtcp-mux\r\n"
+ "a=crypto:1 AES_CM_128_HMAC_SHA1_80 "
+ "inline:tvKIFjbMQ7W0/C2RzhwN0oQglj/7GJg+frdsNRxt\r\n"
+ "a=ice-ufrag:AI2sRT3r\r\n"
+ "a=ice-pwd:lByS9z2RSQlSE9XurlvjYmEm\r\n"
+ "a=ssrc:4227871655 cname:TsmD02HRfhkJBm4m\r\n"
+ "a=ssrc:4227871655 msid:7nU0TApbB-n4dfPlCplWT9QTEsbBDS1IlpW3 a0\r\n"
+ "a=ssrc:4227871655 mslabel:7nU0TApbB-n4dfPlCplWT9QTEsbBDS1IlpW3\r\n"
+ "a=ssrc:4227871655 label:7nU0TApbB-n4dfPlCplWT9QTEsbBDS1IlpW3a0\r\n"
+ "a=mid:audio\r\n";
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_TEST_TESTSDPSTRINGS_H_
diff --git a/talk/app/webrtc/videosourceinterface.h b/talk/app/webrtc/videosourceinterface.h
new file mode 100644
index 0000000..ae968f7
--- /dev/null
+++ b/talk/app/webrtc/videosourceinterface.h
@@ -0,0 +1,57 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_VIDEOSOURCEINTERFACE_H_
+#define TALK_APP_WEBRTC_VIDEOSOURCEINTERFACE_H_
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/media/base/mediachannel.h"
+
+namespace webrtc {
+
+// VideoSourceInterface is a reference counted source used for VideoTracks.
+// The same source can be used in multiple VideoTracks.
+// The methods are only supposed to be called by the PeerConnection
+// implementation.
+class VideoSourceInterface : public MediaSourceInterface {
+ public:
+  // Get access to the source implementation of cricket::VideoCapturer.
+  // This can be used for receiving frames and state notifications.
+  // But it should not be used for starting or stopping capturing.
+  virtual cricket::VideoCapturer* GetVideoCapturer() = 0;
+  // Adds |output| to the source to receive frames.
+  virtual void AddSink(cricket::VideoRenderer* output) = 0;
+  // Removes |output| so that it no longer receives frames from the source.
+  virtual void RemoveSink(cricket::VideoRenderer* output) = 0;
+  // Capture options for this source. NOTE(review): lifetime of the returned
+  // pointer is not specified here — assumed valid while the source lives;
+  // confirm against the implementation.
+  virtual const cricket::VideoOptions* options() const = 0;
+
+ protected:
+  // Non-public: instances are reference counted (see class comment above)
+  // and must not be destroyed through explicit delete.
+  virtual ~VideoSourceInterface() {}
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_VIDEOSOURCEINTERFACE_H_
diff --git a/talk/app/webrtc/videosourceproxy.h b/talk/app/webrtc/videosourceproxy.h
new file mode 100644
index 0000000..be80077
--- /dev/null
+++ b/talk/app/webrtc/videosourceproxy.h
@@ -0,0 +1,51 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_VIDEOSOURCEPROXY_H_
+#define TALK_APP_WEBRTC_VIDEOSOURCEPROXY_H_
+
+#include "talk/app/webrtc/proxy.h"
+#include "talk/app/webrtc/videosourceinterface.h"
+
+namespace webrtc {
+
+// VideoSourceProxy makes sure the real VideoSourceInterface implementation is
+// destroyed on the signaling thread and marshals all method calls to the
+// signaling thread.
+// Forwards the VideoSourceInterface methods — plus state() and observer
+// (un)registration, which presumably come from the base interfaces (confirm
+// in mediastreaminterface.h) — to the real implementation on the signaling
+// thread via the macros in proxy.h.
+BEGIN_PROXY_MAP(VideoSource)
+  PROXY_CONSTMETHOD0(SourceState, state)
+  PROXY_METHOD0(cricket::VideoCapturer*, GetVideoCapturer)
+  PROXY_METHOD1(void, AddSink, cricket::VideoRenderer*)
+  PROXY_METHOD1(void, RemoveSink, cricket::VideoRenderer*)
+  PROXY_CONSTMETHOD0(const cricket::VideoOptions*, options)
+  PROXY_METHOD1(void, RegisterObserver, ObserverInterface*)
+  PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*)
+END_PROXY()
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_VIDEOSOURCEPROXY_H_
diff --git a/talk/app/webrtc/videotrack.cc b/talk/app/webrtc/videotrack.cc
new file mode 100644
index 0000000..ec17ec7
--- /dev/null
+++ b/talk/app/webrtc/videotrack.cc
@@ -0,0 +1,78 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "talk/app/webrtc/videotrack.h"
+
+#include <string>
+
+#include "talk/media/webrtc/webrtcvideocapturer.h"
+
+namespace webrtc {
+
+static const char kVideoTrackKind[] = "video";
+
+// Constructs a track with the given |label|. |video_source| may be NULL (see
+// videotrack_unittest.cc); when present, the track registers its renderer
+// fan-out as a sink so frames produced by the source reach every renderer
+// added to this track.
+VideoTrack::VideoTrack(const std::string& label,
+                       VideoSourceInterface* video_source)
+    : MediaStreamTrack<VideoTrackInterface>(label),
+      video_source_(video_source) {
+  if (video_source_)
+    video_source_->AddSink(FrameInput());
+}
+
+VideoTrack::~VideoTrack() {
+  // Detach from the source first so no frame can arrive while the members
+  // are being destroyed.
+  if (video_source_)
+    video_source_->RemoveSink(FrameInput());
+}
+
+// Returns the track kind string, "video" (kVideoTrackKind above).
+std::string VideoTrack::kind() const {
+  return kVideoTrackKind;
+}
+
+// Adds |renderer| to the fan-out; it will receive subsequently rendered
+// frames.
+void VideoTrack::AddRenderer(VideoRendererInterface* renderer) {
+  renderers_.AddRenderer(renderer);
+}
+
+// Stops delivering frames to |renderer|.
+void VideoTrack::RemoveRenderer(VideoRendererInterface* renderer) {
+  renderers_.RemoveRenderer(renderer);
+}
+
+// The cricket::VideoRenderer accepting frames for this track: frames
+// rendered into it are distributed to all renderers added above.
+cricket::VideoRenderer* VideoTrack::FrameInput() {
+  return &renderers_;
+}
+
+// Enables/disables the track. The renderer fan-out is updated as well, so a
+// disabled track drops incoming frames instead of forwarding them (see
+// VideoTrackRenderers::RenderFrame).
+bool VideoTrack::set_enabled(bool enable) {
+  renderers_.SetEnabled(enable);
+  return MediaStreamTrack<VideoTrackInterface>::set_enabled(enable);
+}
+
+// Factory method: returns a ref-counted VideoTrack. |source| may be NULL.
+talk_base::scoped_refptr<VideoTrack> VideoTrack::Create(
+    const std::string& id, VideoSourceInterface* source) {
+  talk_base::RefCountedObject<VideoTrack>* track =
+      new talk_base::RefCountedObject<VideoTrack>(id, source);
+  return track;
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/videotrack.h b/talk/app/webrtc/videotrack.h
new file mode 100644
index 0000000..aefeb50
--- /dev/null
+++ b/talk/app/webrtc/videotrack.h
@@ -0,0 +1,65 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_VIDEOTRACK_H_
+#define TALK_APP_WEBRTC_VIDEOTRACK_H_
+
+#include <string>
+
+#include "talk/app/webrtc/mediastreamtrack.h"
+#include "talk/app/webrtc/videosourceinterface.h"
+#include "talk/app/webrtc/videotrackrenderers.h"
+#include "talk/base/scoped_ref_ptr.h"
+
+namespace webrtc {
+
+// VideoTrackInterface implementation. Connects an (optional)
+// VideoSourceInterface to a set of VideoRendererInterface objects through an
+// owned, thread-safe VideoTrackRenderers fan-out.
+class VideoTrack : public MediaStreamTrack<VideoTrackInterface> {
+ public:
+  // Creates a ref-counted VideoTrack. |source| may be NULL, in which case
+  // frames can only be delivered directly through FrameInput().
+  static talk_base::scoped_refptr<VideoTrack> Create(
+      const std::string& label, VideoSourceInterface* source);
+
+  virtual void AddRenderer(VideoRendererInterface* renderer);
+  virtual void RemoveRenderer(VideoRendererInterface* renderer);
+  // Returns the cricket::VideoRenderer that accepts frames for this track.
+  virtual cricket::VideoRenderer* FrameInput();
+  virtual VideoSourceInterface* GetSource() const {
+    return video_source_.get();
+  }
+  // Also toggles frame forwarding in |renderers_|.
+  virtual bool set_enabled(bool enable);
+  virtual std::string kind() const;  // Returns "video".
+
+ protected:
+  // Use Create(); instances are reference counted.
+  VideoTrack(const std::string& id, VideoSourceInterface* video_source);
+  ~VideoTrack();
+
+ private:
+  VideoTrackRenderers renderers_;  // Fan-out to all added renderers.
+  talk_base::scoped_refptr<VideoSourceInterface> video_source_;  // May be NULL.
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_VIDEOTRACK_H_
diff --git a/talk/app/webrtc/videotrack_unittest.cc b/talk/app/webrtc/videotrack_unittest.cc
new file mode 100644
index 0000000..671e360
--- /dev/null
+++ b/talk/app/webrtc/videotrack_unittest.cc
@@ -0,0 +1,84 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+
+#include "talk/app/webrtc/test/fakevideotrackrenderer.h"
+#include "talk/app/webrtc/videotrack.h"
+#include "talk/base/gunit.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/media/webrtc/webrtcvideoframe.h"
+
+using webrtc::FakeVideoTrackRenderer;
+using webrtc::VideoTrack;
+using webrtc::VideoTrackInterface;
+
+// Test adding renderers to a video track and render to them by providing
+// VideoFrames to the track frame input.
+TEST(VideoTrack, RenderVideo) {
+  static const char kVideoTrackId[] = "track_id";
+  // A NULL source is allowed; frames are fed manually via FrameInput().
+  talk_base::scoped_refptr<VideoTrackInterface> video_track(
+      VideoTrack::Create(kVideoTrackId, NULL));
+  // FakeVideoTrackRenderer registers itself with |video_track|.
+  talk_base::scoped_ptr<FakeVideoTrackRenderer> renderer_1(
+      new FakeVideoTrackRenderer(video_track.get()));
+
+  cricket::VideoRenderer* render_input = video_track->FrameInput();
+  ASSERT_FALSE(render_input == NULL);
+
+  // A size set on the frame input must be forwarded to the renderer.
+  render_input->SetSize(123, 123, 0);
+  EXPECT_EQ(1, renderer_1->num_set_sizes());
+  EXPECT_EQ(123, renderer_1->width());
+  EXPECT_EQ(123, renderer_1->height());
+
+  cricket::WebRtcVideoFrame frame;
+  frame.InitToBlack(123, 123, 1, 1, 0, 0);
+  render_input->RenderFrame(&frame);
+  EXPECT_EQ(1, renderer_1->num_rendered_frames());
+
+  // FakeVideoTrackRenderer registers itself with |video_track|. A renderer
+  // added after SetSize() must still learn the cached size; it is delivered
+  // lazily with the next rendered frame.
+  talk_base::scoped_ptr<FakeVideoTrackRenderer> renderer_2(
+      new FakeVideoTrackRenderer(video_track.get()));
+
+  render_input->RenderFrame(&frame);
+
+  EXPECT_EQ(1, renderer_1->num_set_sizes());
+  EXPECT_EQ(123, renderer_1->width());
+  EXPECT_EQ(123, renderer_1->height());
+  EXPECT_EQ(1, renderer_2->num_set_sizes());
+  EXPECT_EQ(123, renderer_2->width());
+  EXPECT_EQ(123, renderer_2->height());
+
+  EXPECT_EQ(2, renderer_1->num_rendered_frames());
+  EXPECT_EQ(1, renderer_2->num_rendered_frames());
+
+  // After removal, renderer_1 must not receive further frames.
+  video_track->RemoveRenderer(renderer_1.get());
+  render_input->RenderFrame(&frame);
+
+  EXPECT_EQ(2, renderer_1->num_rendered_frames());
+  EXPECT_EQ(2, renderer_2->num_rendered_frames());
+}
diff --git a/talk/app/webrtc/videotrackrenderers.cc b/talk/app/webrtc/videotrackrenderers.cc
new file mode 100644
index 0000000..b0e0c1f
--- /dev/null
+++ b/talk/app/webrtc/videotrackrenderers.cc
@@ -0,0 +1,94 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "talk/app/webrtc/videotrackrenderers.h"
+
+namespace webrtc {
+
+// Starts with no cached frame size and with frame forwarding enabled.
+VideoTrackRenderers::VideoTrackRenderers()
+    : width_(0),
+      height_(0),
+      enabled_(true) {
+}
+
+VideoTrackRenderers::~VideoTrackRenderers() {
+}
+
+// Registers |renderer| to receive frames. Adding the same renderer twice is
+// a no-op: the list is scanned first and duplicates are never stored.
+void VideoTrackRenderers::AddRenderer(VideoRendererInterface* renderer) {
+  talk_base::CritScope cs(&critical_section_);
+  for (size_t i = 0; i < renderers_.size(); ++i) {
+    if (renderers_[i].renderer_ == renderer) {
+      return;  // Already registered.
+    }
+  }
+  renderers_.push_back(RenderObserver(renderer));
+}
+
+// Unregisters |renderer|; does nothing if it was never added. At most one
+// entry can match since AddRenderer never stores duplicates.
+void VideoTrackRenderers::RemoveRenderer(VideoRendererInterface* renderer) {
+  talk_base::CritScope cs(&critical_section_);
+  std::vector<RenderObserver>::iterator it = renderers_.begin();
+  while (it != renderers_.end()) {
+    if (it->renderer_ == renderer) {
+      renderers_.erase(it);
+      return;
+    }
+    ++it;
+  }
+}
+
+// Toggles frame forwarding; while disabled, RenderFrame drops frames.
+void VideoTrackRenderers::SetEnabled(bool enable) {
+  talk_base::CritScope cs(&critical_section_);
+  enabled_ = enable;
+}
+
+// Caches the frame size and pushes it to every registered renderer. The
+// cached value lets renderers added later receive the size lazily in
+// RenderFrame(). |reserved| is unused. Always reports success.
+bool VideoTrackRenderers::SetSize(int width, int height, int reserved) {
+  talk_base::CritScope cs(&critical_section_);
+  width_ = width;
+  height_ = height;
+  std::vector<RenderObserver>::iterator it = renderers_.begin();
+  for (; it != renderers_.end(); ++it) {
+    it->renderer_->SetSize(width, height);
+    it->size_set_ = true;
+  }
+  return true;
+}
+
+// Distributes |frame| to all registered renderers. Frames are dropped (but
+// still reported as handled) while disabled. Renderers that joined after the
+// last SetSize() call get the cached size delivered before their first frame.
+bool VideoTrackRenderers::RenderFrame(const cricket::VideoFrame* frame) {
+  talk_base::CritScope cs(&critical_section_);
+  if (!enabled_) {
+    return true;
+  }
+  std::vector<RenderObserver>::iterator it = renderers_.begin();
+  for (; it != renderers_.end(); ++it) {
+    if (!it->size_set_) {
+      it->renderer_->SetSize(width_, height_);
+      it->size_set_ = true;
+    }
+    it->renderer_->RenderFrame(frame);
+  }
+  return true;
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/videotrackrenderers.h b/talk/app/webrtc/videotrackrenderers.h
new file mode 100644
index 0000000..4bcf6a3
--- /dev/null
+++ b/talk/app/webrtc/videotrackrenderers.h
@@ -0,0 +1,77 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_VIDEOTRACKRENDERERS_H_
+#define TALK_APP_WEBRTC_VIDEOTRACKRENDERERS_H_
+
+#include <vector>
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/base/criticalsection.h"
+#include "talk/media/base/videorenderer.h"
+
+namespace webrtc {
+
+// Class used for rendering cricket::VideoFrames to multiple renderers of type
+// VideoRendererInterface.
+// Each VideoTrack owns a VideoTrackRenderers instance.
+// The class is thread safe. Rendering to the added VideoRendererInterfaces is
+// done on the same thread as the cricket::VideoRenderer.
+class VideoTrackRenderers : public cricket::VideoRenderer {
+ public:
+  VideoTrackRenderers();
+  ~VideoTrackRenderers();
+
+  // Implements cricket::VideoRenderer. SetSize caches the dimensions and
+  // forwards them; RenderFrame fans the frame out (no-op while disabled).
+  virtual bool SetSize(int width, int height, int reserved);
+  virtual bool RenderFrame(const cricket::VideoFrame* frame);
+
+  // Renderer registration. Adding a renderer twice is a no-op; removing an
+  // unknown renderer is ignored.
+  void AddRenderer(VideoRendererInterface* renderer);
+  void RemoveRenderer(VideoRendererInterface* renderer);
+  // While disabled, incoming frames are silently dropped.
+  void SetEnabled(bool enable);
+
+ private:
+  // A registered renderer plus whether it has been told the current frame
+  // size yet (renderers added after SetSize get it lazily on the next frame).
+  struct RenderObserver {
+    explicit RenderObserver(VideoRendererInterface* renderer)
+        : renderer_(renderer),
+          size_set_(false) {
+    }
+    VideoRendererInterface* renderer_;  // Not owned.
+    bool size_set_;
+  };
+
+  int width_;     // Last size passed to SetSize (0 until the first call).
+  int height_;
+  bool enabled_;  // Initially true.
+  std::vector<RenderObserver> renderers_;
+
+  talk_base::CriticalSection critical_section_;  // Protects the above variables
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_VIDEOTRACKRENDERERS_H_
diff --git a/talk/app/webrtc/webrtc.scons b/talk/app/webrtc/webrtc.scons
new file mode 100644
index 0000000..0cbe756
--- /dev/null
+++ b/talk/app/webrtc/webrtc.scons
@@ -0,0 +1,88 @@
+# -*- Python -*-
+import talk
+
+Import('env')
+
+# For peerconnection, we need additional flags only for GCC 4.6+.
+peerconnection_lin_ccflags = []
+
+if env.Bit('linux'):
+  # Detect the GCC version and update peerconnection flags.
+  (major, minor, rev) = env.GetGccVersion()
+  if major > 4 or (major == 4 and minor >= 6):
+    peerconnection_lin_ccflags = ['-Wno-error=unused-but-set-variable']
+
+
+# peerconnection is only built when both the voice and the video engine are
+# available.
+if env.Bit('have_webrtc_voice') and env.Bit('have_webrtc_video'):
+  # local sources
+  talk.Library(
+    env,
+    name = 'peerconnection',
+    srcs = [
+      'audiotrack.cc',
+      'jsepicecandidate.cc',
+      'jsepsessiondescription.cc',
+      'mediaconstraintsinterface.cc',
+      'mediastream.cc',
+      'mediastreamhandler.cc',
+      'mediastreamproxy.cc',
+      'mediastreamsignaling.cc',
+      'mediastreamtrackproxy.cc',
+      'peerconnectionfactory.cc',
+      'peerconnection.cc',
+      'portallocatorfactory.cc',
+      'roapmessages.cc',
+      'roapsession.cc',
+      'roapsignaling.cc',
+      'videorendererimpl.cc',
+      'videotrack.cc',
+      'webrtcsdp.cc',
+      'webrtcsession.cc',
+    ],
+    lin_ccflags = peerconnection_lin_ccflags
+  )
+
+  # Unit tests for the peerconnection library and its test helpers.
+  talk.Unittest(
+    env,
+    name = 'peerconnection',
+    srcs = [
+      'test/fakeaudiocapturemodule.cc',
+      'test/fakeaudiocapturemodule_unittest.cc',
+      'test/fakevideocapturemodule.cc',
+      'test/fileframesource.cc',
+      'test/i420framesource.cc',
+      'test/staticframesource.cc',
+      'jsepsessiondescription_unittest.cc',
+      'mediastream_unittest.cc',
+      'mediastreamhandler_unittest.cc',
+      'mediastreamsignaling_unittest.cc',
+      'peerconnectioninterface_unittest.cc',
+      'peerconnection_unittest.cc',
+      'peerconnectionfactory_unittest.cc',
+      'roapmessages_unittest.cc',
+      'roapsession_unittest.cc',
+      'roapsignaling_unittest.cc',
+      'webrtcsdp_unittest.cc',
+      'webrtcsession_unittest.cc',
+    ],
+    libs = [
+      'base',
+      'expat',
+      'json',
+      'p2p',
+      'phone',
+      'srtp',
+      'xmllite',
+      'xmpp',
+      'yuvscaler',
+      'peerconnection',
+    ],
+    # Debug builds pass /nodefaultlib:libcmt; release builds add no flag.
+    win_link_flags = [('', '/nodefaultlib:libcmt')[env.Bit('debug')]],
+    lin_libs = [
+      'sound',
+    ],
+    mac_libs = [
+      'crypto',
+      'ssl',
+    ],
+  )
diff --git a/talk/app/webrtc/webrtcsdp.cc b/talk/app/webrtc/webrtcsdp.cc
new file mode 100644
index 0000000..f91db8d
--- /dev/null
+++ b/talk/app/webrtc/webrtcsdp.cc
@@ -0,0 +1,2885 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/webrtcsdp.h"
+
+#include <limits.h>
+#include <stdio.h>
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "talk/app/webrtc/jsepicecandidate.h"
+#include "talk/app/webrtc/jsepsessiondescription.h"
+#include "talk/base/common.h"
+#include "talk/base/logging.h"
+#include "talk/base/messagedigest.h"
+#include "talk/base/stringutils.h"
+#include "talk/media/base/codec.h"
+#include "talk/media/base/constants.h"
+#include "talk/media/base/cryptoparams.h"
+#include "talk/p2p/base/candidate.h"
+#include "talk/p2p/base/constants.h"
+#include "talk/p2p/base/port.h"
+#include "talk/session/media/mediasession.h"
+#include "talk/session/media/mediasessionclient.h"
+
+using cricket::AudioContentDescription;
+using cricket::Candidate;
+using cricket::Candidates;
+using cricket::ContentDescription;
+using cricket::ContentInfo;
+using cricket::CryptoParams;
+using cricket::DataContentDescription;
+using cricket::ICE_CANDIDATE_COMPONENT_RTP;
+using cricket::ICE_CANDIDATE_COMPONENT_RTCP;
+using cricket::kCodecParamMaxBitrate;
+using cricket::kCodecParamMaxPTime;
+using cricket::kCodecParamMaxQuantization;
+using cricket::kCodecParamMinBitrate;
+using cricket::kCodecParamMinPTime;
+using cricket::kCodecParamPTime;
+using cricket::kCodecParamSPropStereo;
+using cricket::kCodecParamStereo;
+using cricket::kCodecParamUseInbandFec;
+using cricket::kCodecParamSctpProtocol;
+using cricket::kCodecParamSctpStreams;
+using cricket::kWildcardPayloadType;
+using cricket::MediaContentDescription;
+using cricket::MediaType;
+using cricket::NS_JINGLE_ICE_UDP;
+using cricket::RtpHeaderExtension;
+using cricket::SsrcGroup;
+using cricket::StreamParams;
+using cricket::StreamParamsVec;
+using cricket::TransportDescription;
+using cricket::TransportInfo;
+using cricket::VideoContentDescription;
+using talk_base::SocketAddress;
+
+typedef std::vector<RtpHeaderExtension> RtpHeaderExtensions;
+
+namespace cricket {
+class SessionDescription;
+}
+
+namespace webrtc {
+
+// Line type
+// RFC 4566
+// An SDP session description consists of a number of lines of text of
+// the form:
+// <type>=<value>
+// where <type> MUST be exactly one case-significant character.
+static const int kLinePrefixLength = 2;  // Length of <type>=
+static const char kLineTypeVersion = 'v';
+static const char kLineTypeOrigin = 'o';
+static const char kLineTypeSessionName = 's';
+static const char kLineTypeSessionInfo = 'i';
+static const char kLineTypeSessionUri = 'u';
+static const char kLineTypeSessionEmail = 'e';
+static const char kLineTypeSessionPhone = 'p';
+static const char kLineTypeSessionBandwidth = 'b';
+static const char kLineTypeTiming = 't';
+static const char kLineTypeRepeatTimes = 'r';
+static const char kLineTypeTimeZone = 'z';
+static const char kLineTypeEncryptionKey = 'k';
+static const char kLineTypeMedia = 'm';
+static const char kLineTypeConnection = 'c';
+static const char kLineTypeAttributes = 'a';
+
+// Attributes
+static const char kAttributeGroup[] = "group";
+static const char kAttributeMid[] = "mid";
+static const char kAttributeRtcpMux[] = "rtcp-mux";
+static const char kAttributeSsrc[] = "ssrc";
+static const char kSsrcAttributeCname[] = "cname";
+static const char kAttributeExtmap[] = "extmap";
+// draft-alvestrand-mmusic-msid-01
+// a=msid-semantic: WMS
+static const char kAttributeMsidSemantics[] = "msid-semantic";
+static const char kMediaStreamSemantic[] = "WMS";
+static const char kSsrcAttributeMsid[] = "msid";
+static const char kDefaultMsid[] = "default";
+static const char kMsidAppdataAudio[] = "a";
+static const char kMsidAppdataVideo[] = "v";
+static const char kMsidAppdataData[] = "d";
+static const char kSsrcAttributeMslabel[] = "mslabel";
+static const char kSSrcAttributeLabel[] = "label";
+static const char kAttributeSsrcGroup[] = "ssrc-group";
+static const char kAttributeCrypto[] = "crypto";
+static const char kAttributeCandidate[] = "candidate";
+static const char kAttributeCandidateTyp[] = "typ";
+static const char kAttributeCandidateRaddr[] = "raddr";
+static const char kAttributeCandidateRport[] = "rport";
+static const char kAttributeCandidateUsername[] = "username";
+static const char kAttributeCandidatePassword[] = "password";
+static const char kAttributeCandidateGeneration[] = "generation";
+static const char kAttributeFingerprint[] = "fingerprint";
+static const char kAttributeFmtp[] = "fmtp";
+static const char kAttributeRtpmap[] = "rtpmap";
+static const char kAttributeRtcp[] = "rtcp";
+static const char kAttributeIceUfrag[] = "ice-ufrag";
+static const char kAttributeIcePwd[] = "ice-pwd";
+static const char kAttributeIceLite[] = "ice-lite";
+static const char kAttributeIceOption[] = "ice-options";
+static const char kAttributeSendOnly[] = "sendonly";
+static const char kAttributeRecvOnly[] = "recvonly";
+static const char kAttributeRtcpFb[] = "rtcp-fb";
+static const char kAttributeSendRecv[] = "sendrecv";
+static const char kAttributeInactive[] = "inactive";
+
+// Experimental flags
+static const char kAttributeXGoogleFlag[] = "x-google-flag";
+static const char kValueConference[] = "conference";
+static const char kAttributeXGoogleBufferLatency[] =
+ "x-google-buffer-latency";
+
+// Candidate
+static const char kCandidateHost[] = "host";
+static const char kCandidateSrflx[] = "srflx";
+// TODO: How to map the prflx to the cricket candidate type
+// static const char kCandidatePrflx[] = "prflx";
+static const char kCandidateRelay[] = "relay";
+
+static const char kSdpDelimiterEqual = '=';
+static const char kSdpDelimiterSpace = ' ';
+static const char kSdpDelimiterColon = ':';
+static const char kSdpDelimiterSemicolon = ';';
+static const char kSdpDelimiterSlash = '/';
+static const char kNewLine = '\n';
+static const char kReturn = '\r';
+static const char kLineBreak[] = "\r\n";
+
+// TODO: Generate the Session and Time description
+// instead of hardcoding.
+static const char kSessionVersion[] = "v=0";
+// RFC 4566
+static const char kSessionOriginUsername[] = "-";
+static const char kSessionOriginSessionId[] = "0";
+static const char kSessionOriginSessionVersion[] = "0";
+static const char kSessionOriginNettype[] = "IN";
+static const char kSessionOriginAddrtype[] = "IP4";
+static const char kSessionOriginAddress[] = "127.0.0.1";
+static const char kSessionName[] = "s=-";
+static const char kTimeDescription[] = "t=0 0";
+static const char kAttrGroup[] = "a=group:BUNDLE";
+static const char kConnectionNettype[] = "IN";
+static const char kConnectionAddrtype[] = "IP4";
+static const char kMediaTypeVideo[] = "video";
+static const char kMediaTypeAudio[] = "audio";
+static const char kMediaTypeData[] = "application";
+static const char kMediaPortRejected[] = "0";
+static const char kDefaultAddress[] = "0.0.0.0";
+static const char kDefaultPort[] = "1";
+// RFC 3556
+static const char kApplicationSpecificMaximum[] = "AS";
+
+static const int kDefaultVideoClockrate = 90000;
+
+// ISAC special-case.
+static const char kIsacCodecName[] = "ISAC"; // From webrtcvoiceengine.cc
+static const int kIsacWbDefaultRate = 32000; // From acm_common_defs.h
+static const int kIsacSwbDefaultRate = 56000; // From acm_common_defs.h
+
+static const int kDefaultSctpFmt = 5000;
+static const char kDefaultSctpFmtProtocol[] = "webrtc-datachannel";
+
+struct SsrcInfo {
+ SsrcInfo()
+ : msid_identifier(kDefaultMsid),
+ // TODO(ronghuawu): What should we do if the appdata doesn't appear?
+ // Create random string (which will be used as track label later)?
+ msid_appdata(talk_base::CreateRandomString(8)) {
+ }
+ uint32 ssrc_id;
+ std::string cname;
+ std::string msid_identifier;
+ std::string msid_appdata;
+
+ // For backward compatibility.
+ // TODO(ronghuawu): Remove below 2 fields once all the clients support msid.
+ std::string label;
+ std::string mslabel;
+};
+typedef std::vector<SsrcInfo> SsrcInfoVec;
+typedef std::vector<SsrcGroup> SsrcGroupVec;
+
// ---- Serialization helpers (SessionDescription -> SDP text) ----

// Serializes the passed in SessionDescription to a SDP string.
// desc - The SessionDescription object to be serialized.
static std::string SdpSerializeSessionDescription(
    const JsepSessionDescription& jdesc);
template <class T>
static void AddFmtpLine(const T& codec, std::string* message);
static void BuildMediaDescription(const ContentInfo* content_info,
                                  const TransportInfo* transport_info,
                                  const MediaType media_type,
                                  std::string* message);
static void BuildSctpContentAttributes(std::string* message);
static void BuildRtpContentAttributes(
    const MediaContentDescription* media_desc,
    const MediaType media_type,
    std::string* message);
static void BuildRtpMap(const MediaContentDescription* media_desc,
                        const MediaType media_type,
                        std::string* message);
static void BuildCandidate(const std::vector<Candidate>& candidates,
                           std::string* message);
static void BuildIceOptions(const std::vector<std::string>& transport_options,
                            std::string* message);

// ---- Parsing helpers (SDP text -> SessionDescription) ----
// All parsers report failures through |error| and return false on failure.

static bool ParseSessionDescription(const std::string& message, size_t* pos,
                                    std::string* session_id,
                                    std::string* session_version,
                                    bool* supports_msid,
                                    TransportDescription* session_td,
                                    RtpHeaderExtensions* session_extmaps,
                                    cricket::SessionDescription* desc,
                                    SdpParseError* error);
static bool ParseGroupAttribute(const std::string& line,
                                cricket::SessionDescription* desc,
                                SdpParseError* error);
static bool ParseMediaDescription(
    const std::string& message,
    const TransportDescription& session_td,
    const RtpHeaderExtensions& session_extmaps,
    bool supports_msid,
    size_t* pos, cricket::SessionDescription* desc,
    std::vector<JsepIceCandidate*>* candidates,
    SdpParseError* error);
static bool ParseContent(const std::string& message,
                         const MediaType media_type,
                         int mline_index,
                         const std::string& protocol,
                         const std::vector<int>& codec_preference,
                         size_t* pos,
                         std::string* content_name,
                         MediaContentDescription* media_desc,
                         TransportDescription* transport,
                         std::vector<JsepIceCandidate*>* candidates,
                         SdpParseError* error);
static bool ParseSsrcAttribute(const std::string& line,
                               SsrcInfoVec* ssrc_infos,
                               SdpParseError* error);
static bool ParseSsrcGroupAttribute(const std::string& line,
                                    SsrcGroupVec* ssrc_groups,
                                    SdpParseError* error);
static bool ParseCryptoAttribute(const std::string& line,
                                 MediaContentDescription* media_desc,
                                 SdpParseError* error);
static bool ParseRtpmapAttribute(const std::string& line,
                                 const MediaType media_type,
                                 const std::vector<int>& codec_preference,
                                 MediaContentDescription* media_desc,
                                 SdpParseError* error);
static bool ParseFmtpAttributes(const std::string& line,
                                const MediaType media_type,
                                MediaContentDescription* media_desc,
                                SdpParseError* error);
static bool ParseFmtpParam(const std::string& line, std::string* parameter,
                           std::string* value, SdpParseError* error);
static bool ParseCandidate(const std::string& message, Candidate* candidate,
                           SdpParseError* error, bool is_raw);
static bool ParseRtcpFbAttribute(const std::string& line,
                                 const MediaType media_type,
                                 MediaContentDescription* media_desc,
                                 SdpParseError* error);
static bool ParseIceOptions(const std::string& line,
                            std::vector<std::string>* transport_options,
                            SdpParseError* error);
static bool ParseExtmap(const std::string& line,
                        RtpHeaderExtension* extmap,
                        SdpParseError* error);
static bool ParseFingerprintAttribute(const std::string& line,
                                      talk_base::SSLFingerprint** fingerprint,
                                      SdpParseError* error);
+
+// Helper functions
+
+// Below ParseFailed*** functions output the line that caused the parsing
+// failure and the detailed reason (|description|) of the failure to |error|.
+// The functions always return false so that they can be used directly in the
+// following way when error happens:
+// "return ParseFailed***(...);"
+
+// The line starting at |line_start| of |message| is the failing line.
+// The reason for the failure should be provided in the |description|.
+// An example of a description could be "unknown character".
+static bool ParseFailed(const std::string& message,
+ size_t line_start,
+ const std::string& description,
+ SdpParseError* error) {
+ // Get the first line of |message| from |line_start|.
+ std::string first_line = message;
+ size_t line_end = message.find(kNewLine, line_start);
+ if (line_end != std::string::npos) {
+ if (line_end > 0 && (message.at(line_end - 1) == kReturn)) {
+ --line_end;
+ }
+ first_line = message.substr(line_start, (line_end - line_start));
+ }
+
+ if (error) {
+ error->line = first_line;
+ error->description = description;
+ }
+ LOG(LS_ERROR) << "Failed to parse: \"" << first_line
+ << "\". Reason: " << description;
+ return false;
+}
+
// |line| is the failing line. The reason for the failure should be
// provided in the |description|.
// Always returns false.
static bool ParseFailed(const std::string& line,
                        const std::string& description,
                        SdpParseError* error) {
  return ParseFailed(line, 0, description, error);
}
+
// Parse failure where the failing SDP line isn't known or there are multiple
// failing lines; |error->line| is left empty.
// Always returns false.
static bool ParseFailed(const std::string& description,
                        SdpParseError* error) {
  return ParseFailed("", description, error);
}
+
+// |line| is the failing line. The failure is due to the fact that |line|
+// doesn't have |expected_fields| fields.
+static bool ParseFailedExpectFieldNum(const std::string& line,
+ int expected_fields,
+ SdpParseError* error) {
+ std::ostringstream description;
+ description << "Expects " << expected_fields << " fields.";
+ return ParseFailed(line, description.str(), error);
+}
+
+// |line| is the failing line. The failure is due to the fact that |line| has
+// less than |expected_min_fields| fields.
+static bool ParseFailedExpectMinFieldNum(const std::string& line,
+ int expected_min_fields,
+ SdpParseError* error) {
+ std::ostringstream description;
+ description << "Expects at least " << expected_min_fields << " fields.";
+ return ParseFailed(line, description.str(), error);
+}
+
+// |line| is the failing line. The failure is due to the fact that it failed to
+// get the value of |attribute|.
+static bool ParseFailedGetValue(const std::string& line,
+ const std::string& attribute,
+ SdpParseError* error) {
+ std::ostringstream description;
+ description << "Failed to get the value of attribute: " << attribute;
+ return ParseFailed(line, description.str(), error);
+}
+
+// The line starting at |line_start| of |message| is the failing line. The
+// failure is due to the line type (e.g. the "m" part of the "m-line")
+// not matching what is expected. The expected line type should be
+// provided as |line_type|.
+static bool ParseFailedExpectLine(const std::string& message,
+ size_t line_start,
+ const char line_type,
+ const std::string& line_value,
+ SdpParseError* error) {
+ std::ostringstream description;
+ description << "Expect line: " << line_type << "=" << line_value;
+ return ParseFailed(message, line_start, description.str(), error);
+}
+
+static bool AddLine(const std::string& line, std::string* message) {
+ if (!message)
+ return false;
+
+ message->append(line);
+ message->append(kLineBreak);
+ return true;
+}
+
+static bool GetLine(const std::string& message,
+ size_t* pos,
+ std::string* line) {
+ size_t line_begin = *pos;
+ size_t line_end = message.find(kNewLine, line_begin);
+ if (line_end == std::string::npos) {
+ return false;
+ }
+ // Update the new start position
+ *pos = line_end + 1;
+ if (line_end > 0 && (message.at(line_end - 1) == kReturn)) {
+ --line_end;
+ }
+ *line = message.substr(line_begin, (line_end - line_begin));
+ const char* cline = line->c_str();
+ // RFC 4566
+ // An SDP session description consists of a number of lines of text of
+ // the form:
+ // <type>=<value>
+ // where <type> MUST be exactly one case-significant character and
+ // <value> is structured text whose format depends on <type>.
+ // Whitespace MUST NOT be used on either side of the "=" sign.
+ if (cline[0] == kSdpDelimiterSpace ||
+ cline[1] != kSdpDelimiterEqual ||
+ cline[2] == kSdpDelimiterSpace) {
+ *pos = line_begin;
+ return false;
+ }
+ return true;
+}
+
+// Init |os| to "|type|=|value|".
+static void InitLine(const char type,
+ const std::string& value,
+ std::ostringstream* os) {
+ os->str("");
+ *os << type << kSdpDelimiterEqual << value;
+}
+
// Init |os| to "a=|attribute|". |os| is reset by InitLine before writing.
static void InitAttrLine(const std::string& attribute, std::ostringstream* os) {
  InitLine(kLineTypeAttributes, attribute, os);
}
+
+// Writes a SDP attribute line based on |attribute| and |value| to |message|.
+static void AddAttributeLine(const std::string& attribute, int value,
+ std::string* message) {
+ std::ostringstream os;
+ InitAttrLine(attribute, &os);
+ os << kSdpDelimiterColon << value;
+ AddLine(os.str(), message);
+}
+
+// Returns the first line of the message without the line breaker.
+static bool GetFirstLine(const std::string& message, std::string* line) {
+ size_t pos = 0;
+ if (!GetLine(message, &pos, line)) {
+ // If GetLine failed, just return the full |message|.
+ *line = message;
+ }
+ return true;
+}
+
+static bool IsLineType(const std::string& message,
+ const char type,
+ size_t line_start) {
+ if (message.size() < line_start + kLinePrefixLength) {
+ return false;
+ }
+ const char* cmessage = message.c_str();
+ return (cmessage[line_start] == type &&
+ cmessage[line_start + 1] == kSdpDelimiterEqual);
+}
+
// Convenience overload: checks |line| from its first character.
static bool IsLineType(const std::string& line,
                       const char type) {
  return IsLineType(line, type, 0);
}
+
+static bool GetLineWithType(const std::string& message, size_t* pos,
+ std::string* line, const char type) {
+ if (!IsLineType(message, type, *pos)) {
+ return false;
+ }
+
+ if (!GetLine(message, pos, line))
+ return false;
+
+ return true;
+}
+
// Returns true if |line| carries |attribute| immediately after the "a="
// prefix. NOTE(review): std::string::compare throws std::out_of_range if
// |line| is shorter than kLinePrefixLength — callers appear to check
// IsLineType first; confirm before calling on arbitrary strings.
static bool HasAttribute(const std::string& line,
                         const std::string& attribute) {
  return (line.compare(kLinePrefixLength, attribute.size(), attribute) == 0);
}
+
+// Verifies the candiate to be of the format candidate:<blah>
+static bool IsRawCandidate(const std::string& line) {
+ // Checking candiadte-attribute is starting with "candidate" str.
+ if (line.compare(0, strlen(kAttributeCandidate), kAttributeCandidate) != 0) {
+ return false;
+ }
+ const size_t first_candidate = line.find(kSdpDelimiterColon);
+ if (first_candidate == std::string::npos)
+ return false;
+ // In this format we only expecting one candiate. If any additional
+ // candidates present, whole string will be discared.
+ const size_t any_other = line.find(kSdpDelimiterColon, first_candidate + 1);
+ return (any_other == std::string::npos);
+}
+
+static bool AddSsrcLine(uint32 ssrc_id, const std::string& attribute,
+ const std::string& value, std::string* message) {
+ // RFC 5576
+ // a=ssrc:<ssrc-id> <attribute>:<value>
+ std::ostringstream os;
+ InitAttrLine(kAttributeSsrc, &os);
+ os << kSdpDelimiterColon << ssrc_id << kSdpDelimiterSpace
+ << attribute << kSdpDelimiterColon << value;
+ return AddLine(os.str(), message);
+}
+
// Splits |message| at the first occurrence of |delimiter| into |field1|
// (before it) and |field2| (after it, possibly empty). Returns false if
// |delimiter| does not occur; the output fields are untouched in that case.
static bool SplitByDelimiter(const std::string& message,
                             const char delimiter,
                             std::string* field1,
                             std::string* field2) {
  const size_t delimiter_pos = message.find(delimiter);
  if (delimiter_pos == std::string::npos) {
    return false;
  }
  field1->assign(message, 0, delimiter_pos);
  field2->assign(message, delimiter_pos + 1, std::string::npos);
  return true;
}
+
+// Get value only from <attribute>:<value>.
+static bool GetValue(const std::string& message, const std::string& attribute,
+ std::string* value, SdpParseError* error) {
+ std::string leftpart;
+ if (!SplitByDelimiter(message, kSdpDelimiterColon, &leftpart, value)) {
+ return ParseFailedGetValue(message, attribute, error);
+ }
+ // The left part should end with the expected attribute.
+ if (leftpart.length() < attribute.length() ||
+ leftpart.compare(leftpart.length() - attribute.length(),
+ attribute.length(), attribute) != 0) {
+ return ParseFailedGetValue(message, attribute, error);
+ }
+ return true;
+}
+
// Lowers |c| safely. Fix: passing a plain char to ::tolower is undefined
// behavior when the value is negative (e.g. non-ASCII bytes on platforms
// where char is signed), so widen through unsigned char first.
static char ToLowerChar(char c) {
  return static_cast<char>(::tolower(static_cast<unsigned char>(c)));
}

// Returns true if |str2| occurs within |str1|, compared case-insensitively.
// Both arguments are taken by value on purpose: they are lowered in place.
static bool CaseInsensitiveFind(std::string str1, std::string str2) {
  std::transform(str1.begin(), str1.end(), str1.begin(), ToLowerChar);
  std::transform(str2.begin(), str2.end(), str2.begin(), ToLowerChar);
  return str1.find(str2) != std::string::npos;
}
+
// Builds StreamParams entries in |tracks| from parsed |ssrc_infos|.
// SSRCs with an empty cname are skipped; SSRCs that resolve to the same
// track id are merged into a single StreamParams (each call to add_ssrc
// appends, while cname/sync_label/id are overwritten by the last SSRC).
void CreateTracksFromSsrcInfos(const SsrcInfoVec& ssrc_infos,
                               StreamParamsVec* tracks) {
  ASSERT(tracks != NULL);
  for (SsrcInfoVec::const_iterator ssrc_info = ssrc_infos.begin();
       ssrc_info != ssrc_infos.end(); ++ssrc_info) {
    if (ssrc_info->cname.empty()) {
      continue;
    }

    std::string sync_label;
    std::string track_id;
    if (ssrc_info->msid_identifier == kDefaultMsid &&
        !ssrc_info->mslabel.empty()) {
      // If there's no msid and there's mslabel, we consider this is a sdp from
      // a older version of client that doesn't support msid.
      // In that case, we use the mslabel and label to construct the track.
      sync_label = ssrc_info->mslabel;
      track_id = ssrc_info->label;
    } else {
      sync_label = ssrc_info->msid_identifier;
      // The appdata consists of the "id" attribute of a MediaStreamTrack, which
      // is corresponding to the "id" attribute of StreamParams.
      track_id = ssrc_info->msid_appdata;
    }
    if (sync_label.empty() || track_id.empty()) {
      ASSERT(false);
      continue;
    }

    // Look for an existing track with the same id to merge this SSRC into.
    StreamParamsVec::iterator track = tracks->begin();
    for (; track != tracks->end(); ++track) {
      if (track->id == track_id) {
        break;
      }
    }
    if (track == tracks->end()) {
      // If we don't find an existing track, create a new one.
      tracks->push_back(StreamParams());
      track = tracks->end() - 1;
    }
    track->add_ssrc(ssrc_info->ssrc_id);
    track->cname = ssrc_info->cname;
    track->sync_label = sync_label;
    track->id = track_id;
  }
}
+
+void GetMediaStreamLabels(const ContentInfo* content,
+ std::set<std::string>* labels) {
+ const MediaContentDescription* media_desc =
+ static_cast<const MediaContentDescription*> (
+ content->description);
+ const cricket::StreamParamsVec& streams = media_desc->streams();
+ for (cricket::StreamParamsVec::const_iterator it = streams.begin();
+ it != streams.end(); ++it) {
+ labels->insert(it->sync_label);
+ }
+}
+
// RFC 5245
// It is RECOMMENDED that default candidates be chosen based on the
// likelihood of those candidates to work with the peer that is being
// contacted. It is RECOMMENDED that relayed > reflexive > host.
// Higher value == more preferred (see GetDefaultDestination).
static const int kPreferenceUnknown = 0;
static const int kPreferenceHost = 1;
static const int kPreferenceReflexive = 2;
static const int kPreferenceRelayed = 3;
+
+static int GetCandidatePreferenceFromType(const std::string& type) {
+ int preference = kPreferenceUnknown;
+ if (type == cricket::LOCAL_PORT_TYPE) {
+ preference = kPreferenceHost;
+ } else if (type == cricket::STUN_PORT_TYPE) {
+ preference = kPreferenceReflexive;
+ } else if (type == cricket::RELAY_PORT_TYPE) {
+ preference = kPreferenceRelayed;
+ } else {
+ ASSERT(false);
+ }
+ return preference;
+}
+
+// Get ip and port of the default destination from the |candidates| with
+// the given value of |component_id|.
+// RFC 5245
+// The value of |component_id| currently supported are 1 (RTP) and 2 (RTCP).
+// TODO: Decide the default destination in webrtcsession and
+// pass it down via SessionDescription.
+static bool GetDefaultDestination(const std::vector<Candidate>& candidates,
+ int component_id, std::string* port, std::string* ip) {
+ *port = kDefaultPort;
+ *ip = kDefaultAddress;
+ int current_preference = kPreferenceUnknown;
+ for (std::vector<Candidate>::const_iterator it = candidates.begin();
+ it != candidates.end(); ++it) {
+ if (it->component() != component_id) {
+ continue;
+ }
+ const int preference = GetCandidatePreferenceFromType(it->type());
+ // See if this candidate is more preferable then the current one.
+ if (preference <= current_preference) {
+ continue;
+ }
+ current_preference = preference;
+ *port = it->address().PortAsString();
+ *ip = it->address().ipaddr().ToString();
+ }
+ return true;
+}
+
// Update the media default destination.
// Rewrites the <port> field of the m-line in |mline| with the default RTP
// candidate's port, then appends a "c=" line and, for RTP media, an
// "a=rtcp:" line derived from |candidates|. |mline| is expected to already
// end with a line break so that appended lines start fresh.
static void UpdateMediaDefaultDestination(
    const std::vector<Candidate>& candidates, std::string* mline) {
  // RFC 4566
  // m=<media> <port> <proto> <fmt> ...
  std::vector<std::string> fields;
  talk_base::split(*mline, kSdpDelimiterSpace, &fields);
  if (fields.size() < 3) {
    return;
  }

  // An empty protocol field or one starting with the RTP prefix marks this
  // as RTP media; only RTP media gets the "a=rtcp:" attribute below.
  bool is_rtp =
      fields[2].empty() ||
      talk_base::starts_with(fields[2].data(),
                             cricket::kMediaProtocolRtpPrefix);

  std::ostringstream os;
  std::string rtp_port, rtp_ip;
  if (GetDefaultDestination(candidates, ICE_CANDIDATE_COMPONENT_RTP,
                            &rtp_port, &rtp_ip)) {
    // Found default RTP candidate.
    // RFC 5245
    // The default candidates are added to the SDP as the default
    // destination for media. For streams based on RTP, this is done by
    // placing the IP address and port of the RTP candidate into the c and m
    // lines, respectively.

    // Update the port in the m line.
    // If this is a m-line with port equal to 0, we don't change it.
    if (fields[1] != kMediaPortRejected) {
      // The port starts right after "<media> " in the original string.
      mline->replace(fields[0].size() + 1,
                     fields[1].size(),
                     rtp_port);
    }
    // Add the c line.
    // RFC 4566
    // c=<nettype> <addrtype> <connection-address>
    InitLine(kLineTypeConnection, kConnectionNettype, &os);
    os << " " << kConnectionAddrtype << " " << rtp_ip;
    AddLine(os.str(), mline);
  }

  if (is_rtp) {
    std::string rtcp_port, rtcp_ip;
    if (GetDefaultDestination(candidates, ICE_CANDIDATE_COMPONENT_RTCP,
                              &rtcp_port, &rtcp_ip)) {
      // Found default RTCP candidate.
      // RFC 5245
      // If the agent is utilizing RTCP, it MUST encode the RTCP candidate
      // using the a=rtcp attribute as defined in RFC 3605.

      // RFC 3605
      // rtcp-attribute =  "a=rtcp:" port  [nettype space addrtype space
      // connection-address] CRLF
      InitAttrLine(kAttributeRtcp, &os);
      os << kSdpDelimiterColon
         << rtcp_port << " "
         << kConnectionNettype << " "
         << kConnectionAddrtype << " "
         << rtcp_ip;
      AddLine(os.str(), mline);
    }
  }
}
+
+// Get candidates according to the mline index from SessionDescriptionInterface.
+static void GetCandidatesByMindex(const SessionDescriptionInterface& desci,
+ int mline_index,
+ std::vector<Candidate>* candidates) {
+ if (!candidates) {
+ return;
+ }
+ const IceCandidateCollection* cc = desci.candidates(mline_index);
+ for (size_t i = 0; i < cc->count(); ++i) {
+ const IceCandidateInterface* candidate = cc->at(i);
+ candidates->push_back(candidate->candidate());
+ }
+}
+
// Serializes |jdesc| to an SDP string, then post-processes each m-line:
// the default destination (port, "c=", "a=rtcp:") and "a=candidate" lines
// are filled in from the candidates stored per m-line index in |jdesc|.
std::string SdpSerialize(const JsepSessionDescription& jdesc) {
  std::string sdp = SdpSerializeSessionDescription(jdesc);

  std::string sdp_with_candidates;
  size_t pos = 0;
  std::string line;
  int mline_index = -1;  // incremented to 0 at the first m-line
  while (GetLine(sdp, &pos, &line)) {
    if (IsLineType(line, kLineTypeMedia)) {
      ++mline_index;
      std::vector<Candidate> candidates;
      GetCandidatesByMindex(jdesc, mline_index, &candidates);
      // Media line may append other lines inside the
      // UpdateMediaDefaultDestination call, so add the kLineBreak here first.
      line.append(kLineBreak);
      UpdateMediaDefaultDestination(candidates, &line);
      sdp_with_candidates.append(line);
      // Build the a=candidate lines.
      BuildCandidate(candidates, &sdp_with_candidates);
    } else {
      // Copy old line to new sdp without change.
      AddLine(line, &sdp_with_candidates);
    }
  }
  sdp = sdp_with_candidates;

  return sdp;
}
+
// Serializes the session-level part of |jdesc| plus the audio/video/data
// media sections (without candidates — see SdpSerialize). Returns "" when
// |jdesc| carries no description.
std::string SdpSerializeSessionDescription(
    const JsepSessionDescription& jdesc) {
  const cricket::SessionDescription* desc = jdesc.description();
  if (!desc) {
    return "";
  }

  std::string message;

  // Session Description.
  AddLine(kSessionVersion, &message);
  // Session Origin
  // RFC 4566
  // o=<username> <sess-id> <sess-version> <nettype> <addrtype>
  // <unicast-address>
  std::ostringstream os;
  InitLine(kLineTypeOrigin, kSessionOriginUsername, &os);
  // Fall back to the hard-coded id/version when |jdesc| provides none.
  const std::string session_id = jdesc.session_id().empty() ?
      kSessionOriginSessionId : jdesc.session_id();
  const std::string session_version = jdesc.session_version().empty() ?
      kSessionOriginSessionVersion : jdesc.session_version();
  os << " " << session_id << " " << session_version << " "
     << kSessionOriginNettype << " " << kSessionOriginAddrtype << " "
     << kSessionOriginAddress;
  AddLine(os.str(), &message);
  AddLine(kSessionName, &message);

  // Time Description.
  AddLine(kTimeDescription, &message);

  // Group
  if (desc->HasGroup(cricket::GROUP_TYPE_BUNDLE)) {
    std::string group_line = kAttrGroup;
    const cricket::ContentGroup* group =
        desc->GetGroupByName(cricket::GROUP_TYPE_BUNDLE);
    ASSERT(group != NULL);
    const cricket::ContentNames& content_names = group->content_names();
    for (cricket::ContentNames::const_iterator it = content_names.begin();
         it != content_names.end(); ++it) {
      group_line.append(" ");
      group_line.append(*it);
    }
    AddLine(group_line, &message);
  }

  // MediaStream semantics
  // a=msid-semantic: WMS <stream labels from audio and video contents>
  InitAttrLine(kAttributeMsidSemantics, &os);
  os << kSdpDelimiterColon << " " << kMediaStreamSemantic;
  std::set<std::string> media_stream_labels;
  const ContentInfo* audio_content = GetFirstAudioContent(desc);
  if (audio_content)
    GetMediaStreamLabels(audio_content, &media_stream_labels);
  const ContentInfo* video_content = GetFirstVideoContent(desc);
  if (video_content)
    GetMediaStreamLabels(video_content, &media_stream_labels);
  for (std::set<std::string>::const_iterator it =
      media_stream_labels.begin(); it != media_stream_labels.end(); ++it) {
    os << " " << *it;
  }
  AddLine(os.str(), &message);

  if (audio_content) {
    BuildMediaDescription(audio_content,
                          desc->GetTransportInfoByName(audio_content->name),
                          cricket::MEDIA_TYPE_AUDIO, &message);
  }


  if (video_content) {
    BuildMediaDescription(video_content,
                          desc->GetTransportInfoByName(video_content->name),
                          cricket::MEDIA_TYPE_VIDEO, &message);
  }

  const ContentInfo* data_content = GetFirstDataContent(desc);
  if (data_content) {
    BuildMediaDescription(data_content,
                          desc->GetTransportInfoByName(data_content->name),
                          cricket::MEDIA_TYPE_DATA, &message);
  }


  return message;
}
+
+// Serializes the passed in IceCandidateInterface to a SDP string.
+// candidate - The candidate to be serialized.
+std::string SdpSerializeCandidate(
+ const IceCandidateInterface& candidate) {
+ std::string message;
+ std::vector<cricket::Candidate> candidates;
+ candidates.push_back(candidate.candidate());
+ BuildCandidate(candidates, &message);
+ return message;
+}
+
// Deserializes |message| into |jdesc|. Parses the session-level section,
// then the media sections (which also yield embedded candidates). On any
// parse failure the partially built description and candidates are freed
// and false is returned with details in |error|.
bool SdpDeserialize(const std::string& message,
                    JsepSessionDescription* jdesc,
                    SdpParseError* error) {
  std::string session_id;
  std::string session_version;
  TransportDescription session_td(NS_JINGLE_ICE_UDP, Candidates());
  RtpHeaderExtensions session_extmaps;
  // |desc| is owned here until handed to jdesc->Initialize below.
  cricket::SessionDescription* desc = new cricket::SessionDescription();
  std::vector<JsepIceCandidate*> candidates;
  size_t current_pos = 0;
  bool supports_msid = false;

  // Session Description
  if (!ParseSessionDescription(message, &current_pos, &session_id,
                               &session_version, &supports_msid, &session_td,
                               &session_extmaps, desc, error)) {
    delete desc;
    return false;
  }

  // Media Description
  if (!ParseMediaDescription(message, session_td, session_extmaps,
                             supports_msid, &current_pos, desc, &candidates,
                             error)) {
    delete desc;
    // Candidates already parsed must be released as well.
    for (std::vector<JsepIceCandidate*>::const_iterator
         it = candidates.begin(); it != candidates.end(); ++it) {
      delete *it;
    }
    return false;
  }

  jdesc->Initialize(desc, session_id, session_version);

  // AddCandidate copies; the parsed objects are freed afterwards.
  for (std::vector<JsepIceCandidate*>::const_iterator
       it = candidates.begin(); it != candidates.end(); ++it) {
    jdesc->AddCandidate(*it);
    delete *it;
  }
  return true;
}
+
+bool SdpDeserializeCandidate(const std::string& message,
+ JsepIceCandidate* jcandidate,
+ SdpParseError* error) {
+ ASSERT(jcandidate != NULL);
+ Candidate candidate;
+ if (!ParseCandidate(message, &candidate, error, true)) {
+ return false;
+ }
+ jcandidate->SetCandidate(candidate);
+ return true;
+}
+
// Parses the first line of |message| as an ICE candidate (RFC 5245
// candidate-attribute). When |is_raw| is true the bare
// "candidate:<value>" form (no "a=" prefix) is also accepted and failure
// messages are phrased for that form. Fills |candidate| and returns true
// on success.
bool ParseCandidate(const std::string& message, Candidate* candidate,
                    SdpParseError* error, bool is_raw) {
  ASSERT(candidate != NULL);

  // Get the first line from |message|.
  std::string first_line;
  GetFirstLine(message, &first_line);

  size_t start_pos = kLinePrefixLength;  // Starting position to parse.
  if (IsRawCandidate(first_line)) {
    // From WebRTC draft section 4.8.1.1 candidate-attribute will be
    // just candidate:<candidate> not a=candidate:<blah>CRLF
    start_pos = 0;
  } else if (!IsLineType(first_line, kLineTypeAttributes) ||
             !HasAttribute(first_line, kAttributeCandidate)) {
    // Must start with a=candidate line.
    // Expecting to be of the format a=candidate:<blah>CRLF.
    if (is_raw) {
      std::ostringstream description;
      description << "Expect line: "
                  << kAttributeCandidate
                  << ":" << "<candidate-str>";
      return ParseFailed(first_line, 0, description.str(), error);
    } else {
      return ParseFailedExpectLine(first_line, 0, kLineTypeAttributes,
                                   kAttributeCandidate, error);
    }
  }

  std::vector<std::string> fields;
  talk_base::split(first_line.substr(start_pos),
                   kSdpDelimiterSpace, &fields);
  // RFC 5245
  // a=candidate:<foundation> <component-id> <transport> <priority>
  // <connection-address> <port> typ <candidate-types>
  // [raddr <connection-address>] [rport <port>]
  // *(SP extension-att-name SP extension-att-value)
  const size_t expected_min_fields = 8;
  if (fields.size() < expected_min_fields ||
      (fields[6] != kAttributeCandidateTyp)) {
    return ParseFailedExpectMinFieldNum(first_line, expected_min_fields, error);
  }
  // fields[0] is "candidate:<foundation>"; strip the attribute name.
  std::string foundation;
  if (!GetValue(fields[0], kAttributeCandidate, &foundation, error)) {
    return false;
  }
  const int component_id = talk_base::FromString<int>(fields[1]);
  const std::string transport = fields[2];
  const uint32 priority = talk_base::FromString<uint32>(fields[3]);
  const std::string connection_address = fields[4];
  const int port = talk_base::FromString<int>(fields[5]);
  SocketAddress address(connection_address, port);

  cricket::ProtocolType protocol;
  if (!StringToProto(transport.c_str(), &protocol)) {
    return ParseFailed(first_line, "Unsupported transport type.", error);
  }

  // Map the SDP candidate type to the cricket port type.
  // Note: "prflx" is not handled (see kCandidatePrflx TODO above).
  std::string candidate_type;
  const std::string type = fields[7];
  if (type == kCandidateHost) {
    candidate_type = cricket::LOCAL_PORT_TYPE;
  } else if (type == kCandidateSrflx) {
    candidate_type = cricket::STUN_PORT_TYPE;
  } else if (type == kCandidateRelay) {
    candidate_type = cricket::RELAY_PORT_TYPE;
  } else {
    return ParseFailed(first_line, "Unsupported candidate type.", error);
  }

  size_t current_position = expected_min_fields;
  SocketAddress related_address;
  // The 2 optional fields for related address
  // [raddr <connection-address>] [rport <port>]
  if (fields.size() >= (current_position + 2) &&
      fields[current_position] == kAttributeCandidateRaddr) {
    related_address.SetIP(fields[++current_position]);
    ++current_position;
  }
  if (fields.size() >= (current_position + 2) &&
      fields[current_position] == kAttributeCandidateRport) {
    related_address.SetPort(
        talk_base::FromString<int>(fields[++current_position]));
    ++current_position;
  }

  // Extension
  // Empty string as the candidate username and password.
  // Will be updated later with the ice-ufrag and ice-pwd.
  // TODO: Remove the username/password extension, which is currently
  // kept for backwards compatibility.
  std::string username;
  std::string password;
  uint32 generation = 0;
  // Extensions come in name/value pairs; |i + 1 < size| guarantees the
  // value read via fields[++i] is in range.
  for (size_t i = current_position; i + 1 < fields.size(); ++i) {
    // RFC 5245
    // *(SP extension-att-name SP extension-att-value)
    if (fields[i] == kAttributeCandidateGeneration) {
      generation = talk_base::FromString<uint32>(fields[++i]);
    } else if (fields[i] == kAttributeCandidateUsername) {
      username = fields[++i];
    } else if (fields[i] == kAttributeCandidatePassword) {
      password = fields[++i];
    } else {
      // Skip the unknown extension.
      ++i;
    }
  }

  // Empty string as the candidate id and network name.
  const std::string id;
  const std::string network_name;
  *candidate = Candidate(id, component_id, cricket::ProtoToString(protocol),
      address, priority, username, password, candidate_type, network_name,
      generation, foundation);
  candidate->set_related_address(related_address);
  return true;
}
+
+bool ParseIceOptions(const std::string& line,
+ std::vector<std::string>* transport_options,
+ SdpParseError* error) {
+ std::string ice_options;
+ if (!GetValue(line, kAttributeIceOption, &ice_options, error)) {
+ return false;
+ }
+ std::vector<std::string> fields;
+ talk_base::split(ice_options, kSdpDelimiterSpace, &fields);
+ for (size_t i = 0; i < fields.size(); ++i) {
+ transport_options->push_back(fields[i]);
+ }
+ return true;
+}
+
// Parses an RTP header-extension mapping line into |extmap|.
// The optional "/<direction>" suffix and any extension attributes are
// dropped; only the id value and URI are kept.
bool ParseExtmap(const std::string& line, RtpHeaderExtension* extmap,
                 SdpParseError* error) {
  // RFC 5285
  // a=extmap:<value>["/"<direction>] <URI> <extensionattributes>
  std::vector<std::string> fields;
  talk_base::split(line.substr(kLinePrefixLength),
                   kSdpDelimiterSpace, &fields);
  const size_t expected_min_fields = 2;
  if (fields.size() < expected_min_fields) {
    return ParseFailedExpectMinFieldNum(line, expected_min_fields, error);
  }
  std::string uri = fields[1];

  std::string value_direction;
  if (!GetValue(fields[0], kAttributeExtmap, &value_direction, error)) {
    return false;
  }
  // Split off an optional "/<direction>" suffix; only the numeric id is used.
  // NOTE(review): assumes talk_base::split always yields at least one field
  // so sub_fields[0] is safe — confirm against talk_base/stringutils.
  std::vector<std::string> sub_fields;
  talk_base::split(value_direction, kSdpDelimiterSlash, &sub_fields);
  int value = talk_base::FromString<int>(sub_fields[0]);

  *extmap = RtpHeaderExtension(uri, value);
  return true;
}
+
// Serializes one complete media section to |message|: the m-line itself
// followed by the media-level attributes (ICE credentials and fingerprint
// from |transport_info|, a=mid, then SCTP- or RTP-specific attributes).
// |transport_info| may be NULL; a NULL |content_info| makes this a no-op.
void BuildMediaDescription(const ContentInfo* content_info,
                           const TransportInfo* transport_info,
                           const MediaType media_type,
                           std::string* message) {
  ASSERT(message != NULL);
  if (content_info == NULL || message == NULL) {
    return;
  }
  // TODO: Rethink if we should use sprintfn instead of stringstream.
  // According to the style guide, streams should only be used for logging.
  // http://google-styleguide.googlecode.com/svn/
  // trunk/cppguide.xml?showone=Streams#Streams
  std::ostringstream os;
  const MediaContentDescription* media_desc =
      static_cast<const MediaContentDescription*> (
          content_info->description);
  ASSERT(media_desc != NULL);

  bool is_sctp = (media_desc->protocol() == cricket::kMediaProtocolDtlsSctp);

  // RFC 4566
  // m=<media> <port> <proto> <fmt>
  // fmt is a list of payload type numbers that MAY be used in the session.
  const char* type = NULL;
  if (media_type == cricket::MEDIA_TYPE_AUDIO)
    type = kMediaTypeAudio;
  else if (media_type == cricket::MEDIA_TYPE_VIDEO)
    type = kMediaTypeVideo;
  else if (media_type == cricket::MEDIA_TYPE_DATA)
    type = kMediaTypeData;
  else
    ASSERT(false);

  // Build the <fmt> list: one payload type id per codec for RTP media, or
  // the single hard-coded format for DTLS/SCTP data channels.
  std::string fmt;
  if (media_type == cricket::MEDIA_TYPE_VIDEO) {
    const VideoContentDescription* video_desc =
        static_cast<const VideoContentDescription*>(media_desc);
    for (std::vector<cricket::VideoCodec>::const_iterator it =
             video_desc->codecs().begin();
         it != video_desc->codecs().end(); ++it) {
      fmt.append(" ");
      fmt.append(talk_base::ToString<int>(it->id));
    }
  } else if (media_type == cricket::MEDIA_TYPE_AUDIO) {
    const AudioContentDescription* audio_desc =
        static_cast<const AudioContentDescription*>(media_desc);
    for (std::vector<cricket::AudioCodec>::const_iterator it =
             audio_desc->codecs().begin();
         it != audio_desc->codecs().end(); ++it) {
      fmt.append(" ");
      fmt.append(talk_base::ToString<int>(it->id));
    }
  } else if (media_type == cricket::MEDIA_TYPE_DATA) {
    if (is_sctp) {
      fmt.append(" ");
      // TODO(jiayl): Replace the hard-coded string with the fmt read out of the
      // ContentDescription.
      fmt.append(talk_base::ToString<int>(kDefaultSctpFmt));
    } else {
      const DataContentDescription* data_desc =
          static_cast<const DataContentDescription*>(media_desc);
      for (std::vector<cricket::DataCodec>::const_iterator it =
               data_desc->codecs().begin();
           it != data_desc->codecs().end(); ++it) {
        fmt.append(" ");
        fmt.append(talk_base::ToString<int>(it->id));
      }
    }
  }
  // The fmt must never be empty. If no codecs are found, set the fmt attribute
  // to 0.
  if (fmt.empty()) {
    fmt = " 0";
  }

  // The port number in the m line will be updated later when associate with
  // the candidates.
  // RFC 3264
  // To reject an offered stream, the port number in the corresponding stream in
  // the answer MUST be set to zero.
  const std::string port = content_info->rejected ?
      kMediaPortRejected : kDefaultPort;

  talk_base::SSLFingerprint* fp = (transport_info) ?
      transport_info->description.identity_fingerprint.get() : NULL;

  // Emit the m-line itself.
  InitLine(kLineTypeMedia, type, &os);
  os << " " << port << " " << media_desc->protocol() << fmt;
  AddLine(os.str(), message);

  // Use the transport_info to build the media level ice-ufrag and ice-pwd.
  if (transport_info) {
    // RFC 5245
    // ice-pwd-att = "ice-pwd" ":" password
    // ice-ufrag-att = "ice-ufrag" ":" ufrag
    // ice-ufrag
    InitAttrLine(kAttributeIceUfrag, &os);
    os << kSdpDelimiterColon << transport_info->description.ice_ufrag;
    AddLine(os.str(), message);
    // ice-pwd
    InitAttrLine(kAttributeIcePwd, &os);
    os << kSdpDelimiterColon << transport_info->description.ice_pwd;
    AddLine(os.str(), message);

    // draft-petithuguenin-mmusic-ice-attributes-level-03
    BuildIceOptions(transport_info->description.transport_options, message);

    // RFC 4572
    // fingerprint-attribute =
    // "fingerprint" ":" hash-func SP fingerprint
    if (fp) {
      // Insert the fingerprint attribute.
      InitAttrLine(kAttributeFingerprint, &os);
      os << kSdpDelimiterColon
         << fp->algorithm << kSdpDelimiterSpace
         << fp->GetRfc4572Fingerprint();

      AddLine(os.str(), message);
    }
  }

  // RFC 3388
  // mid-attribute = "a=mid:" identification-tag
  // identification-tag = token
  // Use the content name as the mid identification-tag.
  InitAttrLine(kAttributeMid, &os);
  os << kSdpDelimiterColon << content_info->name;
  AddLine(os.str(), message);

  // The remaining attributes differ between SCTP data channels and RTP media.
  if (is_sctp) {
    BuildSctpContentAttributes(message);
  } else {
    BuildRtpContentAttributes(media_desc, media_type, message);
  }
}
+
+void BuildSctpContentAttributes(std::string* message) {
+ cricket::DataCodec sctp_codec(kDefaultSctpFmt, kDefaultSctpFmtProtocol, 0);
+ sctp_codec.SetParam(kCodecParamSctpProtocol, kDefaultSctpFmtProtocol);
+ sctp_codec.SetParam(kCodecParamSctpStreams, cricket::kMaxSctpSid + 1);
+ AddFmtpLine(sctp_codec, message);
+}
+
// Appends the RTP-specific media-level attributes of |media_desc| to
// |message|: extmap, direction, bandwidth, rtcp-mux, crypto, the rtpmap
// family, buffered-mode latency, and the per-track ssrc/ssrc-group lines.
void BuildRtpContentAttributes(
    const MediaContentDescription* media_desc,
    const MediaType media_type,
    std::string* message) {
  std::ostringstream os;
  // RFC 5285
  // a=extmap:<value>["/"<direction>] <URI> <extensionattributes>
  // The definitions MUST be either all session level or all media level. This
  // implementation uses all media level.
  for (size_t i = 0; i < media_desc->rtp_header_extensions().size(); ++i) {
    InitAttrLine(kAttributeExtmap, &os);
    os << kSdpDelimiterColon << media_desc->rtp_header_extensions()[i].id
       << kSdpDelimiterSpace << media_desc->rtp_header_extensions()[i].uri;
    AddLine(os.str(), message);
  }

  // RFC 3264
  // a=sendrecv || a=sendonly || a=recvonly || a=inactive

  // A sendrecv description with no streams means we have nothing to send,
  // so advertise recvonly instead.
  cricket::MediaContentDirection direction = media_desc->direction();
  if (media_desc->streams().empty() && direction == cricket::MD_SENDRECV) {
    direction = cricket::MD_RECVONLY;
  }

  switch (direction) {
    case cricket::MD_INACTIVE:
      InitAttrLine(kAttributeInactive, &os);
      break;
    case cricket::MD_SENDONLY:
      InitAttrLine(kAttributeSendOnly, &os);
      break;
    case cricket::MD_RECVONLY:
      InitAttrLine(kAttributeRecvOnly, &os);
      break;
    case cricket::MD_SENDRECV:
    default:
      InitAttrLine(kAttributeSendRecv, &os);
      break;
  }
  AddLine(os.str(), message);

  // RFC 4566
  // b=AS:<bandwidth>
  // NOTE(review): bandwidth() appears to be in bps while b=AS takes kbps,
  // hence the /1000 — confirm against MediaContentDescription.
  if (media_desc->bandwidth() >= 1000) {
    InitLine(kLineTypeSessionBandwidth, kApplicationSpecificMaximum, &os);
    os << kSdpDelimiterColon << (media_desc->bandwidth() / 1000);
    AddLine(os.str(), message);
  }

  // RFC 5761
  // a=rtcp-mux
  if (media_desc->rtcp_mux()) {
    InitAttrLine(kAttributeRtcpMux, &os);
    AddLine(os.str(), message);
  }

  // RFC 4568
  // a=crypto:<tag> <crypto-suite> <key-params> [<session-params>]
  for (std::vector<CryptoParams>::const_iterator it =
           media_desc->cryptos().begin();
       it != media_desc->cryptos().end(); ++it) {
    InitAttrLine(kAttributeCrypto, &os);
    os << kSdpDelimiterColon << it->tag << " " << it->cipher_suite << " "
       << it->key_params;
    if (!it->session_params.empty()) {
      os << " " << it->session_params;
    }
    AddLine(os.str(), message);
  }

  // RFC 4566
  // a=rtpmap:<payload type> <encoding name>/<clock rate>
  // [/<encodingparameters>]
  BuildRtpMap(media_desc, media_type, message);

  // Specify latency for buffered mode.
  // a=x-google-buffer-latency:<value>
  if (media_desc->buffered_mode_latency() != cricket::kBufferedModeDisabled) {
    std::ostringstream os;
    InitAttrLine(kAttributeXGoogleBufferLatency, &os);
    os << kSdpDelimiterColon << media_desc->buffered_mode_latency();
    AddLine(os.str(), message);
  }

  for (StreamParamsVec::const_iterator track = media_desc->streams().begin();
       track != media_desc->streams().end(); ++track) {
    // Require that the track belongs to a media stream,
    // ie the sync_label is set. This extra check is necessary since the
    // MediaContentDescription always contains a streamparam with an ssrc even
    // if no track or media stream have been created.
    if (track->sync_label.empty()) continue;

    // Build the ssrc-group lines.
    for (size_t i = 0; i < track->ssrc_groups.size(); ++i) {
      // RFC 5576
      // a=ssrc-group:<semantics> <ssrc-id> ...
      if (track->ssrc_groups[i].ssrcs.empty()) {
        continue;
      }
      std::ostringstream os;
      InitAttrLine(kAttributeSsrcGroup, &os);
      os << kSdpDelimiterColon << track->ssrc_groups[i].semantics;
      std::vector<uint32>::const_iterator ssrc =
          track->ssrc_groups[i].ssrcs.begin();
      for (; ssrc != track->ssrc_groups[i].ssrcs.end(); ++ssrc) {
        os << kSdpDelimiterSpace << talk_base::ToString<uint32>(*ssrc);
      }
      AddLine(os.str(), message);
    }
    // Build the ssrc lines for each ssrc.
    for (size_t i = 0; i < track->ssrcs.size(); ++i) {
      uint32 ssrc = track->ssrcs[i];
      // RFC 5576
      // a=ssrc:<ssrc-id> cname:<value>
      AddSsrcLine(ssrc, kSsrcAttributeCname,
                  track->cname, message);

      // draft-alvestrand-mmusic-msid-00
      // a=ssrc:<ssrc-id> msid:identifier [appdata]
      // The appdata consists of the "id" attribute of a MediaStreamTrack, which
      // is corresponding to the "name" attribute of StreamParams.
      std::string appdata = track->id;
      std::ostringstream os;
      InitAttrLine(kAttributeSsrc, &os);
      os << kSdpDelimiterColon << ssrc << kSdpDelimiterSpace
         << kSsrcAttributeMsid << kSdpDelimiterColon << track->sync_label
         << kSdpDelimiterSpace << appdata;
      AddLine(os.str(), message);

      // TODO(ronghuawu): Remove below code which is for backward compatibility.
      // draft-alvestrand-rtcweb-mid-01
      // a=ssrc:<ssrc-id> mslabel:<value>
      // The label isn't yet defined.
      // a=ssrc:<ssrc-id> label:<value>
      AddSsrcLine(ssrc, kSsrcAttributeMslabel, track->sync_label, message);
      AddSsrcLine(ssrc, kSSrcAttributeLabel, track->id, message);
    }
  }
}
+
+void WriteFmtpHeader(int payload_type, std::ostringstream* os) {
+ // fmtp header: a=fmtp:|payload_type| <parameters>
+ // Add a=fmtp
+ InitAttrLine(kAttributeFmtp, os);
+ // Add :|payload_type|
+ *os << kSdpDelimiterColon << payload_type;
+}
+
+void WriteRtcpFbHeader(int payload_type, std::ostringstream* os) {
+ // rtcp-fb header: a=rtcp-fb:|payload_type|
+ // <parameters>/<ccm <ccm_parameters>>
+ // Add a=rtcp-fb
+ InitAttrLine(kAttributeRtcpFb, os);
+ // Add :
+ *os << kSdpDelimiterColon;
+ if (payload_type == kWildcardPayloadType) {
+ *os << "*";
+ } else {
+ *os << payload_type;
+ }
+}
+
+void WriteFmtpParameter(const std::string& parameter_name,
+ const std::string& parameter_value,
+ std::ostringstream* os) {
+ // fmtp parameters: |parameter_name|=|parameter_value|
+ *os << parameter_name << kSdpDelimiterEqual << parameter_value;
+}
+
+void WriteFmtpParameters(const cricket::CodecParameterMap& parameters,
+ std::ostringstream* os) {
+ for (cricket::CodecParameterMap::const_iterator fmtp = parameters.begin();
+ fmtp != parameters.end(); ++fmtp) {
+ // Each new parameter, except the first one starts with ";" and " ".
+ if (fmtp != parameters.begin()) {
+ *os << kSdpDelimiterSemicolon;
+ }
+ *os << kSdpDelimiterSpace;
+ WriteFmtpParameter(fmtp->first, fmtp->second, os);
+ }
+}
+
+bool IsFmtpParam(const std::string& name) {
+ const char* kFmtpParams[] = {
+ kCodecParamMinPTime, kCodecParamSPropStereo,
+ kCodecParamStereo, kCodecParamUseInbandFec,
+ kCodecParamMaxBitrate, kCodecParamMinBitrate, kCodecParamMaxQuantization,
+ kCodecParamSctpProtocol, kCodecParamSctpStreams
+ };
+ for (size_t i = 0; i < ARRAY_SIZE(kFmtpParams); ++i) {
+ if (_stricmp(name.c_str(), kFmtpParams[i]) == 0) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Retreives fmtp parameters from |params|, which may contain other parameters
+// as well, and puts them in |fmtp_parameters|.
+void GetFmtpParams(const cricket::CodecParameterMap& params,
+ cricket::CodecParameterMap* fmtp_parameters) {
+ for (cricket::CodecParameterMap::const_iterator iter = params.begin();
+ iter != params.end(); ++iter) {
+ if (IsFmtpParam(iter->first)) {
+ (*fmtp_parameters)[iter->first] = iter->second;
+ }
+ }
+}
+
+template <class T>
+void AddFmtpLine(const T& codec, std::string* message) {
+ cricket::CodecParameterMap fmtp_parameters;
+ GetFmtpParams(codec.params, &fmtp_parameters);
+ if (fmtp_parameters.empty()) {
+ // No need to add an fmtp if it will have no (optional) parameters.
+ return;
+ }
+ std::ostringstream os;
+ WriteFmtpHeader(codec.id, &os);
+ WriteFmtpParameters(fmtp_parameters, &os);
+ AddLine(os.str(), message);
+ return;
+}
+
+template <class T>
+void AddRtcpFbLines(const T& codec, std::string* message) {
+ for (std::vector<cricket::FeedbackParam>::const_iterator iter =
+ codec.feedback_params.params().begin();
+ iter != codec.feedback_params.params().end(); ++iter) {
+ std::ostringstream os;
+ WriteRtcpFbHeader(codec.id, &os);
+ os << " " << iter->id();
+ if (!iter->param().empty()) {
+ os << " " << iter->param();
+ }
+ AddLine(os.str(), message);
+ }
+}
+
// Stores the smallest element of |values| in |*value|. Returns false and
// leaves |*value| untouched when |values| is empty.
bool GetMinValue(const std::vector<int>& values, int* value) {
  if (values.empty()) {
    return false;
  }
  *value = *std::min_element(values.begin(), values.end());
  return true;
}
+
+bool GetParameter(const std::string& name,
+ const cricket::CodecParameterMap& params, int* value) {
+ std::map<std::string, std::string>::const_iterator found =
+ params.find(name);
+ if (found == params.end()) {
+ return false;
+ }
+ *value = talk_base::FromString<int>(found->second);
+ return true;
+}
+
// Appends "a=rtpmap:..." lines (plus the associated rtcp-fb and fmtp lines
// and, for audio, the aggregated ptime/maxptime attributes) for every codec
// in |media_desc| to |message|.
void BuildRtpMap(const MediaContentDescription* media_desc,
                 const MediaType media_type,
                 std::string* message) {
  ASSERT(message != NULL);
  ASSERT(media_desc != NULL);
  std::ostringstream os;
  if (media_type == cricket::MEDIA_TYPE_VIDEO) {
    const VideoContentDescription* video_desc =
        static_cast<const VideoContentDescription*>(media_desc);
    for (std::vector<cricket::VideoCodec>::const_iterator it =
             video_desc->codecs().begin();
         it != video_desc->codecs().end(); ++it) {
      // RFC 4566
      // a=rtpmap:<payload type> <encoding name>/<clock rate>
      // [/<encodingparameters>]
      // The wildcard payload type never gets an rtpmap, only rtcp-fb/fmtp.
      if (it->id != kWildcardPayloadType) {
        InitAttrLine(kAttributeRtpmap, &os);
        os << kSdpDelimiterColon << it->id << " " << it->name
           << "/" << kDefaultVideoClockrate;
        AddLine(os.str(), message);
      }
      AddRtcpFbLines(*it, message);
      AddFmtpLine(*it, message);
    }
  } else if (media_type == cricket::MEDIA_TYPE_AUDIO) {
    const AudioContentDescription* audio_desc =
        static_cast<const AudioContentDescription*>(media_desc);
    // Collected across codecs to compute the media-level ptime/maxptime.
    std::vector<int> ptimes;
    std::vector<int> maxptimes;
    int max_minptime = 0;
    for (std::vector<cricket::AudioCodec>::const_iterator it =
             audio_desc->codecs().begin();
         it != audio_desc->codecs().end(); ++it) {
      ASSERT(!it->name.empty());
      // RFC 4566
      // a=rtpmap:<payload type> <encoding name>/<clock rate>
      // [/<encodingparameters>]
      InitAttrLine(kAttributeRtpmap, &os);
      os << kSdpDelimiterColon << it->id << " ";
      os << it->name << "/" << it->clockrate;
      if (it->channels != 1) {
        os << "/" << it->channels;
      }
      AddLine(os.str(), message);
      AddRtcpFbLines(*it, message);
      AddFmtpLine(*it, message);
      int minptime = 0;
      if (GetParameter(kCodecParamMinPTime, it->params, &minptime)) {
        max_minptime = std::max(minptime, max_minptime);
      }
      int ptime;
      if (GetParameter(kCodecParamPTime, it->params, &ptime)) {
        ptimes.push_back(ptime);
      }
      int maxptime;
      if (GetParameter(kCodecParamMaxPTime, it->params, &maxptime)) {
        maxptimes.push_back(maxptime);
      }
    }
    // Populate the maxptime attribute with the smallest maxptime of all codecs
    // under the same m-line.
    int min_maxptime = INT_MAX;
    if (GetMinValue(maxptimes, &min_maxptime)) {
      AddAttributeLine(kCodecParamMaxPTime, min_maxptime, message);
    }
    // When no codec specified a maxptime, min_maxptime stays at INT_MAX.
    ASSERT(min_maxptime > max_minptime);
    // Populate the ptime attribute with the smallest ptime or the largest
    // minptime, whichever is the largest, for all codecs under the same m-line.
    int ptime = INT_MAX;
    if (GetMinValue(ptimes, &ptime)) {
      // Clamp the chosen ptime into [max_minptime, min_maxptime].
      ptime = std::min(ptime, min_maxptime);
      ptime = std::max(ptime, max_minptime);
      AddAttributeLine(kCodecParamPTime, ptime, message);
    }
  } else if (media_type == cricket::MEDIA_TYPE_DATA) {
    const DataContentDescription* data_desc =
        static_cast<const DataContentDescription*>(media_desc);
    for (std::vector<cricket::DataCodec>::const_iterator it =
             data_desc->codecs().begin();
         it != data_desc->codecs().end(); ++it) {
      // RFC 4566
      // a=rtpmap:<payload type> <encoding name>/<clock rate>
      // [/<encodingparameters>]
      InitAttrLine(kAttributeRtpmap, &os);
      os << kSdpDelimiterColon << it->id << " "
         << it->name << "/" << it->clockrate;
      AddLine(os.str(), message);
    }
  }
}
+
// Serializes each candidate in |candidates| as an "a=candidate:..." line
// (RFC 5245 grammar) appended to |message|.
void BuildCandidate(const std::vector<Candidate>& candidates,
                    std::string* message) {
  std::ostringstream os;

  for (std::vector<Candidate>::const_iterator it = candidates.begin();
       it != candidates.end(); ++it) {
    // RFC 5245
    // a=candidate:<foundation> <component-id> <transport> <priority>
    // <connection-address> <port> typ <candidate-types>
    // [raddr <connection-address>] [rport <port>]
    // *(SP extension-att-name SP extension-att-value)
    std::string type;
    // Map the cricket candidate type to "host" / "srflx" / "prflx" / "relay"
    if (it->type() == cricket::LOCAL_PORT_TYPE) {
      type = kCandidateHost;
    } else if (it->type() == cricket::STUN_PORT_TYPE) {
      type = kCandidateSrflx;
    } else if (it->type() == cricket::RELAY_PORT_TYPE) {
      type = kCandidateRelay;
    } else {
      // Unknown candidate type: asserts in debug builds; in release builds
      // |type| stays empty and the line is emitted anyway.
      ASSERT(false);
    }

    InitAttrLine(kAttributeCandidate, &os);
    os << kSdpDelimiterColon
       << it->foundation() << " " << it->component() << " "
       << it->protocol() << " " << it->priority() << " "
       << it->address().ipaddr().ToString() << " "
       << it->address().PortAsString() << " "
       << kAttributeCandidateTyp << " " << type << " ";

    // Related address
    if (!it->related_address().IsNil()) {
      os << kAttributeCandidateRaddr << " "
         << it->related_address().ipaddr().ToString() << " "
         << kAttributeCandidateRport << " "
         << it->related_address().PortAsString() << " ";
    }

    // Extensions
    os << kAttributeCandidateGeneration << " " << it->generation();

    AddLine(os.str(), message);
  }
}
+
+void BuildIceOptions(const std::vector<std::string>& transport_options,
+ std::string* message) {
+ if (!transport_options.empty()) {
+ std::ostringstream os;
+ InitAttrLine(kAttributeIceOption, &os);
+ os << kSdpDelimiterColon << transport_options[0];
+ for (size_t i = 1; i < transport_options.size(); ++i) {
+ os << kSdpDelimiterSpace << transport_options[i];
+ }
+ AddLine(os.str(), message);
+ }
+}
+
// Parses the session-level portion of |message| starting at |*pos|: the
// mandatory v=/o=/s= lines, the optional session lines, the timing lines,
// and the session-level attributes (group, ICE credentials, fingerprint,
// msid-semantic, extmap). On success |*pos| has been advanced past the
// session section and the out-parameters are filled in; on failure returns
// false with |error| describing the offending line.
bool ParseSessionDescription(const std::string& message, size_t* pos,
                             std::string* session_id,
                             std::string* session_version,
                             bool* supports_msid,
                             TransportDescription* session_td,
                             RtpHeaderExtensions* session_extmaps,
                             cricket::SessionDescription* desc,
                             SdpParseError* error) {
  std::string line;

  // RFC 4566
  // v= (protocol version)
  if (!GetLineWithType(message, pos, &line, kLineTypeVersion)) {
    return ParseFailedExpectLine(message, *pos, kLineTypeVersion,
                                 std::string(), error);
  }
  // RFC 4566
  // o=<username> <sess-id> <sess-version> <nettype> <addrtype>
  // <unicast-address>
  if (!GetLineWithType(message, pos, &line, kLineTypeOrigin)) {
    return ParseFailedExpectLine(message, *pos, kLineTypeOrigin,
                                 std::string(), error);
  }
  std::vector<std::string> fields;
  talk_base::split(line.substr(kLinePrefixLength),
                   kSdpDelimiterSpace, &fields);
  const size_t expected_fields = 6;
  if (fields.size() != expected_fields) {
    return ParseFailedExpectFieldNum(line, expected_fields, error);
  }
  *session_id = fields[1];
  *session_version = fields[2];

  // RFC 4566
  // s= (session name)
  if (!GetLineWithType(message, pos, &line, kLineTypeSessionName)) {
    return ParseFailedExpectLine(message, *pos, kLineTypeSessionName,
                                 std::string(), error);
  }

  // Optional lines
  // Those are the optional lines, so shouldn't return false if not present.
  // RFC 4566
  // i=* (session information)
  GetLineWithType(message, pos, &line, kLineTypeSessionInfo);

  // RFC 4566
  // u=* (URI of description)
  GetLineWithType(message, pos, &line, kLineTypeSessionUri);

  // RFC 4566
  // e=* (email address)
  GetLineWithType(message, pos, &line, kLineTypeSessionEmail);

  // RFC 4566
  // p=* (phone number)
  GetLineWithType(message, pos, &line, kLineTypeSessionPhone);

  // RFC 4566
  // c=* (connection information -- not required if included in
  // all media)
  GetLineWithType(message, pos, &line, kLineTypeConnection);

  // RFC 4566
  // b=* (zero or more bandwidth information lines)
  while (GetLineWithType(message, pos, &line, kLineTypeSessionBandwidth)) {
    // By pass zero or more b lines.
  }

  // RFC 4566
  // One or more time descriptions ("t=" and "r=" lines; see below)
  // t= (time the session is active)
  // r=* (zero or more repeat times)
  // Ensure there's at least one time description
  if (!GetLineWithType(message, pos, &line, kLineTypeTiming)) {
    return ParseFailedExpectLine(message, *pos, kLineTypeTiming, std::string(),
                                 error);
  }

  while (GetLineWithType(message, pos, &line, kLineTypeRepeatTimes)) {
    // By pass zero or more r lines.
  }

  // Go through the rest of the time descriptions
  while (GetLineWithType(message, pos, &line, kLineTypeTiming)) {
    while (GetLineWithType(message, pos, &line, kLineTypeRepeatTimes)) {
      // By pass zero or more r lines.
    }
  }

  // RFC 4566
  // z=* (time zone adjustments)
  GetLineWithType(message, pos, &line, kLineTypeTimeZone);

  // RFC 4566
  // k=* (encryption key)
  GetLineWithType(message, pos, &line, kLineTypeEncryptionKey);

  // RFC 4566
  // a=* (zero or more session attribute lines)
  // Unrecognized attributes are silently ignored.
  while (GetLineWithType(message, pos, &line, kLineTypeAttributes)) {
    if (HasAttribute(line, kAttributeGroup)) {
      if (!ParseGroupAttribute(line, desc, error)) {
        return false;
      }
    } else if (HasAttribute(line, kAttributeIceUfrag)) {
      if (!GetValue(line, kAttributeIceUfrag,
                    &(session_td->ice_ufrag), error)) {
        return false;
      }
    } else if (HasAttribute(line, kAttributeIcePwd)) {
      if (!GetValue(line, kAttributeIcePwd, &(session_td->ice_pwd), error)) {
        return false;
      }
    } else if (HasAttribute(line, kAttributeIceLite)) {
      session_td->ice_mode = cricket::ICEMODE_LITE;
    } else if (HasAttribute(line, kAttributeIceOption)) {
      if (!ParseIceOptions(line, &(session_td->transport_options), error)) {
        return false;
      }
    } else if (HasAttribute(line, kAttributeFingerprint)) {
      if (session_td->identity_fingerprint.get()) {
        return ParseFailed(
            line,
            "Can't have multiple fingerprint attributes at the same level.",
            error);
      }
      talk_base::SSLFingerprint* fingerprint = NULL;
      if (!ParseFingerprintAttribute(line, &fingerprint, error)) {
        return false;
      }
      session_td->identity_fingerprint.reset(fingerprint);
    } else if (HasAttribute(line, kAttributeMsidSemantics)) {
      std::string semantics;
      if (!GetValue(line, kAttributeMsidSemantics, &semantics, error)) {
        return false;
      }
      *supports_msid = CaseInsensitiveFind(semantics, kMediaStreamSemantic);
    } else if (HasAttribute(line, kAttributeExtmap)) {
      RtpHeaderExtension extmap;
      if (!ParseExtmap(line, &extmap, error)) {
        return false;
      }
      session_extmaps->push_back(extmap);
    }
  }

  return true;
}
+
+bool ParseGroupAttribute(const std::string& line,
+ cricket::SessionDescription* desc,
+ SdpParseError* error) {
+ ASSERT(desc != NULL);
+
+ // RFC 5888 and draft-holmberg-mmusic-sdp-bundle-negotiation-00
+ // a=group:BUNDLE video voice
+ std::vector<std::string> fields;
+ talk_base::split(line.substr(kLinePrefixLength),
+ kSdpDelimiterSpace, &fields);
+ std::string semantics;
+ if (!GetValue(fields[0], kAttributeGroup, &semantics, error)) {
+ return false;
+ }
+ cricket::ContentGroup group(semantics);
+ for (size_t i = 1; i < fields.size(); ++i) {
+ group.AddContentName(fields[i]);
+ }
+ desc->AddGroup(group);
+ return true;
+}
+
// Parses "a=fingerprint:<hash-func> <digest>" (RFC 4572) from |line| into a
// newly allocated SSLFingerprint stored in |*fingerprint|; ownership passes
// to the caller. Returns false and fills |error| on any parse failure.
static bool ParseFingerprintAttribute(const std::string& line,
                                      talk_base::SSLFingerprint** fingerprint,
                                      SdpParseError* error) {
  if (!IsLineType(line, kLineTypeAttributes) ||
      !HasAttribute(line, kAttributeFingerprint)) {
    return ParseFailedExpectLine(line, 0, kLineTypeAttributes,
                                 kAttributeFingerprint, error);
  }

  std::vector<std::string> fields;
  talk_base::split(line.substr(kLinePrefixLength),
                   kSdpDelimiterSpace, &fields);
  const size_t expected_fields = 2;
  if (fields.size() != expected_fields) {
    return ParseFailedExpectFieldNum(line, expected_fields, error);
  }

  // The first field here is "fingerprint:<hash>".
  std::string algorithm;
  if (!GetValue(fields[0], kAttributeFingerprint, &algorithm, error)) {
    return false;
  }

  // Downcase the algorithm. Note that we don't need to downcase the
  // fingerprint because hex_decode can handle upper-case.
  std::transform(algorithm.begin(), algorithm.end(), algorithm.begin(),
                 ::tolower);

  // The second field is the digest value. De-hexify it.
  *fingerprint = talk_base::SSLFingerprint::CreateFromRfc4572(
      algorithm, fields[1]);
  if (!*fingerprint) {
    return ParseFailed(line,
                       "Failed to create fingerprint from the digest.",
                       error);
  }

  return true;
}
+
+// RFC 3551
+// PT encoding media type clock rate channels
+// name (Hz)
+// 0 PCMU A 8,000 1
+// 1 reserved A
+// 2 reserved A
+// 3 GSM A 8,000 1
+// 4 G723 A 8,000 1
+// 5 DVI4 A 8,000 1
+// 6 DVI4 A 16,000 1
+// 7 LPC A 8,000 1
+// 8 PCMA A 8,000 1
+// 9 G722 A 8,000 1
+// 10 L16 A 44,100 2
+// 11 L16 A 44,100 1
+// 12 QCELP A 8,000 1
+// 13 CN A 8,000 1
+// 14 MPA A 90,000 (see text)
+// 15 G728 A 8,000 1
+// 16 DVI4 A 11,025 1
+// 17 DVI4 A 22,050 1
+// 18 G729 A 8,000 1
// One row of the RFC 3551 static payload type table above.
struct StaticPayloadAudioCodec {
  const char* name;  // RTP encoding name, e.g. "PCMU".
  int clockrate;     // RTP clock rate in Hz; 0 for reserved entries.
  int channels;      // Channel count; 0 for reserved entries.
};
// Codec parameters for the RFC 3551 static payload types 0-18, indexed by
// payload type number (entries 1 and 2 are reserved placeholders).
static const StaticPayloadAudioCodec kStaticPayloadAudioCodecs[] = {
  { "PCMU", 8000, 1 },
  { "reserved", 0, 0 },
  { "reserved", 0, 0 },
  { "GSM", 8000, 1 },
  { "G723", 8000, 1 },
  { "DVI4", 8000, 1 },
  { "DVI4", 16000, 1 },
  { "LPC", 8000, 1 },
  { "PCMA", 8000, 1 },
  { "G722", 8000, 1 },
  { "L16", 44100, 2 },
  { "L16", 44100, 1 },
  { "QCELP", 8000, 1 },
  { "CN", 8000, 1 },
  { "MPA", 90000, 1 },
  { "G728", 8000, 1 },
  { "DVI4", 11025, 1 },
  { "DVI4", 22050, 1 },
  { "G729", 8000, 1 },
};
+
+void MaybeCreateStaticPayloadAudioCodecs(
+ const std::vector<int>& fmts, AudioContentDescription* media_desc) {
+ if (!media_desc) {
+ return;
+ }
+ int preference = fmts.size();
+ std::vector<int>::const_iterator it = fmts.begin();
+ bool add_new_codec = false;
+ for (; it != fmts.end(); ++it) {
+ int payload_type = *it;
+ if (!media_desc->HasCodec(payload_type) &&
+ payload_type >= 0 &&
+ payload_type < ARRAY_SIZE(kStaticPayloadAudioCodecs)) {
+ std::string encoding_name = kStaticPayloadAudioCodecs[payload_type].name;
+ int clock_rate = kStaticPayloadAudioCodecs[payload_type].clockrate;
+ int channels = kStaticPayloadAudioCodecs[payload_type].channels;
+ media_desc->AddCodec(cricket::AudioCodec(payload_type, encoding_name,
+ clock_rate, 0, channels,
+ preference));
+ add_new_codec = true;
+ }
+ --preference;
+ }
+ if (add_new_codec) {
+ media_desc->SortCodecs();
+ }
+}
+
+template <class C>
+static C* ParseContentDescription(const std::string& message,
+ const MediaType media_type,
+ int mline_index,
+ const std::string& protocol,
+ const std::vector<int>& codec_preference,
+ size_t* pos,
+ std::string* content_name,
+ TransportDescription* transport,
+ std::vector<JsepIceCandidate*>* candidates,
+ webrtc::SdpParseError* error) {
+ C* media_desc = new C();
+ switch (media_type) {
+ case cricket::MEDIA_TYPE_AUDIO:
+ *content_name = cricket::CN_AUDIO;
+ break;
+ case cricket::MEDIA_TYPE_VIDEO:
+ *content_name = cricket::CN_VIDEO;
+ break;
+ case cricket::MEDIA_TYPE_DATA:
+ *content_name = cricket::CN_DATA;
+ break;
+ default:
+ ASSERT(false);
+ break;
+ }
+ if (!ParseContent(message, media_type, mline_index, protocol,
+ codec_preference, pos, content_name,
+ media_desc, transport, candidates, error)) {
+ delete media_desc;
+ return NULL;
+ }
+ // Sort the codecs according to the m-line fmt list.
+ media_desc->SortCodecs();
+ return media_desc;
+}
+
// Parses every m= section remaining in |message|, creating a content
// description plus a TransportInfo for each and adding them to |desc|.
// Media-level attributes override the session-level defaults captured in
// |session_td| and |session_extmaps|. Unknown media types are skipped with
// a warning; any other failure aborts parsing.
bool ParseMediaDescription(const std::string& message,
                           const TransportDescription& session_td,
                           const RtpHeaderExtensions& session_extmaps,
                           bool supports_msid,
                           size_t* pos,
                           cricket::SessionDescription* desc,
                           std::vector<JsepIceCandidate*>* candidates,
                           SdpParseError* error) {
  ASSERT(desc != NULL);
  std::string line;
  int mline_index = -1;

  // Zero or more media descriptions
  // RFC 4566
  // m=<media> <port> <proto> <fmt>
  while (GetLineWithType(message, pos, &line, kLineTypeMedia)) {
    ++mline_index;

    std::vector<std::string> fields;
    talk_base::split(line.substr(kLinePrefixLength),
                     kSdpDelimiterSpace, &fields);
    const size_t expected_min_fields = 4;
    if (fields.size() < expected_min_fields) {
      return ParseFailedExpectMinFieldNum(line, expected_min_fields, error);
    }
    bool rejected = false;
    // RFC 3264
    // To reject an offered stream, the port number in the corresponding stream
    // in the answer MUST be set to zero.
    if (fields[1] == kMediaPortRejected) {
      rejected = true;
    }

    std::string protocol = fields[2];
    bool is_sctp = (protocol == cricket::kMediaProtocolDtlsSctp);

    // <fmt>
    std::vector<int> codec_preference;
    for (size_t j = 3 ; j < fields.size(); ++j) {
      codec_preference.push_back(talk_base::FromString<int>(fields[j]));
    }

    // Make a temporary TransportDescription based on |session_td|.
    // Some of this gets overwritten by ParseContent.
    TransportDescription transport(NS_JINGLE_ICE_UDP,
                                   session_td.transport_options,
                                   session_td.ice_ufrag,
                                   session_td.ice_pwd,
                                   session_td.ice_mode,
                                   session_td.identity_fingerprint.get(),
                                   Candidates());

    talk_base::scoped_ptr<MediaContentDescription> content;
    std::string content_name;
    if (HasAttribute(line, kMediaTypeVideo)) {
      content.reset(ParseContentDescription<VideoContentDescription>(
                    message, cricket::MEDIA_TYPE_VIDEO, mline_index, protocol,
                    codec_preference, pos, &content_name,
                    &transport, candidates, error));
    } else if (HasAttribute(line, kMediaTypeAudio)) {
      content.reset(ParseContentDescription<AudioContentDescription>(
                    message, cricket::MEDIA_TYPE_AUDIO, mline_index, protocol,
                    codec_preference, pos, &content_name,
                    &transport, candidates, error));
      // Fill in RFC 3551 static payload types that had no a=rtpmap line.
      MaybeCreateStaticPayloadAudioCodecs(
          codec_preference,
          static_cast<AudioContentDescription*>(content.get()));
    } else if (HasAttribute(line, kMediaTypeData)) {
      content.reset(ParseContentDescription<DataContentDescription>(
                    message, cricket::MEDIA_TYPE_DATA, mline_index, protocol,
                    codec_preference, pos, &content_name,
                    &transport, candidates, error));
    } else {
      LOG(LS_WARNING) << "Unsupported media type: " << line;
      continue;
    }
    if (!content.get()) {
      // ParseContentDescription returns NULL if failed.
      return false;
    }

    if (!is_sctp) {
      // Make sure to set the media direction correctly. If the direction is not
      // MD_RECVONLY or Inactive and no streams are parsed,
      // a default MediaStream will be created to prepare for receiving media.
      if (supports_msid && content->streams().empty() &&
          content->direction() == cricket::MD_SENDRECV) {
        content->set_direction(cricket::MD_RECVONLY);
      }

      // Set the extmap.
      if (!session_extmaps.empty() &&
          !content->rtp_header_extensions().empty()) {
        return ParseFailed("",
                           "The a=extmap MUST be either all session level or "
                           "all media level.",
                           error);
      }
      for (size_t i = 0; i < session_extmaps.size(); ++i) {
        content->AddRtpHeaderExtension(session_extmaps[i]);
      }
    }
    content->set_protocol(protocol);
    desc->AddContent(content_name,
                     is_sctp ? cricket::NS_JINGLE_DRAFT_SCTP :
                               cricket::NS_JINGLE_RTP,
                     rejected,
                     content.release());
    // Create TransportInfo with the media level "ice-pwd" and "ice-ufrag".
    TransportInfo transport_info(content_name, transport);

    if (!desc->AddTransportInfo(transport_info)) {
      std::ostringstream description;
      description << "Failed to AddTransportInfo with content name: "
                  << content_name;
      return ParseFailed("", description.str(), error);
    }
  }
  return true;
}
+
+bool VerifyCodec(const cricket::Codec& codec) {
+ // Codec has not been populated correctly unless the name has been set. This
+ // can happen if an SDP has an fmtp or rtcp-fb with a payload type but doesn't
+ // have a corresponding "rtpmap" line.
+ cricket::Codec default_codec;
+ return default_codec.name != codec.name;
+}
+
+bool VerifyAudioCodecs(const AudioContentDescription* audio_desc) {
+ const std::vector<cricket::AudioCodec>& codecs = audio_desc->codecs();
+ for (std::vector<cricket::AudioCodec>::const_iterator iter = codecs.begin();
+ iter != codecs.end(); ++iter) {
+ if (!VerifyCodec(*iter)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool VerifyVideoCodecs(const VideoContentDescription* video_desc) {
+ const std::vector<cricket::VideoCodec>& codecs = video_desc->codecs();
+ for (std::vector<cricket::VideoCodec>::const_iterator iter = codecs.begin();
+ iter != codecs.end(); ++iter) {
+ if (!VerifyCodec(*iter)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Copies every (key, value) entry from |parameters| into |codec|.
+void AddParameters(const cricket::CodecParameterMap& parameters,
+                   cricket::Codec* codec) {
+  cricket::CodecParameterMap::const_iterator it = parameters.begin();
+  while (it != parameters.end()) {
+    codec->SetParam(it->first, it->second);
+    ++it;
+  }
+}
+
+// Adds a single rtcp-fb feedback parameter to |codec|.
+void AddFeedbackParameter(const cricket::FeedbackParam& feedback_param,
+                          cricket::Codec* codec) {
+  codec->AddFeedbackParam(feedback_param);
+}
+
+// Adds every feedback parameter in |feedback_params| to |codec|.
+void AddFeedbackParameters(const cricket::FeedbackParams& feedback_params,
+                           cricket::Codec* codec) {
+  const std::vector<cricket::FeedbackParam>& params = feedback_params.params();
+  for (size_t i = 0; i < params.size(); ++i) {
+    codec->AddFeedbackParam(params[i]);
+  }
+}
+
+// Gets the current codec setting associated with |payload_type|. If there
+// is no codec associated with that payload type, returns a default-constructed
+// codec carrying only the payload type.
+template <class T>
+T GetCodec(const std::vector<T>& codecs, int payload_type) {
+  for (size_t i = 0; i < codecs.size(); ++i) {
+    if (codecs[i].id == payload_type) {
+      return codecs[i];
+    }
+  }
+  // Not found; hand back an empty codec with the requested payload type.
+  T empty_codec = T();
+  empty_codec.id = payload_type;
+  return empty_codec;
+}
+
+// Replaces the codec entry with a matching id in |content_desc|, or appends
+// |codec| as a new entry if no codec with that id exists yet.
+template <class T, class U>
+void AddOrReplaceCodec(MediaContentDescription* content_desc, const U& codec) {
+  T* desc = static_cast<T*>(content_desc);
+  std::vector<U> codecs = desc->codecs();
+  for (size_t i = 0; i < codecs.size(); ++i) {
+    if (codecs[i].id == codec.id) {
+      codecs[i] = codec;
+      desc->set_codecs(codecs);
+      return;
+    }
+  }
+  // No existing entry with this id; append it.
+  desc->AddCodec(codec);
+}
+
+// Adds or updates the codec corresponding to |payload_type| with the fmtp
+// parameters in |parameters|.
+template <class T, class U>
+void UpdateCodec(MediaContentDescription* content_desc, int payload_type,
+                 const cricket::CodecParameterMap& parameters) {
+  // The codec may already exist (created from an rtpmap line); start from it.
+  U codec = GetCodec(static_cast<T*>(content_desc)->codecs(), payload_type);
+  AddParameters(parameters, &codec);
+  AddOrReplaceCodec<T, U>(content_desc, codec);
+}
+
+// Adds or updates the codec corresponding to |payload_type| with the rtcp-fb
+// parameter |feedback_param|.
+template <class T, class U>
+void UpdateCodec(MediaContentDescription* content_desc, int payload_type,
+                 const cricket::FeedbackParam& feedback_param) {
+  // The codec may already exist (created from an rtpmap line); start from it.
+  U codec = GetCodec(static_cast<T*>(content_desc)->codecs(), payload_type);
+  AddFeedbackParameter(feedback_param, &codec);
+  AddOrReplaceCodec<T, U>(content_desc, codec);
+}
+
+// Removes the codec with the wildcard ("*") payload type from |codecs|, if
+// present, and copies it to |wildcard_codec|. Returns true if one was found.
+bool PopWildcardCodec(std::vector<cricket::VideoCodec>* codecs,
+                      cricket::VideoCodec* wildcard_codec) {
+  for (size_t i = 0; i < codecs->size(); ++i) {
+    if ((*codecs)[i].id == kWildcardPayloadType) {
+      *wildcard_codec = (*codecs)[i];
+      codecs->erase(codecs->begin() + i);
+      return true;
+    }
+  }
+  return false;
+}
+
+// Applies the feedback parameters of a wildcard ("a=rtcp-fb:* ...") codec
+// entry, if any, to every other video codec in |video_desc|, and removes the
+// wildcard entry itself.
+void UpdateFromWildcardVideoCodecs(VideoContentDescription* video_desc) {
+  cricket::VideoCodec wildcard_codec;
+  std::vector<cricket::VideoCodec> codecs = video_desc->codecs();
+  if (!PopWildcardCodec(&codecs, &wildcard_codec)) {
+    return;  // No wildcard entry; nothing to distribute.
+  }
+  for (size_t i = 0; i < codecs.size(); ++i) {
+    AddFeedbackParameters(wildcard_codec.feedback_params, &codecs[i]);
+  }
+  video_desc->set_codecs(codecs);
+}
+
+// Sets codec parameter |name| to |value| on every audio codec in
+// |audio_desc|. A no-op when |value| is empty.
+void AddAudioAttribute(const std::string& name, const std::string& value,
+                       AudioContentDescription* audio_desc) {
+  if (value.empty()) {
+    return;
+  }
+  std::vector<cricket::AudioCodec> codecs = audio_desc->codecs();
+  for (size_t i = 0; i < codecs.size(); ++i) {
+    codecs[i].params[name] = value;
+  }
+  audio_desc->set_codecs(codecs);
+}
+
+// Parses one media section -- the lines between an "m=" line and either the
+// next "m=" line or the end of |message| -- filling in |media_desc|,
+// |transport| and |candidates|.
+// |*pos| is the current offset into |message| and is advanced past the
+// section. The "a=mid" value is returned through |content_name| and is also
+// used as the sdp_mid of the created JsepIceCandidates.
+// Returns false and fills |error| when a line cannot be parsed.
+bool ParseContent(const std::string& message,
+                  const MediaType media_type,
+                  int mline_index,
+                  const std::string& protocol,
+                  const std::vector<int>& codec_preference,
+                  size_t* pos,
+                  std::string* content_name,
+                  MediaContentDescription* media_desc,
+                  TransportDescription* transport,
+                  std::vector<JsepIceCandidate*>* candidates,
+                  SdpParseError* error) {
+  ASSERT(media_desc != NULL);
+  ASSERT(content_name != NULL);
+  ASSERT(transport != NULL);
+
+  // The media level "ice-ufrag" and "ice-pwd".
+  // The candidates before update the media level "ice-pwd" and "ice-ufrag".
+  Candidates candidates_orig;
+  std::string line;
+  std::string mline_id;
+  // Tracks created out of the ssrc attributes.
+  StreamParamsVec tracks;
+  SsrcInfoVec ssrc_infos;
+  SsrcGroupVec ssrc_groups;
+  std::string maxptime_as_string;
+  std::string ptime_as_string;
+
+  // SCTP sections use a non-RTP protocol; most attributes below only apply
+  // to RTP-based media.
+  bool is_rtp =
+      protocol.empty() ||
+      talk_base::starts_with(protocol.data(),
+                             cricket::kMediaProtocolRtpPrefix);
+
+  // Loop until the next m line
+  while (!IsLineType(message, kLineTypeMedia, *pos)) {
+    if (!GetLine(message, pos, &line)) {
+      if (*pos >= message.size()) {
+        break;  // Done parsing
+      } else {
+        return ParseFailed(message, *pos, "Can't find valid SDP line.", error);
+      }
+    }
+
+    // RFC 4566
+    // b=* (zero or more bandwidth information lines)
+    if (IsLineType(line, kLineTypeSessionBandwidth)) {
+      std::string bandwidth;
+      if (HasAttribute(line, kApplicationSpecificMaximum)) {
+        if (!GetValue(line, kApplicationSpecificMaximum, &bandwidth, error)) {
+          return false;
+        } else {
+          // b=AS is expressed in kbps; set_bandwidth expects bps.
+          media_desc->set_bandwidth(
+              talk_base::FromString<int>(bandwidth) * 1000);
+        }
+      }
+      continue;
+    }
+
+    if (!IsLineType(line, kLineTypeAttributes)) {
+      // TODO: Handle other lines if needed.
+      LOG(LS_INFO) << "Ignored line: " << line;
+      continue;
+    }
+
+    // Handle attributes common to SCTP and RTP.
+    if (HasAttribute(line, kAttributeMid)) {
+      // RFC 3388
+      // mid-attribute      = "a=mid:" identification-tag
+      // identification-tag = token
+      // Use the mid identification-tag as the content name.
+      if (!GetValue(line, kAttributeMid, &mline_id, error)) {
+        return false;
+      }
+      *content_name = mline_id;
+    } else if (HasAttribute(line, kAttributeCandidate)) {
+      Candidate candidate;
+      if (!ParseCandidate(line, &candidate, error, false)) {
+        return false;
+      }
+      candidates_orig.push_back(candidate);
+    } else if (HasAttribute(line, kAttributeIceUfrag)) {
+      if (!GetValue(line, kAttributeIceUfrag, &transport->ice_ufrag, error)) {
+        return false;
+      }
+    } else if (HasAttribute(line, kAttributeIcePwd)) {
+      if (!GetValue(line, kAttributeIcePwd, &transport->ice_pwd, error)) {
+        return false;
+      }
+    } else if (HasAttribute(line, kAttributeIceOption)) {
+      if (!ParseIceOptions(line, &transport->transport_options, error)) {
+        return false;
+      }
+    } else if (HasAttribute(line, kAttributeFmtp)) {
+      if (!ParseFmtpAttributes(line, media_type, media_desc, error)) {
+        return false;
+      }
+    } else if (HasAttribute(line, kAttributeFingerprint)) {
+      talk_base::SSLFingerprint* fingerprint = NULL;
+
+      if (!ParseFingerprintAttribute(line, &fingerprint, error)) {
+        return false;
+      }
+      transport->identity_fingerprint.reset(fingerprint);
+    } else if (is_rtp) {
+      //
+      // RTP specific attributes
+      //
+      if (HasAttribute(line, kAttributeRtcpMux)) {
+        media_desc->set_rtcp_mux(true);
+      } else if (HasAttribute(line, kAttributeSsrcGroup)) {
+        if (!ParseSsrcGroupAttribute(line, &ssrc_groups, error)) {
+          return false;
+        }
+      } else if (HasAttribute(line, kAttributeSsrc)) {
+        if (!ParseSsrcAttribute(line, &ssrc_infos, error)) {
+          return false;
+        }
+      } else if (HasAttribute(line, kAttributeCrypto)) {
+        if (!ParseCryptoAttribute(line, media_desc, error)) {
+          return false;
+        }
+      } else if (HasAttribute(line, kAttributeRtpmap)) {
+        if (!ParseRtpmapAttribute(line, media_type, codec_preference,
+                                  media_desc, error)) {
+          return false;
+        }
+      } else if (HasAttribute(line, kCodecParamMaxPTime)) {
+        if (!GetValue(line, kCodecParamMaxPTime, &maxptime_as_string, error)) {
+          return false;
+        }
+      } else if (HasAttribute(line, kAttributeRtcpFb)) {
+        if (!ParseRtcpFbAttribute(line, media_type, media_desc, error)) {
+          return false;
+        }
+      } else if (HasAttribute(line, kCodecParamPTime)) {
+        if (!GetValue(line, kCodecParamPTime, &ptime_as_string, error)) {
+          return false;
+        }
+      } else if (HasAttribute(line, kAttributeSendOnly)) {
+        media_desc->set_direction(cricket::MD_SENDONLY);
+      } else if (HasAttribute(line, kAttributeRecvOnly)) {
+        media_desc->set_direction(cricket::MD_RECVONLY);
+      } else if (HasAttribute(line, kAttributeInactive)) {
+        media_desc->set_direction(cricket::MD_INACTIVE);
+      } else if (HasAttribute(line, kAttributeSendRecv)) {
+        media_desc->set_direction(cricket::MD_SENDRECV);
+      } else if (HasAttribute(line, kAttributeExtmap)) {
+        RtpHeaderExtension extmap;
+        if (!ParseExtmap(line, &extmap, error)) {
+          return false;
+        }
+        media_desc->AddRtpHeaderExtension(extmap);
+      } else if (HasAttribute(line, kAttributeXGoogleFlag)) {
+        // Experimental attribute.  Conference mode activates more aggressive
+        // AEC and NS settings.
+        // TODO: expose API to set these directly.
+        std::string flag_value;
+        if (!GetValue(line, kAttributeXGoogleFlag, &flag_value, error)) {
+          return false;
+        }
+        if (flag_value.compare(kValueConference) == 0)
+          media_desc->set_conference_mode(true);
+      } else if (HasAttribute(line, kAttributeXGoogleBufferLatency)) {
+        // Experimental attribute.
+        // TODO: expose API to set this directly.
+        std::string flag_value;
+        if (!GetValue(line, kAttributeXGoogleBufferLatency, &flag_value,
+                      error)) {
+          return false;
+        }
+        int buffer_latency = 0;
+        if (!talk_base::FromString(flag_value, &buffer_latency) ||
+            buffer_latency < 0) {
+          return ParseFailed(message, "Invalid buffer latency.", error);
+        }
+        media_desc->set_buffered_mode_latency(buffer_latency);
+      }
+    } else {
+      // Only parse lines that we are interested in.
+      LOG(LS_INFO) << "Ignored line: " << line;
+      continue;
+    }
+  }
+
+  // Create tracks from the |ssrc_infos|.
+  CreateTracksFromSsrcInfos(ssrc_infos, &tracks);
+
+  // Add the ssrc group to the track.
+  for (SsrcGroupVec::iterator ssrc_group = ssrc_groups.begin();
+       ssrc_group != ssrc_groups.end(); ++ssrc_group) {
+    if (ssrc_group->ssrcs.empty()) {
+      continue;
+    }
+    uint32 ssrc = ssrc_group->ssrcs.front();
+    for (StreamParamsVec::iterator track = tracks.begin();
+         track != tracks.end(); ++track) {
+      if (track->has_ssrc(ssrc)) {
+        track->ssrc_groups.push_back(*ssrc_group);
+      }
+    }
+  }
+
+  // Add the new tracks to the |media_desc|.
+  for (StreamParamsVec::iterator track = tracks.begin();
+       track != tracks.end(); ++track) {
+    media_desc->AddStream(*track);
+  }
+
+  if (media_type == cricket::MEDIA_TYPE_AUDIO) {
+    AudioContentDescription* audio_desc =
+        static_cast<AudioContentDescription*>(media_desc);
+    // Verify audio codec ensures that no audio codec has been populated with
+    // only fmtp.
+    if (!VerifyAudioCodecs(audio_desc)) {
+      return ParseFailed("Failed to parse audio codecs correctly.", error);
+    }
+    AddAudioAttribute(kCodecParamMaxPTime, maxptime_as_string, audio_desc);
+    AddAudioAttribute(kCodecParamPTime, ptime_as_string, audio_desc);
+  }
+
+  if (media_type == cricket::MEDIA_TYPE_VIDEO) {
+    VideoContentDescription* video_desc =
+        static_cast<VideoContentDescription*>(media_desc);
+    UpdateFromWildcardVideoCodecs(video_desc);
+    // Verify video codec ensures that no video codec has been populated with
+    // only rtcp-fb.
+    if (!VerifyVideoCodecs(video_desc)) {
+      return ParseFailed("Failed to parse video codecs correctly.", error);
+    }
+  }
+
+  // RFC 5245
+  // Update the candidates with the media level "ice-pwd" and "ice-ufrag".
+  for (Candidates::iterator it = candidates_orig.begin();
+       it != candidates_orig.end(); ++it) {
+    ASSERT((*it).username().empty());
+    (*it).set_username(transport->ice_ufrag);
+    ASSERT((*it).password().empty());
+    (*it).set_password(transport->ice_pwd);
+    candidates->push_back(
+        new JsepIceCandidate(mline_id, mline_index, *it));
+  }
+  return true;
+}
+
+// Parses an "a=ssrc:<ssrc-id> <attribute>[:<value>]" line (RFC 5576) and
+// merges the attribute into the SsrcInfo entry for that ssrc-id in
+// |ssrc_infos|, creating a new entry when none exists yet.
+// Recognized attributes: cname, msid (identifier plus optional appdata),
+// mslabel and label; unrecognized attributes are silently ignored.
+bool ParseSsrcAttribute(const std::string& line, SsrcInfoVec* ssrc_infos,
+                        SdpParseError* error) {
+  ASSERT(ssrc_infos != NULL);
+  // RFC 5576
+  // a=ssrc:<ssrc-id> <attribute>
+  // a=ssrc:<ssrc-id> <attribute>:<value>
+  std::string field1, field2;
+  if (!SplitByDelimiter(line.substr(kLinePrefixLength),
+                        kSdpDelimiterSpace,
+                        &field1,
+                        &field2)) {
+    const size_t expected_fields = 2;
+    return ParseFailedExpectFieldNum(line, expected_fields, error);
+  }
+
+  // ssrc:<ssrc-id>
+  std::string ssrc_id_s;
+  if (!GetValue(field1, kAttributeSsrc, &ssrc_id_s, error)) {
+    return false;
+  }
+  uint32 ssrc_id = talk_base::FromString<uint32>(ssrc_id_s);
+
+  std::string attribute;
+  std::string value;
+  if (!SplitByDelimiter(field2, kSdpDelimiterColon,
+                        &attribute, &value)) {
+    std::ostringstream description;
+    description << "Failed to get the ssrc attribute value from " << field2
+                << ". Expected format <attribute>:<value>.";
+    return ParseFailed(line, description.str(), error);
+  }
+
+  // Check if there's already an item for this |ssrc_id|. Create a new one if
+  // there isn't.
+  SsrcInfoVec::iterator ssrc_info = ssrc_infos->begin();
+  for (; ssrc_info != ssrc_infos->end(); ++ssrc_info) {
+    if (ssrc_info->ssrc_id == ssrc_id) {
+      break;
+    }
+  }
+  if (ssrc_info == ssrc_infos->end()) {
+    SsrcInfo info;
+    info.ssrc_id = ssrc_id;
+    ssrc_infos->push_back(info);
+    // Re-acquire the iterator: push_back may have invalidated the old one.
+    ssrc_info = ssrc_infos->end() - 1;
+  }
+
+  // Store the info to the |ssrc_info|.
+  if (attribute == kSsrcAttributeCname) {
+    // RFC 5576
+    // cname:<value>
+    ssrc_info->cname = value;
+  } else if (attribute == kSsrcAttributeMsid) {
+    // draft-alvestrand-mmusic-msid-00
+    // "msid:" identifier [ " " appdata ]
+    std::vector<std::string> fields;
+    talk_base::split(value, kSdpDelimiterSpace, &fields);
+    if (fields.size() < 1 || fields.size() > 2) {
+      return ParseFailed(line,
+                         "Expected format \"msid:<identifier>[ <appdata>]\".",
+                         error);
+    }
+    ssrc_info->msid_identifier = fields[0];
+    if (fields.size() == 2) {
+      ssrc_info->msid_appdata = fields[1];
+    }
+  } else if (attribute == kSsrcAttributeMslabel) {
+    // draft-alvestrand-rtcweb-mid-01
+    // mslabel:<value>
+    ssrc_info->mslabel = value;
+  } else if (attribute == kSSrcAttributeLabel) {
+    // The label isn't defined.
+    // label:<value>
+    ssrc_info->label = value;
+  }
+  return true;
+}
+
+// Parses an "a=ssrc-group:<semantics> <ssrc-id> ..." line (RFC 5576) and
+// appends the resulting SsrcGroup to |ssrc_groups|.
+bool ParseSsrcGroupAttribute(const std::string& line,
+                             SsrcGroupVec* ssrc_groups,
+                             SdpParseError* error) {
+  ASSERT(ssrc_groups != NULL);
+  std::vector<std::string> fields;
+  talk_base::split(line.substr(kLinePrefixLength),
+                   kSdpDelimiterSpace, &fields);
+  // At least the semantics token plus one ssrc-id is required.
+  const size_t expected_min_fields = 2;
+  if (fields.size() < expected_min_fields) {
+    return ParseFailedExpectMinFieldNum(line, expected_min_fields, error);
+  }
+  std::string semantics;
+  if (!GetValue(fields[0], kAttributeSsrcGroup, &semantics, error)) {
+    return false;
+  }
+  // Every remaining field is an ssrc-id belonging to the group.
+  std::vector<uint32> ssrcs;
+  for (size_t i = 1; i < fields.size(); ++i) {
+    ssrcs.push_back(talk_base::FromString<uint32>(fields[i]));
+  }
+  ssrc_groups->push_back(SsrcGroup(semantics, ssrcs));
+  return true;
+}
+
+// Parses an "a=crypto:<tag> <crypto-suite> <key-params> [<session-params>]"
+// line (RFC 4568) and adds the resulting CryptoParams to |media_desc|.
+bool ParseCryptoAttribute(const std::string& line,
+                          MediaContentDescription* media_desc,
+                          SdpParseError* error) {
+  std::vector<std::string> fields;
+  talk_base::split(line.substr(kLinePrefixLength),
+                   kSdpDelimiterSpace, &fields);
+  // Tag, crypto-suite and key-params are mandatory.
+  const size_t expected_min_fields = 3;
+  if (fields.size() < expected_min_fields) {
+    return ParseFailedExpectMinFieldNum(line, expected_min_fields, error);
+  }
+  std::string tag_value;
+  if (!GetValue(fields[0], kAttributeCrypto, &tag_value, error)) {
+    return false;
+  }
+  const int tag = talk_base::FromString<int>(tag_value);
+  // <session-params> is optional; leave it empty when absent.
+  std::string session_params;
+  if (fields.size() > 3) {
+    session_params = fields[3];
+  }
+  media_desc->AddCrypto(
+      CryptoParams(tag, fields[1], fields[2], session_params));
+  return true;
+}
+
+// Updates or creates the audio codec entry for |payload_type| with the
+// rtpmap-derived fields |name|, |clockrate|, |bitrate|, |channels| and
+// |preference|.
+void UpdateCodec(int payload_type, const std::string& name, int clockrate,
+                 int bitrate, int channels, int preference,
+                 AudioContentDescription* audio_desc) {
+  // The codec may already carry optional parameters from an earlier fmtp.
+  cricket::AudioCodec updated_codec =
+      GetCodec(audio_desc->codecs(), payload_type);
+  updated_codec.name = name;
+  updated_codec.clockrate = clockrate;
+  updated_codec.bitrate = bitrate;
+  updated_codec.channels = channels;
+  updated_codec.preference = preference;
+  AddOrReplaceCodec<AudioContentDescription, cricket::AudioCodec>(
+      audio_desc, updated_codec);
+}
+
+// Updates or creates the video codec entry for |payload_type| with the
+// rtpmap-derived fields |name|, |width|, |height|, |framerate| and
+// |preference|.
+void UpdateCodec(int payload_type, const std::string& name, int width,
+                 int height, int framerate, int preference,
+                 VideoContentDescription* video_desc) {
+  // The codec may already carry optional parameters from an earlier fmtp.
+  cricket::VideoCodec updated_codec =
+      GetCodec(video_desc->codecs(), payload_type);
+  updated_codec.name = name;
+  updated_codec.width = width;
+  updated_codec.height = height;
+  updated_codec.framerate = framerate;
+  updated_codec.preference = preference;
+  AddOrReplaceCodec<VideoContentDescription, cricket::VideoCodec>(
+      video_desc, updated_codec);
+}
+
+// Parses an "a=rtpmap:<payload type> <encoding>/<clock rate>[/<params>]"
+// line (RFC 4566) and updates or creates the matching codec entry in
+// |media_desc|. |codec_preference| is the payload-type order from the
+// m-line's <fmt> list and determines the codec's preference value.
+// Rtpmap lines whose payload type is absent from the m-line are ignored.
+bool ParseRtpmapAttribute(const std::string& line,
+                          const MediaType media_type,
+                          const std::vector<int>& codec_preference,
+                          MediaContentDescription* media_desc,
+                          SdpParseError* error) {
+  std::vector<std::string> fields;
+  talk_base::split(line.substr(kLinePrefixLength),
+                   kSdpDelimiterSpace, &fields);
+  // RFC 4566
+  // a=rtpmap:<payload type> <encoding name>/<clock rate>[/<encodingparameters>]
+  const size_t expected_min_fields = 2;
+  if (fields.size() < expected_min_fields) {
+    return ParseFailedExpectMinFieldNum(line, expected_min_fields, error);
+  }
+  std::string payload_type_value;
+  if (!GetValue(fields[0], kAttributeRtpmap, &payload_type_value, error)) {
+    return false;
+  }
+  const int payload_type = talk_base::FromString<int>(payload_type_value);
+
+  // Set the preference order depending on the order of the pl type in the
+  // <fmt> of the m-line. Earlier entries get a larger preference; a payload
+  // type that is not found yields end() - end() == 0.
+  const int preference = codec_preference.end() -
+      std::find(codec_preference.begin(), codec_preference.end(),
+                payload_type);
+  if (preference == 0) {
+    LOG(LS_WARNING) << "Ignore rtpmap line that did not appear in the "
+                    << "<fmt> of the m-line: " << line;
+    return true;
+  }
+  const std::string encoder = fields[1];
+  std::vector<std::string> codec_params;
+  talk_base::split(encoder, '/', &codec_params);
+  // <encoding name>/<clock rate>[/<encodingparameters>]
+  // 2 mandatory fields
+  if (codec_params.size() < 2 || codec_params.size() > 3) {
+    return ParseFailed(line,
+                       "Expected format \"<encoding name>/<clock rate>"
+                       "[/<encodingparameters>]\".",
+                       error);
+  }
+  const std::string encoding_name = codec_params[0];
+  const int clock_rate = talk_base::FromString<int>(codec_params[1]);
+  if (media_type == cricket::MEDIA_TYPE_VIDEO) {
+    VideoContentDescription* video_desc =
+        static_cast<VideoContentDescription*>(media_desc);
+    // TODO: We will send resolution in SDP. For now use
+    // JsepSessionDescription::kMaxVideoCodecWidth and kMaxVideoCodecHeight.
+    UpdateCodec(payload_type, encoding_name,
+                JsepSessionDescription::kMaxVideoCodecWidth,
+                JsepSessionDescription::kMaxVideoCodecHeight,
+                JsepSessionDescription::kDefaultVideoCodecFramerate,
+                preference, video_desc);
+  } else if (media_type == cricket::MEDIA_TYPE_AUDIO) {
+    // RFC 4566
+    // For audio streams, <encoding parameters> indicates the number
+    // of audio channels.  This parameter is OPTIONAL and may be
+    // omitted if the number of channels is one, provided that no
+    // additional parameters are needed.
+    int channels = 1;
+    if (codec_params.size() == 3) {
+      channels = talk_base::FromString<int>(codec_params[2]);
+    }
+    int bitrate = 0;
+    // The default behavior for ISAC (bitrate == 0) in webrtcvoiceengine.cc
+    // (specifically FindWebRtcCodec) is bandwidth-adaptive variable bitrate.
+    // The bandwidth adaptation doesn't always work well, so this code
+    // sets a fixed target bitrate instead.
+    if (_stricmp(encoding_name.c_str(), kIsacCodecName) == 0) {
+      if (clock_rate <= 16000) {
+        bitrate = kIsacWbDefaultRate;
+      } else {
+        bitrate = kIsacSwbDefaultRate;
+      }
+    }
+    AudioContentDescription* audio_desc =
+        static_cast<AudioContentDescription*>(media_desc);
+    UpdateCodec(payload_type, encoding_name, clock_rate, bitrate, channels,
+                preference, audio_desc);
+  } else if (media_type == cricket::MEDIA_TYPE_DATA) {
+    DataContentDescription* data_desc =
+        static_cast<DataContentDescription*>(media_desc);
+    data_desc->AddCodec(cricket::DataCodec(payload_type, encoding_name,
+                                           preference));
+  }
+  return true;
+}
+
+// Truncates |message| at the first occurrence of |delimiter|, removing the
+// delimiter and everything after it. Leaves |message| unchanged when the
+// delimiter does not occur.
+void PruneRight(const char delimiter, std::string* message) {
+  const size_t pos = message->find(delimiter);
+  if (pos != std::string::npos) {
+    message->erase(pos);
+  }
+}
+
+// Splits a single "<param>=<value>" token from an fmtp line into |parameter|
+// and |value|. Returns false and fills |error| when no '=' is present.
+bool ParseFmtpParam(const std::string& line, std::string* parameter,
+                    std::string* value, SdpParseError* error) {
+  if (!SplitByDelimiter(line, kSdpDelimiterEqual, parameter, value)) {
+    ParseFailed(line, "Unable to parse fmtp parameter. \'=\' missing.", error);
+    return false;
+  }
+  // a=fmtp:<payload_type> <param1>=<value1>; <param2>=<value2>; ...
+  // When parsing the values the trailing ";" gets picked up. Remove them.
+  PruneRight(kSdpDelimiterSemicolon, value);
+  return true;
+}
+
+// Parses an "a=fmtp:<format> <format specific parameters>" line (RFC 4566)
+// and merges its "<param>=<value>" pairs into the codec matching the payload
+// type in |media_desc|. Sections other than audio/video are ignored, as are
+// fmtp parameters without an '='.
+bool ParseFmtpAttributes(const std::string& line, const MediaType media_type,
+                         MediaContentDescription* media_desc,
+                         SdpParseError* error) {
+  if (media_type != cricket::MEDIA_TYPE_AUDIO &&
+      media_type != cricket::MEDIA_TYPE_VIDEO) {
+    return true;  // fmtp only applies to audio and video sections.
+  }
+  std::vector<std::string> fields;
+  talk_base::split(line.substr(kLinePrefixLength),
+                   kSdpDelimiterSpace, &fields);
+
+  // At least the format plus one of the optional parameters is required.
+  if (fields.size() < 2) {
+    ParseFailedExpectMinFieldNum(line, 2, error);
+    return false;
+  }
+
+  std::string payload_type;
+  if (!GetValue(fields[0], kAttributeFmtp, &payload_type, error)) {
+    return false;
+  }
+
+  cricket::CodecParameterMap codec_params;
+  for (size_t i = 1; i < fields.size(); ++i) {
+    if (fields[i].find(kSdpDelimiterEqual) == std::string::npos) {
+      // Only fmtps with equals are currently supported. Other fmtp types
+      // should be ignored. Unknown fmtps do not constitute an error.
+      continue;
+    }
+    std::string name;
+    std::string value;
+    if (!ParseFmtpParam(fields[i], &name, &value, error)) {
+      return false;
+    }
+    codec_params[name] = value;
+  }
+
+  const int int_payload_type = talk_base::FromString<int>(payload_type);
+  if (media_type == cricket::MEDIA_TYPE_AUDIO) {
+    UpdateCodec<AudioContentDescription, cricket::AudioCodec>(
+        media_desc, int_payload_type, codec_params);
+  } else {
+    // Guard above guarantees this is MEDIA_TYPE_VIDEO.
+    UpdateCodec<VideoContentDescription, cricket::VideoCodec>(
+        media_desc, int_payload_type, codec_params);
+  }
+  return true;
+}
+
+// Parses an "a=rtcp-fb:<payload type> <id> [param...]" line and adds the
+// feedback parameter to the matching codec in |media_desc|. A payload type
+// of "*" maps to kWildcardPayloadType (later distributed to all video codecs
+// by UpdateFromWildcardVideoCodecs). Non audio/video sections are ignored.
+bool ParseRtcpFbAttribute(const std::string& line, const MediaType media_type,
+                          MediaContentDescription* media_desc,
+                          SdpParseError* error) {
+  if (media_type != cricket::MEDIA_TYPE_AUDIO &&
+      media_type != cricket::MEDIA_TYPE_VIDEO) {
+    return true;
+  }
+  std::vector<std::string> rtcp_fb_fields;
+  talk_base::split(line.c_str(), kSdpDelimiterSpace, &rtcp_fb_fields);
+  if (rtcp_fb_fields.size() < 2) {
+    return ParseFailedGetValue(line, kAttributeRtcpFb, error);
+  }
+  std::string payload_type_string;
+  if (!GetValue(rtcp_fb_fields[0], kAttributeRtcpFb, &payload_type_string,
+                error)) {
+    return false;
+  }
+  int payload_type = (payload_type_string == "*") ?
+      kWildcardPayloadType : talk_base::FromString<int>(payload_type_string);
+  std::string id = rtcp_fb_fields[1];
+  // NOTE(review): any fields after the id are concatenated with no separator
+  // between them -- confirm multi-token rtcp-fb parameters are intended to be
+  // joined this way.
+  std::string param = "";
+  for (std::vector<std::string>::iterator iter = rtcp_fb_fields.begin() + 2;
+       iter != rtcp_fb_fields.end(); ++iter) {
+    param.append(*iter);
+  }
+  const cricket::FeedbackParam feedback_param(id, param);
+
+  if (media_type == cricket::MEDIA_TYPE_AUDIO) {
+    UpdateCodec<AudioContentDescription, cricket::AudioCodec>(media_desc,
+                                                              payload_type,
+                                                              feedback_param);
+  } else if (media_type == cricket::MEDIA_TYPE_VIDEO) {
+    UpdateCodec<VideoContentDescription, cricket::VideoCodec>(media_desc,
+                                                              payload_type,
+                                                              feedback_param);
+  }
+  return true;
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/webrtcsdp.h b/talk/app/webrtc/webrtcsdp.h
new file mode 100644
index 0000000..c2f93a0
--- /dev/null
+++ b/talk/app/webrtc/webrtcsdp.h
@@ -0,0 +1,81 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// This file contains functions for parsing and serializing SDP messages.
+// Related RFC/draft including:
+// * RFC 4566 - SDP
+// * RFC 5245 - ICE
+// * RFC 3388 - Grouping of Media Lines in SDP
+// * RFC 4568 - SDP Security Descriptions for Media Streams
+// * draft-lennox-mmusic-sdp-source-selection-02 -
+// Mechanisms for Media Source Selection in SDP
+
+#ifndef TALK_APP_WEBRTC_WEBRTCSDP_H_
+#define TALK_APP_WEBRTC_WEBRTCSDP_H_
+
+#include <string>
+
+namespace webrtc {
+
+class IceCandidateInterface;
+class JsepIceCandidate;
+class JsepSessionDescription;
+struct SdpParseError;
+
+// Serializes the passed in JsepSessionDescription.
+// Serialize SessionDescription including candidates if
+// JsepSessionDescription has candidates.
+// jdesc - The JsepSessionDescription object to be serialized.
+// return - SDP string serialized from the arguments.
+std::string SdpSerialize(const JsepSessionDescription& jdesc);
+
+// Serializes the passed in IceCandidateInterface to a SDP string.
+// candidate - The candidate to be serialized.
+std::string SdpSerializeCandidate(const IceCandidateInterface& candidate);
+
+// Deserializes the passed in SDP string to a JsepSessionDescription.
+// message - SDP string to be deserialized.
+// jdesc - The JsepSessionDescription deserialized from the SDP string.
+// error - The detail error information when parsing fails.
+// return - true on success, false on failure.
+bool SdpDeserialize(const std::string& message,
+ JsepSessionDescription* jdesc,
+ SdpParseError* error);
+
+// Deserializes the passed in SDP string to one JsepIceCandidate.
+// The first line must be a=candidate line and only the first line will be
+// parsed.
+// message - The SDP string to be deserialized.
+// candidate - The JsepIceCandidate from the SDP string.
+// error - The detail error information when parsing fails.
+// return - true on success, false on failure.
+bool SdpDeserializeCandidate(const std::string& message,
+ JsepIceCandidate* candidate,
+ SdpParseError* error);
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_WEBRTCSDP_H_
diff --git a/talk/app/webrtc/webrtcsdp_unittest.cc b/talk/app/webrtc/webrtcsdp_unittest.cc
new file mode 100644
index 0000000..9c3debd
--- /dev/null
+++ b/talk/app/webrtc/webrtcsdp_unittest.cc
@@ -0,0 +1,1961 @@
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "talk/app/webrtc/jsepsessiondescription.h"
+#include "talk/app/webrtc/webrtcsdp.h"
+#include "talk/base/gunit.h"
+#include "talk/base/logging.h"
+#include "talk/base/messagedigest.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/sslfingerprint.h"
+#include "talk/base/stringencode.h"
+#include "talk/base/stringutils.h"
+#include "talk/media/base/constants.h"
+#include "talk/p2p/base/constants.h"
+#include "talk/session/media/mediasession.h"
+
+using cricket::AudioCodec;
+using cricket::AudioContentDescription;
+using cricket::Candidate;
+using cricket::ContentInfo;
+using cricket::CryptoParams;
+using cricket::ContentGroup;
+using cricket::DataCodec;
+using cricket::DataContentDescription;
+using cricket::ICE_CANDIDATE_COMPONENT_RTCP;
+using cricket::ICE_CANDIDATE_COMPONENT_RTP;
+using cricket::kFecSsrcGroupSemantics;
+using cricket::LOCAL_PORT_TYPE;
+using cricket::NS_JINGLE_DRAFT_SCTP;
+using cricket::NS_JINGLE_ICE_UDP;
+using cricket::NS_JINGLE_RTP;
+using cricket::RtpHeaderExtension;
+using cricket::RELAY_PORT_TYPE;
+using cricket::SessionDescription;
+using cricket::StreamParams;
+using cricket::STUN_PORT_TYPE;
+using cricket::TransportDescription;
+using cricket::TransportInfo;
+using cricket::VideoCodec;
+using cricket::VideoContentDescription;
+using webrtc::IceCandidateCollection;
+using webrtc::IceCandidateInterface;
+using webrtc::JsepIceCandidate;
+using webrtc::JsepSessionDescription;
+using webrtc::SdpParseError;
+using webrtc::SessionDescriptionInterface;
+
+typedef std::vector<AudioCodec> AudioCodecs;
+typedef std::vector<Candidate> Candidates;
+
+static const char kSessionTime[] = "t=0 0\r\n";
+static const uint32 kCandidatePriority = 2130706432U; // pref = 1.0
+static const char kCandidateUfragVoice[] = "ufrag_voice";
+static const char kCandidatePwdVoice[] = "pwd_voice";
+static const char kAttributeIcePwdVoice[] = "a=ice-pwd:pwd_voice\r\n";
+static const char kCandidateUfragVideo[] = "ufrag_video";
+static const char kCandidatePwdVideo[] = "pwd_video";
+static const char kCandidateUfragData[] = "ufrag_data";
+static const char kCandidatePwdData[] = "pwd_data";
+static const char kAttributeIcePwdVideo[] = "a=ice-pwd:pwd_video\r\n";
+static const uint32 kCandidateGeneration = 2;
+static const char kCandidateFoundation1[] = "a0+B/1";
+static const char kCandidateFoundation2[] = "a0+B/2";
+static const char kCandidateFoundation3[] = "a0+B/3";
+static const char kCandidateFoundation4[] = "a0+B/4";
+static const char kAttributeCryptoVoice[] =
+ "a=crypto:1 AES_CM_128_HMAC_SHA1_32 "
+ "inline:NzB4d1BINUAvLEw6UzF3WSJ+PSdFcGdUJShpX1Zj|2^20|1:32 "
+ "dummy_session_params\r\n";
+static const char kAttributeCryptoVideo[] =
+ "a=crypto:1 AES_CM_128_HMAC_SHA1_80 "
+ "inline:d0RmdmcmVCspeEc3QGZiNWpVLFJhQX1cfHAwJSoj|2^20|1:32\r\n";
+static const char kFingerprint[] = "a=fingerprint:sha-1 "
+ "4A:AD:B9:B1:3F:82:18:3B:54:02:12:DF:3E:5D:49:6B:19:E5:7C:AB\r\n";
+static const int kExtmapId = 1;
+static const char kExtmapUri[] = "http://example.com/082005/ext.htm#ttime";
+static const char kExtmap[] =
+ "a=extmap:1 http://example.com/082005/ext.htm#ttime\r\n";
+static const char kExtmapWithDirectionAndAttribute[] =
+ "a=extmap:1/sendrecv http://example.com/082005/ext.htm#ttime a1 a2\r\n";
+
+static const uint8 kIdentityDigest[] = {0x4A, 0xAD, 0xB9, 0xB1,
+ 0x3F, 0x82, 0x18, 0x3B,
+ 0x54, 0x02, 0x12, 0xDF,
+ 0x3E, 0x5D, 0x49, 0x6B,
+ 0x19, 0xE5, 0x7C, 0xAB};
+
+struct CodecParams {
+ int max_ptime;
+ int ptime;
+ int min_ptime;
+ int sprop_stereo;
+ int stereo;
+ int useinband;
+};
+
+// Reference sdp string
+static const char kSdpFullString[] =
+ "v=0\r\n"
+ "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n"
+ "s=-\r\n"
+ "t=0 0\r\n"
+ "a=msid-semantic: WMS local_stream_1 local_stream_2\r\n"
+ "m=audio 2345 RTP/SAVPF 111 103 104\r\n"
+ "c=IN IP4 74.125.127.126\r\n"
+ "a=rtcp:2347 IN IP4 74.125.127.126\r\n"
+ "a=candidate:a0+B/1 1 udp 2130706432 192.168.1.5 1234 typ host "
+ "generation 2\r\n"
+ "a=candidate:a0+B/1 2 udp 2130706432 192.168.1.5 1235 typ host "
+ "generation 2\r\n"
+ "a=candidate:a0+B/2 1 udp 2130706432 ::1 1238 typ host "
+ "generation 2\r\n"
+ "a=candidate:a0+B/2 2 udp 2130706432 ::1 1239 typ host "
+ "generation 2\r\n"
+ "a=candidate:a0+B/3 1 udp 2130706432 74.125.127.126 2345 typ srflx "
+ "raddr 192.168.1.5 rport 2346 "
+ "generation 2\r\n"
+ "a=candidate:a0+B/3 2 udp 2130706432 74.125.127.126 2347 typ srflx "
+ "raddr 192.168.1.5 rport 2348 "
+ "generation 2\r\n"
+ "a=ice-ufrag:ufrag_voice\r\na=ice-pwd:pwd_voice\r\n"
+ "a=mid:audio_content_name\r\n"
+ "a=sendrecv\r\n"
+ "a=rtcp-mux\r\n"
+ "a=crypto:1 AES_CM_128_HMAC_SHA1_32 "
+ "inline:NzB4d1BINUAvLEw6UzF3WSJ+PSdFcGdUJShpX1Zj|2^20|1:32 "
+ "dummy_session_params\r\n"
+ "a=rtpmap:111 opus/48000/2\r\n"
+ "a=rtpmap:103 ISAC/16000\r\n"
+ "a=rtpmap:104 CELT/32000/2\r\n"
+ "a=ssrc:1 cname:stream_1_cname\r\n"
+ "a=ssrc:1 msid:local_stream_1 audio_track_id_1\r\n"
+ "a=ssrc:1 mslabel:local_stream_1\r\n"
+ "a=ssrc:1 label:audio_track_id_1\r\n"
+ "a=ssrc:4 cname:stream_2_cname\r\n"
+ "a=ssrc:4 msid:local_stream_2 audio_track_id_2\r\n"
+ "a=ssrc:4 mslabel:local_stream_2\r\n"
+ "a=ssrc:4 label:audio_track_id_2\r\n"
+ "m=video 3457 RTP/SAVPF 120\r\n"
+ "c=IN IP4 74.125.224.39\r\n"
+ "a=rtcp:3456 IN IP4 74.125.224.39\r\n"
+ "a=candidate:a0+B/1 2 udp 2130706432 192.168.1.5 1236 typ host "
+ "generation 2\r\n"
+ "a=candidate:a0+B/1 1 udp 2130706432 192.168.1.5 1237 typ host "
+ "generation 2\r\n"
+ "a=candidate:a0+B/2 2 udp 2130706432 ::1 1240 typ host "
+ "generation 2\r\n"
+ "a=candidate:a0+B/2 1 udp 2130706432 ::1 1241 typ host "
+ "generation 2\r\n"
+ "a=candidate:a0+B/4 2 udp 2130706432 74.125.224.39 3456 typ relay "
+ "generation 2\r\n"
+ "a=candidate:a0+B/4 1 udp 2130706432 74.125.224.39 3457 typ relay "
+ "generation 2\r\n"
+ "a=ice-ufrag:ufrag_video\r\na=ice-pwd:pwd_video\r\n"
+ "a=mid:video_content_name\r\n"
+ "a=sendrecv\r\n"
+ "a=crypto:1 AES_CM_128_HMAC_SHA1_80 "
+ "inline:d0RmdmcmVCspeEc3QGZiNWpVLFJhQX1cfHAwJSoj|2^20|1:32\r\n"
+ "a=rtpmap:120 VP8/90000\r\n"
+ "a=ssrc:2 cname:stream_1_cname\r\n"
+ "a=ssrc:2 msid:local_stream_1 video_track_id_1\r\n"
+ "a=ssrc:2 mslabel:local_stream_1\r\n"
+ "a=ssrc:2 label:video_track_id_1\r\n"
+ "a=ssrc:3 cname:stream_1_cname\r\n"
+ "a=ssrc:3 msid:local_stream_1 video_track_id_2\r\n"
+ "a=ssrc:3 mslabel:local_stream_1\r\n"
+ "a=ssrc:3 label:video_track_id_2\r\n"
+ "a=ssrc-group:FEC 5 6\r\n"
+ "a=ssrc:5 cname:stream_2_cname\r\n"
+ "a=ssrc:5 msid:local_stream_2 video_track_id_3\r\n"
+ "a=ssrc:5 mslabel:local_stream_2\r\n"
+ "a=ssrc:5 label:video_track_id_3\r\n"
+ "a=ssrc:6 cname:stream_2_cname\r\n"
+ "a=ssrc:6 msid:local_stream_2 video_track_id_3\r\n"
+ "a=ssrc:6 mslabel:local_stream_2\r\n"
+ "a=ssrc:6 label:video_track_id_3\r\n";
+
+// SDP reference string without the candidates.
+static const char kSdpString[] =
+ "v=0\r\n"
+ "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n"
+ "s=-\r\n"
+ "t=0 0\r\n"
+ "a=msid-semantic: WMS local_stream_1 local_stream_2\r\n"
+ "m=audio 1 RTP/SAVPF 111 103 104\r\n"
+ "c=IN IP4 0.0.0.0\r\n"
+ "a=rtcp:1 IN IP4 0.0.0.0\r\n"
+ "a=ice-ufrag:ufrag_voice\r\na=ice-pwd:pwd_voice\r\n"
+ "a=mid:audio_content_name\r\n"
+ "a=sendrecv\r\n"
+ "a=rtcp-mux\r\n"
+ "a=crypto:1 AES_CM_128_HMAC_SHA1_32 "
+ "inline:NzB4d1BINUAvLEw6UzF3WSJ+PSdFcGdUJShpX1Zj|2^20|1:32 "
+ "dummy_session_params\r\n"
+ "a=rtpmap:111 opus/48000/2\r\n"
+ "a=rtpmap:103 ISAC/16000\r\n"
+ "a=rtpmap:104 CELT/32000/2\r\n"
+ "a=ssrc:1 cname:stream_1_cname\r\n"
+ "a=ssrc:1 msid:local_stream_1 audio_track_id_1\r\n"
+ "a=ssrc:1 mslabel:local_stream_1\r\n"
+ "a=ssrc:1 label:audio_track_id_1\r\n"
+ "a=ssrc:4 cname:stream_2_cname\r\n"
+ "a=ssrc:4 msid:local_stream_2 audio_track_id_2\r\n"
+ "a=ssrc:4 mslabel:local_stream_2\r\n"
+ "a=ssrc:4 label:audio_track_id_2\r\n"
+ "m=video 1 RTP/SAVPF 120\r\n"
+ "c=IN IP4 0.0.0.0\r\n"
+ "a=rtcp:1 IN IP4 0.0.0.0\r\n"
+ "a=ice-ufrag:ufrag_video\r\na=ice-pwd:pwd_video\r\n"
+ "a=mid:video_content_name\r\n"
+ "a=sendrecv\r\n"
+ "a=crypto:1 AES_CM_128_HMAC_SHA1_80 "
+ "inline:d0RmdmcmVCspeEc3QGZiNWpVLFJhQX1cfHAwJSoj|2^20|1:32\r\n"
+ "a=rtpmap:120 VP8/90000\r\n"
+ "a=ssrc:2 cname:stream_1_cname\r\n"
+ "a=ssrc:2 msid:local_stream_1 video_track_id_1\r\n"
+ "a=ssrc:2 mslabel:local_stream_1\r\n"
+ "a=ssrc:2 label:video_track_id_1\r\n"
+ "a=ssrc:3 cname:stream_1_cname\r\n"
+ "a=ssrc:3 msid:local_stream_1 video_track_id_2\r\n"
+ "a=ssrc:3 mslabel:local_stream_1\r\n"
+ "a=ssrc:3 label:video_track_id_2\r\n"
+ "a=ssrc-group:FEC 5 6\r\n"
+ "a=ssrc:5 cname:stream_2_cname\r\n"
+ "a=ssrc:5 msid:local_stream_2 video_track_id_3\r\n"
+ "a=ssrc:5 mslabel:local_stream_2\r\n"
+ "a=ssrc:5 label:video_track_id_3\r\n"
+ "a=ssrc:6 cname:stream_2_cname\r\n"
+ "a=ssrc:6 msid:local_stream_2 video_track_id_3\r\n"
+ "a=ssrc:6 mslabel:local_stream_2\r\n"
+ "a=ssrc:6 label:video_track_id_3\r\n";
+
+static const char kSdpRtpDataChannelString[] =
+ "m=application 1 RTP/SAVPF 101\r\n"
+ "c=IN IP4 0.0.0.0\r\n"
+ "a=rtcp:1 IN IP4 0.0.0.0\r\n"
+ "a=ice-ufrag:ufrag_data\r\n"
+ "a=ice-pwd:pwd_data\r\n"
+ "a=mid:data_content_name\r\n"
+ "a=sendrecv\r\n"
+ "a=crypto:1 AES_CM_128_HMAC_SHA1_80 "
+ "inline:FvLcvU2P3ZWmQxgPAgcDu7Zl9vftYElFOjEzhWs5\r\n"
+ "a=rtpmap:101 google-data/90000\r\n"
+ "a=ssrc:10 cname:data_channel_cname\r\n"
+ "a=ssrc:10 msid:data_channel data_channeld0\r\n"
+ "a=ssrc:10 mslabel:data_channel\r\n"
+ "a=ssrc:10 label:data_channeld0\r\n";
+
+static const char kSdpSctpDataChannelString[] =
+ "m=application 1 DTLS/SCTP 5000\r\n"
+ "c=IN IP4 0.0.0.0\r\n"
+ "a=ice-ufrag:ufrag_data\r\n"
+ "a=ice-pwd:pwd_data\r\n"
+ "a=mid:data_content_name\r\n"
+ "a=fmtp:5000 protocol=webrtc-datachannel; streams=10\r\n";
+
+static const char kSdpSctpDataChannelWithCandidatesString[] =
+ "m=application 2345 DTLS/SCTP 5000\r\n"
+ "c=IN IP4 74.125.127.126\r\n"
+ "a=candidate:a0+B/1 1 udp 2130706432 192.168.1.5 1234 typ host "
+ "generation 2\r\n"
+ "a=candidate:a0+B/2 1 udp 2130706432 ::1 1238 typ host "
+ "generation 2\r\n"
+ "a=candidate:a0+B/3 1 udp 2130706432 74.125.127.126 2345 typ srflx "
+ "raddr 192.168.1.5 rport 2346 "
+ "generation 2\r\n"
+ "a=ice-ufrag:ufrag_data\r\n"
+ "a=ice-pwd:pwd_data\r\n"
+ "a=mid:data_content_name\r\n"
+ "a=fmtp:5000 protocol=webrtc-datachannel; streams=10\r\n";
+
+
// One candidate reference string as per the W3C spec:
// candidate:<blah>, not a=candidate:<blah>CRLF.
+static const char kRawCandidate[] =
+ "candidate:a0+B/1 1 udp 2130706432 192.168.1.5 1234 typ host generation 2";
+// One candidate reference string.
+static const char kSdpOneCandidate[] =
+ "a=candidate:a0+B/1 1 udp 2130706432 192.168.1.5 1234 typ host "
+ "generation 2\r\n";
+
+// One candidate reference string.
+static const char kSdpOneCandidateOldFormat[] =
+ "a=candidate:a0+B/1 1 udp 2130706432 192.168.1.5 1234 typ host network_name"
+ " eth0 username user_rtp password password_rtp generation 2\r\n";
+
+// Session id and version
+static const char kSessionId[] = "18446744069414584320";
+static const char kSessionVersion[] = "18446462598732840960";
+
+// Ice options
+static const char kIceOption1[] = "iceoption1";
+static const char kIceOption2[] = "iceoption2";
+static const char kIceOption3[] = "iceoption3";
+
+// Content name
+static const char kAudioContentName[] = "audio_content_name";
+static const char kVideoContentName[] = "video_content_name";
+static const char kDataContentName[] = "data_content_name";
+
+// MediaStream 1
+static const char kStreamLabel1[] = "local_stream_1";
+static const char kStream1Cname[] = "stream_1_cname";
+static const char kAudioTrackId1[] = "audio_track_id_1";
+static const uint32 kAudioTrack1Ssrc = 1;
+static const char kVideoTrackId1[] = "video_track_id_1";
+static const uint32 kVideoTrack1Ssrc = 2;
+static const char kVideoTrackId2[] = "video_track_id_2";
+static const uint32 kVideoTrack2Ssrc = 3;
+
+// MediaStream 2
+static const char kStreamLabel2[] = "local_stream_2";
+static const char kStream2Cname[] = "stream_2_cname";
+static const char kAudioTrackId2[] = "audio_track_id_2";
+static const uint32 kAudioTrack2Ssrc = 4;
+static const char kVideoTrackId3[] = "video_track_id_3";
+static const uint32 kVideoTrack3Ssrc = 5;
+static const uint32 kVideoTrack4Ssrc = 6;
+
+// DataChannel
+static const char kDataChannelLabel[] = "data_channel";
+static const char kDataChannelMsid[] = "data_channeld0";
+static const char kDataChannelCname[] = "data_channel_cname";
+static const uint32 kDataChannelSsrc = 10;
+
+// Candidate
+static const char kDummyMid[] = "dummy_mid";
+static const int kDummyIndex = 123;
+
+// Misc
+static const char kDummyString[] = "dummy";
+
+// Helper functions
+
// Convenience wrapper: deserializes |message| into |jdesc| without
// collecting detailed parse-error information.
static bool SdpDeserialize(const std::string& message,
                           JsepSessionDescription* jdesc) {
  return webrtc::SdpDeserialize(message, jdesc, NULL);
}
+
// Convenience wrapper: deserializes one candidate from |message| without
// collecting detailed parse-error information.
static bool SdpDeserializeCandidate(const std::string& message,
                                    JsepIceCandidate* candidate) {
  return webrtc::SdpDeserializeCandidate(message, candidate, NULL);
}
+
+// Add some extra |newlines| to the |message| after |line|.
+static void InjectAfter(const std::string& line,
+ const std::string& newlines,
+ std::string* message) {
+ const std::string tmp = line + newlines;
+ talk_base::replace_substrs(line.c_str(), line.length(),
+ tmp.c_str(), tmp.length(), message);
+}
+
// Replaces every occurrence of |line| in |message| with |newlines|.
static void Replace(const std::string& line,
                    const std::string& newlines,
                    std::string* message) {
  talk_base::replace_substrs(line.c_str(), line.length(),
                             newlines.c_str(), newlines.length(), message);
}
+
// Substitutes |replace| for |search| in the full reference SDP and expects
// deserialization to fail, with the reported failing line containing
// |replace|.
static void ReplaceAndTryToParse(const char* search, const char* replace) {
  JsepSessionDescription desc(kDummyString);
  std::string sdp = kSdpFullString;
  Replace(search, replace, &sdp);
  SdpParseError error;
  bool ret = webrtc::SdpDeserialize(sdp, &desc, &error);
  EXPECT_FALSE(ret);
  EXPECT_NE(std::string::npos, error.line.find(replace));
}
+
+static void ReplaceDirection(cricket::MediaContentDirection direction,
+ std::string* message) {
+ std::string new_direction;
+ switch (direction) {
+ case cricket::MD_INACTIVE:
+ new_direction = "a=inactive";
+ break;
+ case cricket::MD_SENDONLY:
+ new_direction = "a=sendonly";
+ break;
+ case cricket::MD_RECVONLY:
+ new_direction = "a=recvonly";
+ break;
+ case cricket::MD_SENDRECV:
+ default:
+ new_direction = "a=sendrecv";
+ break;
+ }
+ Replace("a=sendrecv", new_direction, message);
+}
+
// Marks the audio and/or video m-lines in |message| as rejected by setting
// their port to 0, per RFC 3264.
static void ReplaceRejected(bool audio_rejected, bool video_rejected,
                            std::string* message) {
  if (audio_rejected) {
    Replace("m=audio 2345", "m=audio 0", message);
  }
  if (video_rejected) {
    Replace("m=video 3457", "m=video 0", message);
  }
}
+
+// WebRtcSdpTest
+
+class WebRtcSdpTest : public testing::Test {
+ public:
  // Builds the reference SessionDescription (audio + video contents with
  // transport infos), the matching set of 12 ICE candidates, and the
  // JsepSessionDescription used by the tests. The fixture mirrors
  // kSdpFullString exactly.
  WebRtcSdpTest()
      : jdesc_(kDummyString) {
    // AudioContentDescription
    audio_desc_ = CreateAudioContentDescription();
    AudioCodec opus(111, "opus", 48000, 0, 2, 3);
    audio_desc_->AddCodec(opus);
    audio_desc_->AddCodec(AudioCodec(103, "ISAC", 16000, 32000, 1, 2));
    audio_desc_->AddCodec(AudioCodec(104, "CELT", 32000, 0, 2, 1));
    desc_.AddContent(kAudioContentName, NS_JINGLE_RTP, audio_desc_);

    // VideoContentDescription
    talk_base::scoped_ptr<VideoContentDescription> video(
        new VideoContentDescription());
    video_desc_ = video.get();
    StreamParams video_stream1;
    video_stream1.id = kVideoTrackId1;
    video_stream1.cname = kStream1Cname;
    video_stream1.sync_label = kStreamLabel1;
    video_stream1.ssrcs.push_back(kVideoTrack1Ssrc);
    video->AddStream(video_stream1);
    StreamParams video_stream2;
    video_stream2.id = kVideoTrackId2;
    video_stream2.cname = kStream1Cname;
    video_stream2.sync_label = kStreamLabel1;
    video_stream2.ssrcs.push_back(kVideoTrack2Ssrc);
    video->AddStream(video_stream2);
    StreamParams video_stream3;
    video_stream3.id = kVideoTrackId3;
    video_stream3.cname = kStream2Cname;
    video_stream3.sync_label = kStreamLabel2;
    video_stream3.ssrcs.push_back(kVideoTrack3Ssrc);
    video_stream3.ssrcs.push_back(kVideoTrack4Ssrc);
    // The two ssrcs of stream 3 form a single FEC ssrc-group
    // ("a=ssrc-group:FEC 5 6" in the reference SDP).
    cricket::SsrcGroup ssrc_group(kFecSsrcGroupSemantics, video_stream3.ssrcs);
    video_stream3.ssrc_groups.push_back(ssrc_group);
    video->AddStream(video_stream3);
    video->AddCrypto(CryptoParams(1, "AES_CM_128_HMAC_SHA1_80",
        "inline:d0RmdmcmVCspeEc3QGZiNWpVLFJhQX1cfHAwJSoj|2^20|1:32", ""));
    video->set_protocol(cricket::kMediaProtocolSavpf);
    video->AddCodec(VideoCodec(
        120,
        JsepSessionDescription::kDefaultVideoCodecName,
        JsepSessionDescription::kMaxVideoCodecWidth,
        JsepSessionDescription::kMaxVideoCodecHeight,
        JsepSessionDescription::kDefaultVideoCodecFramerate,
        JsepSessionDescription::kDefaultVideoCodecPreference));

    desc_.AddContent(kVideoContentName, NS_JINGLE_RTP,
                     video.release());

    // TransportInfo
    EXPECT_TRUE(desc_.AddTransportInfo(
        TransportInfo(kAudioContentName,
                      TransportDescription(NS_JINGLE_ICE_UDP,
                                           std::vector<std::string>(),
                                           kCandidateUfragVoice,
                                           kCandidatePwdVoice,
                                           cricket::ICEMODE_FULL,
                                           NULL, Candidates()))));
    EXPECT_TRUE(desc_.AddTransportInfo(
        TransportInfo(kVideoContentName,
                      TransportDescription(NS_JINGLE_ICE_UDP,
                                           std::vector<std::string>(),
                                           kCandidateUfragVideo,
                                           kCandidatePwdVideo,
                                           cricket::ICEMODE_FULL,
                                           NULL, Candidates()))));

    // v4 host
    int port = 1234;
    talk_base::SocketAddress address("192.168.1.5", port++);
    Candidate candidate1(
        "", ICE_CANDIDATE_COMPONENT_RTP, "udp", address, kCandidatePriority,
        "", "", LOCAL_PORT_TYPE,
        "", kCandidateGeneration, kCandidateFoundation1);
    address.SetPort(port++);
    Candidate candidate2(
        "", ICE_CANDIDATE_COMPONENT_RTCP, "udp", address, kCandidatePriority,
        "", "", LOCAL_PORT_TYPE,
        "", kCandidateGeneration, kCandidateFoundation1);
    address.SetPort(port++);
    Candidate candidate3(
        "", ICE_CANDIDATE_COMPONENT_RTCP, "udp", address, kCandidatePriority,
        "", "", LOCAL_PORT_TYPE,
        "", kCandidateGeneration, kCandidateFoundation1);
    address.SetPort(port++);
    Candidate candidate4(
        "", ICE_CANDIDATE_COMPONENT_RTP, "udp", address, kCandidatePriority,
        "", "", LOCAL_PORT_TYPE,
        "", kCandidateGeneration, kCandidateFoundation1);

    // v6 host
    talk_base::SocketAddress v6_address("::1", port++);
    cricket::Candidate candidate5(
        "", cricket::ICE_CANDIDATE_COMPONENT_RTP,
        "udp", v6_address, kCandidatePriority,
        "", "", cricket::LOCAL_PORT_TYPE,
        "", kCandidateGeneration, kCandidateFoundation2);
    v6_address.SetPort(port++);
    cricket::Candidate candidate6(
        "", cricket::ICE_CANDIDATE_COMPONENT_RTCP,
        "udp", v6_address, kCandidatePriority,
        "", "", cricket::LOCAL_PORT_TYPE,
        "", kCandidateGeneration, kCandidateFoundation2);
    v6_address.SetPort(port++);
    cricket::Candidate candidate7(
        "", cricket::ICE_CANDIDATE_COMPONENT_RTCP,
        "udp", v6_address, kCandidatePriority,
        "", "", cricket::LOCAL_PORT_TYPE,
        "", kCandidateGeneration, kCandidateFoundation2);
    v6_address.SetPort(port++);
    cricket::Candidate candidate8(
        "", cricket::ICE_CANDIDATE_COMPONENT_RTP,
        "udp", v6_address, kCandidatePriority,
        "", "", cricket::LOCAL_PORT_TYPE,
        "", kCandidateGeneration, kCandidateFoundation2);

    // stun
    int port_stun = 2345;
    talk_base::SocketAddress address_stun("74.125.127.126", port_stun++);
    talk_base::SocketAddress rel_address_stun("192.168.1.5", port_stun++);
    cricket::Candidate candidate9
        ("", cricket::ICE_CANDIDATE_COMPONENT_RTP,
         "udp", address_stun, kCandidatePriority,
         "", "", STUN_PORT_TYPE,
         "", kCandidateGeneration, kCandidateFoundation3);
    candidate9.set_related_address(rel_address_stun);

    address_stun.SetPort(port_stun++);
    rel_address_stun.SetPort(port_stun++);
    cricket::Candidate candidate10(
        "", cricket::ICE_CANDIDATE_COMPONENT_RTCP,
        "udp", address_stun, kCandidatePriority,
        "", "", STUN_PORT_TYPE,
        "", kCandidateGeneration, kCandidateFoundation3);
    candidate10.set_related_address(rel_address_stun);

    // relay
    int port_relay = 3456;
    talk_base::SocketAddress address_relay("74.125.224.39", port_relay++);
    cricket::Candidate candidate11(
        "", cricket::ICE_CANDIDATE_COMPONENT_RTCP,
        "udp", address_relay, kCandidatePriority,
        "", "",
        cricket::RELAY_PORT_TYPE, "",
        kCandidateGeneration, kCandidateFoundation4);
    address_relay.SetPort(port_relay++);
    cricket::Candidate candidate12(
        "", cricket::ICE_CANDIDATE_COMPONENT_RTP,
        "udp", address_relay, kCandidatePriority,
        "", "",
        RELAY_PORT_TYPE, "",
        kCandidateGeneration, kCandidateFoundation4);

    // voice
    candidates_.push_back(candidate1);
    candidates_.push_back(candidate2);
    candidates_.push_back(candidate5);
    candidates_.push_back(candidate6);
    candidates_.push_back(candidate9);
    candidates_.push_back(candidate10);

    // video
    candidates_.push_back(candidate3);
    candidates_.push_back(candidate4);
    candidates_.push_back(candidate7);
    candidates_.push_back(candidate8);
    candidates_.push_back(candidate11);
    candidates_.push_back(candidate12);

    jcandidate_.reset(new JsepIceCandidate(std::string("audio_content_name"),
                                           0, candidate1));

    // Set up JsepSessionDescription.
    jdesc_.Initialize(desc_.Copy(), kSessionId, kSessionVersion);
    std::string mline_id;
    int mline_index = 0;
    for (size_t i = 0; i < candidates_.size(); ++i) {
      // In this test, the audio m line index will be 0, and the video m line
      // will be 1.
      bool is_video = (i > 5);
      mline_id = is_video ? "video_content_name" : "audio_content_name";
      mline_index = is_video ? 1 : 0;
      JsepIceCandidate jice(mline_id,
                            mline_index,
                            candidates_.at(i));
      jdesc_.AddCandidate(&jice);
    }
  }
+
  // Creates the audio content description matching the reference SDP:
  // rtcp-mux enabled, two audio streams (ssrcs 1 and 4) and one SDES crypto
  // with session params. The caller takes ownership of the returned object.
  AudioContentDescription* CreateAudioContentDescription() {
    AudioContentDescription* audio = new AudioContentDescription();
    audio->set_rtcp_mux(true);
    StreamParams audio_stream1;
    audio_stream1.id = kAudioTrackId1;
    audio_stream1.cname = kStream1Cname;
    audio_stream1.sync_label = kStreamLabel1;
    audio_stream1.ssrcs.push_back(kAudioTrack1Ssrc);
    audio->AddStream(audio_stream1);
    StreamParams audio_stream2;
    audio_stream2.id = kAudioTrackId2;
    audio_stream2.cname = kStream2Cname;
    audio_stream2.sync_label = kStreamLabel2;
    audio_stream2.ssrcs.push_back(kAudioTrack2Ssrc);
    audio->AddStream(audio_stream2);
    audio->AddCrypto(CryptoParams(1, "AES_CM_128_HMAC_SHA1_32",
        "inline:NzB4d1BINUAvLEw6UzF3WSJ+PSdFcGdUJShpX1Zj|2^20|1:32",
        "dummy_session_params"));
    audio->set_protocol(cricket::kMediaProtocolSavpf);
    return audio;
  }
+
+ template <class MCD>
+ void CompareMediaContentDescription(const MCD* cd1,
+ const MCD* cd2) {
+ // type
+ EXPECT_EQ(cd1->type(), cd1->type());
+
+ // content direction
+ EXPECT_EQ(cd1->direction(), cd2->direction());
+
+ // rtcp_mux
+ EXPECT_EQ(cd1->rtcp_mux(), cd2->rtcp_mux());
+
+ // cryptos
+ EXPECT_EQ(cd1->cryptos().size(), cd2->cryptos().size());
+ if (cd1->cryptos().size() != cd2->cryptos().size()) {
+ ADD_FAILURE();
+ return;
+ }
+ for (size_t i = 0; i< cd1->cryptos().size(); ++i) {
+ const CryptoParams c1 = cd1->cryptos().at(i);
+ const CryptoParams c2 = cd2->cryptos().at(i);
+ EXPECT_TRUE(c1.Matches(c2));
+ EXPECT_EQ(c1.key_params, c2.key_params);
+ EXPECT_EQ(c1.session_params, c2.session_params);
+ }
+ // protocol
+ EXPECT_EQ(cd1->protocol(), cd2->protocol());
+
+ // codecs
+ EXPECT_EQ(cd1->codecs(), cd2->codecs());
+
+ // bandwidth
+ EXPECT_EQ(cd1->bandwidth(), cd2->bandwidth());
+
+ // streams
+ EXPECT_EQ(cd1->streams(), cd2->streams());
+
+ // extmap
+ ASSERT_EQ(cd1->rtp_header_extensions().size(),
+ cd2->rtp_header_extensions().size());
+ for (size_t i = 0; i< cd1->rtp_header_extensions().size(); ++i) {
+ const RtpHeaderExtension ext1 = cd1->rtp_header_extensions().at(i);
+ const RtpHeaderExtension ext2 = cd2->rtp_header_extensions().at(i);
+ EXPECT_EQ(ext1.uri, ext2.uri);
+ EXPECT_EQ(ext1.id, ext2.id);
+ }
+
+ // buffered mode latency
+ EXPECT_EQ(cd1->buffered_mode_latency(), cd2->buffered_mode_latency());
+ }
+
+
+ void CompareSessionDescription(const SessionDescription& desc1,
+ const SessionDescription& desc2) {
+ // Compare content descriptions.
+ if (desc1.contents().size() != desc2.contents().size()) {
+ ADD_FAILURE();
+ return;
+ }
+ for (size_t i = 0 ; i < desc1.contents().size(); ++i) {
+ const cricket::ContentInfo& c1 = desc1.contents().at(i);
+ const cricket::ContentInfo& c2 = desc2.contents().at(i);
+ // content name
+ EXPECT_EQ(c1.name, c2.name);
+ // content type
+ // Note, ASSERT will return from the function, but will not stop the test.
+ ASSERT_EQ(c1.type, c2.type);
+
+ ASSERT_EQ(IsAudioContent(&c1), IsAudioContent(&c2));
+ if (IsAudioContent(&c1)) {
+ const AudioContentDescription* acd1 =
+ static_cast<const AudioContentDescription*>(c1.description);
+ const AudioContentDescription* acd2 =
+ static_cast<const AudioContentDescription*>(c2.description);
+ CompareMediaContentDescription<AudioContentDescription>(acd1, acd2);
+ }
+
+ ASSERT_EQ(IsVideoContent(&c1), IsVideoContent(&c2));
+ if (IsVideoContent(&c1)) {
+ const VideoContentDescription* vcd1 =
+ static_cast<const VideoContentDescription*>(c1.description);
+ const VideoContentDescription* vcd2 =
+ static_cast<const VideoContentDescription*>(c2.description);
+ CompareMediaContentDescription<VideoContentDescription>(vcd1, vcd2);
+ }
+
+ ASSERT_EQ(IsDataContent(&c1), IsDataContent(&c2));
+ if (IsDataContent(&c1)) {
+ const DataContentDescription* dcd1 =
+ static_cast<const DataContentDescription*>(c1.description);
+ const DataContentDescription* dcd2 =
+ static_cast<const DataContentDescription*>(c2.description);
+ CompareMediaContentDescription<DataContentDescription>(dcd1, dcd2);
+ }
+ }
+
+ // group
+ const cricket::ContentGroups groups1 = desc1.groups();
+ const cricket::ContentGroups groups2 = desc2.groups();
+ EXPECT_EQ(groups1.size(), groups1.size());
+ if (groups1.size() != groups2.size()) {
+ ADD_FAILURE();
+ return;
+ }
+ for (size_t i = 0; i < groups1.size(); ++i) {
+ const cricket::ContentGroup group1 = groups1.at(i);
+ const cricket::ContentGroup group2 = groups2.at(i);
+ EXPECT_EQ(group1.semantics(), group2.semantics());
+ const cricket::ContentNames names1 = group1.content_names();
+ const cricket::ContentNames names2 = group2.content_names();
+ EXPECT_EQ(names1.size(), names2.size());
+ if (names1.size() != names2.size()) {
+ ADD_FAILURE();
+ return;
+ }
+ cricket::ContentNames::const_iterator iter1 = names1.begin();
+ cricket::ContentNames::const_iterator iter2 = names2.begin();
+ while (iter1 != names1.end()) {
+ EXPECT_EQ(*iter1++, *iter2++);
+ }
+ }
+
+ // transport info
+ const cricket::TransportInfos transports1 = desc1.transport_infos();
+ const cricket::TransportInfos transports2 = desc2.transport_infos();
+ EXPECT_EQ(transports1.size(), transports2.size());
+ if (transports1.size() != transports2.size()) {
+ ADD_FAILURE();
+ return;
+ }
+ for (size_t i = 0; i < transports1.size(); ++i) {
+ const cricket::TransportInfo transport1 = transports1.at(i);
+ const cricket::TransportInfo transport2 = transports2.at(i);
+ EXPECT_EQ(transport1.content_name, transport2.content_name);
+ EXPECT_EQ(transport1.description.transport_type,
+ transport2.description.transport_type);
+ EXPECT_EQ(transport1.description.ice_ufrag,
+ transport2.description.ice_ufrag);
+ EXPECT_EQ(transport1.description.ice_pwd,
+ transport2.description.ice_pwd);
+ if (transport1.description.identity_fingerprint) {
+ EXPECT_EQ(*transport1.description.identity_fingerprint,
+ *transport2.description.identity_fingerprint);
+ } else {
+ EXPECT_EQ(transport1.description.identity_fingerprint.get(),
+ transport2.description.identity_fingerprint.get());
+ }
+ EXPECT_EQ(transport1.description.transport_options,
+ transport2.description.transport_options);
+ EXPECT_TRUE(CompareCandidates(transport1.description.candidates,
+ transport2.description.candidates));
+ }
+ }
+
+ bool CompareCandidates(const Candidates& cs1, const Candidates& cs2) {
+ EXPECT_EQ(cs1.size(), cs2.size());
+ if (cs1.size() != cs2.size())
+ return false;
+ for (size_t i = 0; i< cs1.size(); ++i) {
+ const Candidate c1 = cs1.at(i);
+ const Candidate c2 = cs2.at(i);
+ EXPECT_TRUE(c1.IsEquivalent(c2));
+ }
+ return true;
+ }
+
  // Compares two JsepSessionDescriptions: session id/version, the underlying
  // SessionDescriptions, and each media section's candidate collection.
  // Returns false on a size mismatch; other mismatches are reported through
  // gtest EXPECT failures.
  bool CompareSessionDescription(
      const JsepSessionDescription& desc1,
      const JsepSessionDescription& desc2) {
    EXPECT_EQ(desc1.session_id(), desc2.session_id());
    EXPECT_EQ(desc1.session_version(), desc2.session_version());
    CompareSessionDescription(*desc1.description(), *desc2.description());
    if (desc1.number_of_mediasections() != desc2.number_of_mediasections())
      return false;
    for (size_t i = 0; i < desc1.number_of_mediasections(); ++i) {
      const IceCandidateCollection* cc1 = desc1.candidates(i);
      const IceCandidateCollection* cc2 = desc2.candidates(i);
      if (cc1->count() != cc2->count())
        return false;
      for (size_t j = 0; j < cc1->count(); ++j) {
        const IceCandidateInterface* c1 = cc1->at(j);
        const IceCandidateInterface* c2 = cc2->at(j);
        EXPECT_EQ(c1->sdp_mid(), c2->sdp_mid());
        EXPECT_EQ(c1->sdp_mline_index(), c2->sdp_mline_index());
        EXPECT_TRUE(c1->candidate().IsEquivalent(c2->candidate()));
      }
    }
    return true;
  }
+
  // Disable the ice-ufrag and ice-pwd in given |sdp| message by replacing
  // them with invalid keywords ("a=xice-...") so that the parser will just
  // ignore them. Always returns true.
  bool RemoveCandidateUfragPwd(std::string* sdp) {
    const char ice_ufrag[] = "a=ice-ufrag";
    const char ice_ufragx[] = "a=xice-ufrag";
    const char ice_pwd[] = "a=ice-pwd";
    const char ice_pwdx[] = "a=xice-pwd";
    talk_base::replace_substrs(ice_ufrag, strlen(ice_ufrag),
                               ice_ufragx, strlen(ice_ufragx), sdp);
    talk_base::replace_substrs(ice_pwd, strlen(ice_pwd),
                               ice_pwdx, strlen(ice_pwdx), sdp);
    return true;
  }
+
+ // Update the candidates in |jdesc| to use the given |ufrag| and |pwd|.
+ bool UpdateCandidateUfragPwd(JsepSessionDescription* jdesc, int mline_index,
+ const std::string& ufrag, const std::string& pwd) {
+ std::string content_name;
+ if (mline_index == 0) {
+ content_name = kAudioContentName;
+ } else if (mline_index == 1) {
+ content_name = kVideoContentName;
+ } else {
+ ASSERT(false);
+ }
+ TransportInfo transport_info(
+ content_name, TransportDescription(NS_JINGLE_ICE_UDP,
+ std::vector<std::string>(),
+ ufrag, pwd, cricket::ICEMODE_FULL,
+ NULL, Candidates()));
+ SessionDescription* desc =
+ const_cast<SessionDescription*>(jdesc->description());
+ desc->RemoveTransportInfoByName(content_name);
+ EXPECT_TRUE(desc->AddTransportInfo(transport_info));
+ for (size_t i = 0; i < jdesc_.number_of_mediasections(); ++i) {
+ const IceCandidateCollection* cc = jdesc_.candidates(i);
+ for (size_t j = 0; j < cc->count(); ++j) {
+ if (cc->at(j)->sdp_mline_index() == mline_index) {
+ const_cast<Candidate&>(cc->at(j)->candidate()).set_username(
+ ufrag);
+ const_cast<Candidate&>(cc->at(j)->candidate()).set_password(
+ pwd);
+ }
+ }
+ }
+ return true;
+ }
+
+ void AddIceOptions(const std::string& content_name,
+ const std::vector<std::string>& transport_options) {
+ ASSERT_TRUE(desc_.GetTransportInfoByName(content_name) != NULL);
+ cricket::TransportInfo transport_info =
+ *(desc_.GetTransportInfoByName(content_name));
+ desc_.RemoveTransportInfoByName(content_name);
+ transport_info.description.transport_options = transport_options;
+ desc_.AddTransportInfo(transport_info);
+ }
+
  // Replaces the audio and video transports in |desc_| with ones carrying a
  // DTLS fingerprint (SHA-1 over kIdentityDigest), as a DTLS offer would.
  void AddFingerprint() {
    desc_.RemoveTransportInfoByName(kAudioContentName);
    desc_.RemoveTransportInfoByName(kVideoContentName);
    // NOTE(review): |fingerprint| is stack-allocated and passed by pointer;
    // TransportDescription presumably copies it rather than retaining the
    // pointer — confirm against transportdescription.h.
    talk_base::SSLFingerprint fingerprint(talk_base::DIGEST_SHA_1,
                                          kIdentityDigest,
                                          sizeof(kIdentityDigest));
    EXPECT_TRUE(desc_.AddTransportInfo(
        TransportInfo(kAudioContentName,
                      TransportDescription(NS_JINGLE_ICE_UDP,
                                           std::vector<std::string>(),
                                           kCandidateUfragVoice,
                                           kCandidatePwdVoice,
                                           cricket::ICEMODE_FULL, &fingerprint,
                                           Candidates()))));
    EXPECT_TRUE(desc_.AddTransportInfo(
        TransportInfo(kVideoContentName,
                      TransportDescription(NS_JINGLE_ICE_UDP,
                                           std::vector<std::string>(),
                                           kCandidateUfragVideo,
                                           kCandidatePwdVideo,
                                           cricket::ICEMODE_FULL, &fingerprint,
                                           Candidates()))));
  }
+
  // Adds an RTP header extension (kExtmapUri/kExtmapId) to both the audio
  // and video content descriptions in |desc_|.
  void AddExtmap() {
    // Copy first: RemoveContentByName below presumably destroys the
    // descriptions currently owned by |desc_|, so the member pointers are
    // re-pointed at fresh copies before removal — confirm ownership in
    // sessiondescription.h.
    audio_desc_ = static_cast<AudioContentDescription*>(
        audio_desc_->Copy());
    video_desc_ = static_cast<VideoContentDescription*>(
        video_desc_->Copy());
    audio_desc_->AddRtpHeaderExtension(
        RtpHeaderExtension(kExtmapUri, kExtmapId));
    video_desc_->AddRtpHeaderExtension(
        RtpHeaderExtension(kExtmapUri, kExtmapId));
    desc_.RemoveContentByName(kAudioContentName);
    desc_.RemoveContentByName(kVideoContentName);
    desc_.AddContent(kAudioContentName, NS_JINGLE_RTP, audio_desc_);
    desc_.AddContent(kVideoContentName, NS_JINGLE_RTP, video_desc_);
  }
+
+ void RemoveCryptos() {
+ audio_desc_->set_cryptos(std::vector<CryptoParams>());
+ video_desc_->set_cryptos(std::vector<CryptoParams>());
+ }
+
+ bool TestSerializeDirection(cricket::MediaContentDirection direction) {
+ audio_desc_->set_direction(direction);
+ video_desc_->set_direction(direction);
+ std::string new_sdp = kSdpFullString;
+ ReplaceDirection(direction, &new_sdp);
+
+ if (!jdesc_.Initialize(desc_.Copy(),
+ jdesc_.session_id(),
+ jdesc_.session_version())) {
+ return false;
+ }
+ std::string message = webrtc::SdpSerialize(jdesc_);
+ EXPECT_EQ(new_sdp, message);
+ return true;
+ }
+
+ bool TestSerializeRejected(bool audio_rejected, bool video_rejected) {
+ audio_desc_ = static_cast<AudioContentDescription*>(
+ audio_desc_->Copy());
+ video_desc_ = static_cast<VideoContentDescription*>(
+ video_desc_->Copy());
+ desc_.RemoveContentByName(kAudioContentName);
+ desc_.RemoveContentByName(kVideoContentName);
+ desc_.AddContent(kAudioContentName, NS_JINGLE_RTP, audio_rejected,
+ audio_desc_);
+ desc_.AddContent(kVideoContentName, NS_JINGLE_RTP, video_rejected,
+ video_desc_);
+ std::string new_sdp = kSdpFullString;
+ ReplaceRejected(audio_rejected, video_rejected, &new_sdp);
+
+ if (!jdesc_.Initialize(desc_.Copy(),
+ jdesc_.session_id(),
+ jdesc_.session_version())) {
+ return false;
+ }
+ std::string message = webrtc::SdpSerialize(jdesc_);
+ EXPECT_EQ(new_sdp, message);
+ return true;
+ }
+
+ void AddSctpDataChannel() {
+ talk_base::scoped_ptr<DataContentDescription> data(
+ new DataContentDescription());
+ data_desc_ = data.get();
+ data_desc_->set_protocol(cricket::kMediaProtocolDtlsSctp);
+ desc_.AddContent(kDataContentName, NS_JINGLE_DRAFT_SCTP, data.release());
+ EXPECT_TRUE(desc_.AddTransportInfo(
+ TransportInfo(kDataContentName,
+ TransportDescription(NS_JINGLE_ICE_UDP,
+ std::vector<std::string>(),
+ kCandidateUfragData,
+ kCandidatePwdData,
+ cricket::ICEMODE_FULL,
+ NULL, Candidates()))));
+ }
+
+ void AddRtpDataChannel() {
+ talk_base::scoped_ptr<DataContentDescription> data(
+ new DataContentDescription());
+ data_desc_ = data.get();
+
+ data_desc_->AddCodec(DataCodec(101, "google-data", 1));
+ StreamParams data_stream;
+ data_stream.id = kDataChannelMsid;
+ data_stream.cname = kDataChannelCname;
+ data_stream.sync_label = kDataChannelLabel;
+ data_stream.ssrcs.push_back(kDataChannelSsrc);
+ data_desc_->AddStream(data_stream);
+ data_desc_->AddCrypto(CryptoParams(
+ 1, "AES_CM_128_HMAC_SHA1_80",
+ "inline:FvLcvU2P3ZWmQxgPAgcDu7Zl9vftYElFOjEzhWs5", ""));
+ data_desc_->set_protocol(cricket::kMediaProtocolSavpf);
+ desc_.AddContent(kDataContentName, NS_JINGLE_RTP, data.release());
+ EXPECT_TRUE(desc_.AddTransportInfo(
+ TransportInfo(kDataContentName,
+ TransportDescription(NS_JINGLE_ICE_UDP,
+ std::vector<std::string>(),
+ kCandidateUfragData,
+ kCandidatePwdData,
+ cricket::ICEMODE_FULL,
+ NULL, Candidates()))));
+ }
+
+ bool TestDeserializeDirection(cricket::MediaContentDirection direction) {
+ std::string new_sdp = kSdpFullString;
+ ReplaceDirection(direction, &new_sdp);
+ JsepSessionDescription new_jdesc(kDummyString);
+
+ EXPECT_TRUE(SdpDeserialize(new_sdp, &new_jdesc));
+
+ audio_desc_->set_direction(direction);
+ video_desc_->set_direction(direction);
+ if (!jdesc_.Initialize(desc_.Copy(),
+ jdesc_.session_id(),
+ jdesc_.session_version())) {
+ return false;
+ }
+ EXPECT_TRUE(CompareSessionDescription(jdesc_, new_jdesc));
+ return true;
+ }
+
  // Deserialize kSdpFullString with the audio/video m= lines rewritten as
  // rejected, and verify against the reference description with matching
  // rejected flags.
  bool TestDeserializeRejected(bool audio_rejected, bool video_rejected) {
    std::string new_sdp = kSdpFullString;
    ReplaceRejected(audio_rejected, video_rejected, &new_sdp);
    // NOTE(review): sibling helpers construct with kDummyString; this one
    // passes kOffer as the type string — harmless but inconsistent.
    JsepSessionDescription new_jdesc(JsepSessionDescription::kOffer);

    EXPECT_TRUE(SdpDeserialize(new_sdp, &new_jdesc));
    // Copy before removal; removal destroys the originals owned by |desc_|.
    audio_desc_ = static_cast<AudioContentDescription*>(
        audio_desc_->Copy());
    video_desc_ = static_cast<VideoContentDescription*>(
        video_desc_->Copy());
    desc_.RemoveContentByName(kAudioContentName);
    desc_.RemoveContentByName(kVideoContentName);
    desc_.AddContent(kAudioContentName, NS_JINGLE_RTP, audio_rejected,
                     audio_desc_);
    desc_.AddContent(kVideoContentName, NS_JINGLE_RTP, video_rejected,
                     video_desc_);
    if (!jdesc_.Initialize(desc_.Copy(),
                           jdesc_.session_id(),
                           jdesc_.session_version())) {
      return false;
    }
    EXPECT_TRUE(CompareSessionDescription(jdesc_, new_jdesc));
    return true;
  }
+
  // Deserialize kSdpString with extmap attributes injected at session and/or
  // media level. Extmap at both levels simultaneously is invalid and must
  // fail to parse; otherwise the result must equal AddExtmap()'s description.
  void TestDeserializeExtmap(bool session_level, bool media_level) {
    AddExtmap();
    JsepSessionDescription new_jdesc("dummy");
    ASSERT_TRUE(new_jdesc.Initialize(desc_.Copy(),
                                     jdesc_.session_id(),
                                     jdesc_.session_version()));
    JsepSessionDescription jdesc_with_extmap("dummy");
    std::string sdp_with_extmap = kSdpString;
    if (session_level) {
      InjectAfter(kSessionTime, kExtmapWithDirectionAndAttribute,
                  &sdp_with_extmap);
    }
    if (media_level) {
      InjectAfter(kAttributeIcePwdVoice, kExtmapWithDirectionAndAttribute,
                  &sdp_with_extmap);
      InjectAfter(kAttributeIcePwdVideo, kExtmapWithDirectionAndAttribute,
                  &sdp_with_extmap);
    }
    // The extmap can't be present at the same time in both session level and
    // media level.
    if (session_level && media_level) {
      SdpParseError error;
      EXPECT_FALSE(webrtc::SdpDeserialize(sdp_with_extmap,
                                          &jdesc_with_extmap, &error));
      // The parse error must point at the offending attribute.
      EXPECT_NE(std::string::npos, error.description.find("a=extmap"));
    } else {
      EXPECT_TRUE(SdpDeserialize(sdp_with_extmap, &jdesc_with_extmap));
      EXPECT_TRUE(CompareSessionDescription(jdesc_with_extmap, new_jdesc));
    }
  }
+
+ void VerifyCodecParameter(const cricket::CodecParameterMap& params,
+ const std::string& name, int expected_value) {
+ cricket::CodecParameterMap::const_iterator found = params.find(name);
+ ASSERT_TRUE(found != params.end());
+ EXPECT_EQ(found->second, talk_base::ToString<int>(expected_value));
+ }
+
  // Deserialize an audio-only SDP whose fmtp/ptime/maxptime values come from
  // |params|, then verify every codec parameter survived the parse into
  // |jdesc_output|. Also checks the payload-type ordering rules noted inline.
  void TestDeserializeCodecParams(const CodecParams& params,
                                  JsepSessionDescription* jdesc_output) {
    std::string sdp =
        "v=0\r\n"
        "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n"
        "s=-\r\n"
        "t=0 0\r\n"
        // Include semantics for WebRTC Media Streams since it is supported by
        // this parser, and will be added to the SDP when serializing a session
        // description.
        "a=msid-semantic: WMS\r\n"
        // Pl type 111 preferred.
        "m=audio 1 RTP/SAVPF 111 104 103 102\r\n"
        // Pltype 111 listed before 103 and 104 in the map.
        "a=rtpmap:111 opus/48000/2\r\n"
        // Pltype 103 listed before 104.
        "a=rtpmap:103 ISAC/16000\r\n"
        "a=rtpmap:104 CELT/32000/2\r\n"
        "a=rtpmap:102 ISAC/32000/1\r\n"
        "a=fmtp:111 0-15,66,70 ";
    // Append the parameterized fmtp values and the ptime/maxptime lines.
    std::ostringstream os;
    os << "minptime=" << params.min_ptime << " stereo=" << params.stereo
       << " sprop-stereo=" << params.sprop_stereo
       << " useinbandfec=" << params.useinband << "\r\n"
       << "a=ptime:" << params.ptime << "\r\n"
       << "a=maxptime:" << params.max_ptime << "\r\n";
    sdp += os.str();

    // Deserialize
    SdpParseError error;
    EXPECT_TRUE(webrtc::SdpDeserialize(sdp, jdesc_output, &error));

    const ContentInfo* ac = GetFirstAudioContent(jdesc_output->description());
    ASSERT_TRUE(ac != NULL);
    const AudioContentDescription* acd =
        static_cast<const AudioContentDescription*>(ac->description);
    ASSERT_FALSE(acd->codecs().empty());
    // The preferred payload type (111/opus) must come out first.
    cricket::AudioCodec opus = acd->codecs()[0];
    EXPECT_EQ("opus", opus.name);
    EXPECT_EQ(111, opus.id);
    VerifyCodecParameter(opus.params, "minptime", params.min_ptime);
    VerifyCodecParameter(opus.params, "stereo", params.stereo);
    VerifyCodecParameter(opus.params, "sprop-stereo", params.sprop_stereo);
    VerifyCodecParameter(opus.params, "useinbandfec", params.useinband);
    // ptime/maxptime are media-level and must be applied to every codec.
    for (size_t i = 0; i < acd->codecs().size(); ++i) {
      cricket::AudioCodec codec = acd->codecs()[i];
      VerifyCodecParameter(codec.params, "ptime", params.ptime);
      VerifyCodecParameter(codec.params, "maxptime", params.max_ptime);
      // ISAC bitrate depends on clockrate: 16 kHz -> 32 kbps, else 56 kbps.
      if (codec.name == "ISAC") {
        if (codec.clockrate == 16000) {
          EXPECT_EQ(32000, codec.bitrate);
        } else {
          EXPECT_EQ(56000, codec.bitrate);
        }
      }
    }
  }
+
  // Deserialize an audio+video SDP carrying a=rtcp-fb lines and verify the
  // feedback parameters land on the parsed codecs. When |use_wildcard| is
  // true the final "ccm fir" line uses payload type "*" instead of "101".
  void TestDeserializeRtcpFb(JsepSessionDescription* jdesc_output,
                             bool use_wildcard) {
    std::string sdp =
        "v=0\r\n"
        "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n"
        "s=-\r\n"
        "t=0 0\r\n"
        // Include semantics for WebRTC Media Streams since it is supported by
        // this parser, and will be added to the SDP when serializing a session
        // description.
        "a=msid-semantic: WMS\r\n"
        "m=audio 1 RTP/SAVPF 111\r\n"
        "a=rtpmap:111 opus/48000/2\r\n"
        "a=rtcp-fb:111 nack\r\n"
        "m=video 3457 RTP/SAVPF 101\r\n"
        "a=rtpmap:101 VP8/90000\r\n"
        "a=rtcp-fb:101 nack\r\n"
        "a=rtcp-fb:101 goog-remb\r\n"
        "a=rtcp-fb:101 ccm fir\r\n";
    std::ostringstream os;
    os << "a=rtcp-fb:" << (use_wildcard ? "*" : "101") << " ccm fir\r\n";
    sdp += os.str();
    // Deserialize
    SdpParseError error;
    EXPECT_TRUE(webrtc::SdpDeserialize(sdp, jdesc_output, &error));
    const ContentInfo* ac = GetFirstAudioContent(jdesc_output->description());
    ASSERT_TRUE(ac != NULL);
    const AudioContentDescription* acd =
        static_cast<const AudioContentDescription*>(ac->description);
    ASSERT_FALSE(acd->codecs().empty());
    cricket::AudioCodec opus = acd->codecs()[0];
    EXPECT_EQ(111, opus.id);
    // The audio codec must have picked up its nack feedback param.
    EXPECT_TRUE(opus.HasFeedbackParam(
        cricket::FeedbackParam(cricket::kRtcpFbParamNack,
                               cricket::kParamValueEmpty)));

    const ContentInfo* vc = GetFirstVideoContent(jdesc_output->description());
    ASSERT_TRUE(vc != NULL);
    const VideoContentDescription* vcd =
        static_cast<const VideoContentDescription*>(vc->description);
    ASSERT_FALSE(vcd->codecs().empty());
    cricket::VideoCodec vp8 = vcd->codecs()[0];
    EXPECT_STREQ(webrtc::JsepSessionDescription::kDefaultVideoCodecName,
                 vp8.name.c_str());
    EXPECT_EQ(101, vp8.id);
    // The video codec must have all three feedback params: nack, remb,
    // and ccm fir (whether declared per-PT or via the wildcard line).
    EXPECT_TRUE(vp8.HasFeedbackParam(
        cricket::FeedbackParam(cricket::kRtcpFbParamNack,
                               cricket::kParamValueEmpty)));
    EXPECT_TRUE(vp8.HasFeedbackParam(
        cricket::FeedbackParam(cricket::kRtcpFbParamRemb,
                               cricket::kParamValueEmpty)));
    EXPECT_TRUE(vp8.HasFeedbackParam(
        cricket::FeedbackParam(cricket::kRtcpFbParamCcm,
                               cricket::kRtcpFbCcmParamFir)));
  }
+
+ // Two SDP messages can mean the same thing but be different strings, e.g.
+ // some of the lines can be serialized in different order.
+ // However, a deserialized description can be compared field by field and has
+ // no order. If deserializer has already been tested, serializing then
+ // deserializing and comparing JsepSessionDescription will test
+ // the serializer sufficiently.
+ void TestSerialize(const JsepSessionDescription& jdesc) {
+ std::string message = webrtc::SdpSerialize(jdesc);
+ JsepSessionDescription jdesc_output_des(kDummyString);
+ SdpParseError error;
+ EXPECT_TRUE(webrtc::SdpDeserialize(message, &jdesc_output_des, &error));
+ EXPECT_TRUE(CompareSessionDescription(jdesc, jdesc_output_des));
+ }
+
 protected:
  // Reference session description that all SDP strings are checked against.
  SessionDescription desc_;
  // Unowned pointers into |desc_|'s contents; re-pointed whenever the
  // contents are copied and replaced (see AddExtmap/TestSerializeRejected).
  AudioContentDescription* audio_desc_;
  VideoContentDescription* video_desc_;
  // Set by AddSctpDataChannel()/AddRtpDataChannel(); unowned.
  DataContentDescription* data_desc_;
  Candidates candidates_;
  // Reference candidate used by the candidate (de)serialization tests.
  talk_base::scoped_ptr<IceCandidateInterface> jcandidate_;
  // Reference JSEP description built from |desc_| plus |candidates_|.
  JsepSessionDescription jdesc_;
};
+
+void TestMismatch(const std::string& string1, const std::string& string2) {
+ int position = 0;
+ for (size_t i = 0; i < string1.length() && i < string2.length(); ++i) {
+ if (string1.c_str()[i] != string2.c_str()[i]) {
+ position = i;
+ break;
+ }
+ }
+ EXPECT_EQ(0, position) << "Strings mismatch at the " << position
+ << " character\n"
+ << " 1: " << string1.substr(position, 20) << "\n"
+ << " 2: " << string2.substr(position, 20) << "\n";
+}
+
// Return the SDP line that begins at the first occurrence of
// |session_description_name| in |message|, without its "\r\n" terminator.
// Returns "" when the name is absent or no terminated line follows it.
std::string GetLine(const std::string& message,
                    const std::string& session_description_name) {
  const size_t start = message.find(session_description_name);
  if (start == std::string::npos) {
    return "";
  }
  const size_t stop = message.find("\r\n", start);
  if (stop == std::string::npos || stop <= start) {
    return "";
  }
  return message.substr(start, stop - start);
}
+
+TEST_F(WebRtcSdpTest, SerializeSessionDescription) {
+ // SessionDescription with desc and candidates.
+ std::string message = webrtc::SdpSerialize(jdesc_);
+ TestMismatch(std::string(kSdpFullString), message);
+}
+
+TEST_F(WebRtcSdpTest, SerializeSessionDescriptionEmpty) {
+ JsepSessionDescription jdesc_empty(kDummyString);
+ EXPECT_EQ("", webrtc::SdpSerialize(jdesc_empty));
+}
+
+// This tests serialization of SDP with a=crypto and a=fingerprint, as would be
+// the case in a DTLS offer.
+TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithFingerprint) {
+ AddFingerprint();
+ JsepSessionDescription jdesc_with_fingerprint(kDummyString);
+ ASSERT_TRUE(jdesc_with_fingerprint.Initialize(desc_.Copy(),
+ kSessionId, kSessionVersion));
+ std::string message = webrtc::SdpSerialize(jdesc_with_fingerprint);
+
+ std::string sdp_with_fingerprint = kSdpString;
+ InjectAfter(kAttributeIcePwdVoice,
+ kFingerprint, &sdp_with_fingerprint);
+ InjectAfter(kAttributeIcePwdVideo,
+ kFingerprint, &sdp_with_fingerprint);
+
+ EXPECT_EQ(sdp_with_fingerprint, message);
+}
+
+// This tests serialization of SDP with a=fingerprint with no a=crypto, as would
+// be the case in a DTLS answer.
+TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithFingerprintNoCryptos) {
+ AddFingerprint();
+ RemoveCryptos();
+ JsepSessionDescription jdesc_with_fingerprint(kDummyString);
+ ASSERT_TRUE(jdesc_with_fingerprint.Initialize(desc_.Copy(),
+ kSessionId, kSessionVersion));
+ std::string message = webrtc::SdpSerialize(jdesc_with_fingerprint);
+
+ std::string sdp_with_fingerprint = kSdpString;
+ Replace(kAttributeCryptoVoice, "", &sdp_with_fingerprint);
+ Replace(kAttributeCryptoVideo, "", &sdp_with_fingerprint);
+ InjectAfter(kAttributeIcePwdVoice,
+ kFingerprint, &sdp_with_fingerprint);
+ InjectAfter(kAttributeIcePwdVideo,
+ kFingerprint, &sdp_with_fingerprint);
+
+ EXPECT_EQ(sdp_with_fingerprint, message);
+}
+
+TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithoutCandidates) {
+ // JsepSessionDescription with desc but without candidates.
+ JsepSessionDescription jdesc_no_candidates(kDummyString);
+ ASSERT_TRUE(jdesc_no_candidates.Initialize(desc_.Copy(),
+ kSessionId, kSessionVersion));
+ std::string message = webrtc::SdpSerialize(jdesc_no_candidates);
+ EXPECT_EQ(std::string(kSdpString), message);
+}
+
// A BUNDLE group on the description must serialize as an a=group:BUNDLE
// line right after the session timing line.
TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithBundle) {
  ContentGroup group(cricket::GROUP_TYPE_BUNDLE);
  group.AddContentName(kAudioContentName);
  group.AddContentName(kVideoContentName);
  desc_.AddGroup(group);
  ASSERT_TRUE(jdesc_.Initialize(desc_.Copy(),
                                jdesc_.session_id(),
                                jdesc_.session_version()));
  std::string message = webrtc::SdpSerialize(jdesc_);
  std::string sdp_with_bundle = kSdpFullString;
  InjectAfter(kSessionTime,
              "a=group:BUNDLE audio_content_name video_content_name\r\n",
              &sdp_with_bundle);
  EXPECT_EQ(sdp_with_bundle, message);
}
+
// Per-content bandwidth (set in bps) must serialize as b=AS: lines (in kbps)
// inside the corresponding media sections.
TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithBandwidth) {
  VideoContentDescription* vcd = static_cast<VideoContentDescription*>(
      GetFirstVideoContent(&desc_)->description);
  vcd->set_bandwidth(100 * 1000);
  AudioContentDescription* acd = static_cast<AudioContentDescription*>(
      GetFirstAudioContent(&desc_)->description);
  acd->set_bandwidth(50 * 1000);
  ASSERT_TRUE(jdesc_.Initialize(desc_.Copy(),
                                jdesc_.session_id(),
                                jdesc_.session_version()));
  std::string message = webrtc::SdpSerialize(jdesc_);
  std::string sdp_with_bandwidth = kSdpFullString;
  InjectAfter("a=mid:video_content_name\r\na=sendrecv\r\n",
              "b=AS:100\r\n",
              &sdp_with_bandwidth);
  InjectAfter("a=mid:audio_content_name\r\na=sendrecv\r\n",
              "b=AS:50\r\n",
              &sdp_with_bandwidth);
  EXPECT_EQ(sdp_with_bandwidth, message);
}
+
// Transport options set per content must serialize as media-level
// a=ice-options lines after the matching ice-pwd attribute.
TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithIceOptions) {
  std::vector<std::string> transport_options;
  transport_options.push_back(kIceOption1);
  transport_options.push_back(kIceOption3);
  AddIceOptions(kAudioContentName, transport_options);
  transport_options.clear();
  transport_options.push_back(kIceOption2);
  transport_options.push_back(kIceOption3);
  AddIceOptions(kVideoContentName, transport_options);
  ASSERT_TRUE(jdesc_.Initialize(desc_.Copy(),
                                jdesc_.session_id(),
                                jdesc_.session_version()));
  std::string message = webrtc::SdpSerialize(jdesc_);
  std::string sdp_with_ice_options = kSdpFullString;
  InjectAfter(kAttributeIcePwdVoice,
              "a=ice-options:iceoption1 iceoption3\r\n",
              &sdp_with_ice_options);
  InjectAfter(kAttributeIcePwdVideo,
              "a=ice-options:iceoption2 iceoption3\r\n",
              &sdp_with_ice_options);
  EXPECT_EQ(sdp_with_ice_options, message);
}
+
// Direction serialization: each direction value must rewrite the SDP's
// a=sendrecv lines accordingly (see TestSerializeDirection).
TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithRecvOnlyContent) {
  EXPECT_TRUE(TestSerializeDirection(cricket::MD_RECVONLY));
}

TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithSendOnlyContent) {
  EXPECT_TRUE(TestSerializeDirection(cricket::MD_SENDONLY));
}

TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithInactiveContent) {
  EXPECT_TRUE(TestSerializeDirection(cricket::MD_INACTIVE));
}

// Rejected-content serialization: rejected contents must serialize with a
// zero port on their m= line (see TestSerializeRejected).
TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithAudioRejected) {
  EXPECT_TRUE(TestSerializeRejected(true, false));
}

TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithVideoRejected) {
  EXPECT_TRUE(TestSerializeRejected(false, true));
}

TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithAudioVideoRejected) {
  EXPECT_TRUE(TestSerializeRejected(true, true));
}
+
// An RTP data channel content must serialize as kSdpRtpDataChannelString
// appended after the audio/video sections.
TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithRtpDataChannel) {
  AddRtpDataChannel();
  JsepSessionDescription jsep_desc(kDummyString);

  ASSERT_TRUE(jsep_desc.Initialize(desc_.Copy(), kSessionId, kSessionVersion));
  std::string message = webrtc::SdpSerialize(jsep_desc);

  std::string expected_sdp = kSdpString;
  expected_sdp.append(kSdpRtpDataChannelString);
  EXPECT_EQ(expected_sdp, message);
}
+
+TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithSctpDataChannel) {
+ AddSctpDataChannel();
+ JsepSessionDescription jsep_desc(kDummyString);
+
+ ASSERT_TRUE(jsep_desc.Initialize(desc_.Copy(), kSessionId, kSessionVersion));
+ std::string message = webrtc::SdpSerialize(jsep_desc);
+
+ std::string expected_sdp = kSdpString;
+ expected_sdp.append(kSdpSctpDataChannelString);
+ EXPECT_EQ(message, expected_sdp);
+}
+
// RTP header extensions added via AddExtmap() must serialize as a=extmap
// lines directly after each section's a=mid line.
TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithExtmap) {
  AddExtmap();
  JsepSessionDescription desc_with_extmap("dummy");
  ASSERT_TRUE(desc_with_extmap.Initialize(desc_.Copy(),
                                          kSessionId, kSessionVersion));
  std::string message = webrtc::SdpSerialize(desc_with_extmap);

  std::string sdp_with_extmap = kSdpString;
  InjectAfter("a=mid:audio_content_name\r\n",
              kExtmap, &sdp_with_extmap);
  InjectAfter("a=mid:video_content_name\r\n",
              kExtmap, &sdp_with_extmap);

  EXPECT_EQ(sdp_with_extmap, message);
}
+
+
// Serializing the reference candidate must produce kSdpOneCandidate.
TEST_F(WebRtcSdpTest, SerializeCandidates) {
  std::string message = webrtc::SdpSerializeCandidate(*jcandidate_);
  EXPECT_EQ(std::string(kSdpOneCandidate), message);
}
+
+TEST_F(WebRtcSdpTest, DeserializeSessionDescription) {
+ JsepSessionDescription jdesc(kDummyString);
+ // Deserialize
+ EXPECT_TRUE(SdpDeserialize(kSdpFullString, &jdesc));
+ // Verify
+ EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc));
+}
+
+TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithoutCarriageReturn) {
+ JsepSessionDescription jdesc(kDummyString);
+ std::string sdp_without_carriage_return = kSdpFullString;
+ Replace("\r\n", "\n", &sdp_without_carriage_return);
+ // Deserialize
+ EXPECT_TRUE(SdpDeserialize(sdp_without_carriage_return, &jdesc));
+ // Verify
+ EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc));
+}
+
// Deserializing the candidate-free kSdpString must match a description
// initialized without candidates.
TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithoutCandidates) {
  // SessionDescription with desc but without candidates.
  JsepSessionDescription jdesc_no_candidates(kDummyString);
  ASSERT_TRUE(jdesc_no_candidates.Initialize(desc_.Copy(),
                                             kSessionId, kSessionVersion));
  JsepSessionDescription new_jdesc(kDummyString);
  EXPECT_TRUE(SdpDeserialize(kSdpString, &new_jdesc));
  EXPECT_TRUE(CompareSessionDescription(jdesc_no_candidates, new_jdesc));
}
+
// Static payload types may omit a=rtpmap; the parser must fill in their
// well-known names/clockrates, ignore rtpmap lines for payload types absent
// from the m= line, and order codecs by m= line preference.
TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithoutRtpmap) {
  static const char kSdpNoRtpmapString[] =
      "v=0\r\n"
      "o=- 11 22 IN IP4 127.0.0.1\r\n"
      "s=-\r\n"
      "t=0 0\r\n"
      "m=audio 49232 RTP/AVP 0 18 103\r\n"
      // Codec that doesn't appear in the m= line will be ignored.
      "a=rtpmap:104 CELT/32000/2\r\n"
      // The rtpmap line for static payload codec is optional.
      "a=rtpmap:18 G729/16000\r\n"
      "a=rtpmap:103 ISAC/16000\r\n";

  JsepSessionDescription jdesc(kDummyString);
  EXPECT_TRUE(SdpDeserialize(kSdpNoRtpmapString, &jdesc));
  cricket::AudioContentDescription* audio =
      static_cast<AudioContentDescription*>(
          jdesc.description()->GetContentDescriptionByName(cricket::CN_AUDIO));
  AudioCodecs ref_codecs;
  // The codecs in the AudioContentDescription will be sorted by preference.
  ref_codecs.push_back(AudioCodec(0, "PCMU", 8000, 0, 1, 3));
  ref_codecs.push_back(AudioCodec(18, "G729", 16000, 0, 1, 2));
  ref_codecs.push_back(AudioCodec(103, "ISAC", 16000, 32000, 1, 1));
  EXPECT_EQ(ref_codecs, audio->codecs());
}
+
// Ensure that we can deserialize SDP with a=fingerprint properly.
TEST_F(WebRtcSdpTest, DeserializeJsepSessionDescriptionWithFingerprint) {
  // Add a DTLS a=fingerprint attribute to our session description.
  AddFingerprint();
  JsepSessionDescription new_jdesc(kDummyString);
  ASSERT_TRUE(new_jdesc.Initialize(desc_.Copy(),
                                   jdesc_.session_id(),
                                   jdesc_.session_version()));

  // Parse kSdpString with a fingerprint injected into each media section and
  // expect the result to equal the fingerprinted reference description.
  JsepSessionDescription jdesc_with_fingerprint(kDummyString);
  std::string sdp_with_fingerprint = kSdpString;
  InjectAfter(kAttributeIcePwdVoice, kFingerprint, &sdp_with_fingerprint);
  InjectAfter(kAttributeIcePwdVideo, kFingerprint, &sdp_with_fingerprint);
  EXPECT_TRUE(SdpDeserialize(sdp_with_fingerprint, &jdesc_with_fingerprint));
  EXPECT_TRUE(CompareSessionDescription(jdesc_with_fingerprint, new_jdesc));
}
+
// An a=group:BUNDLE line must deserialize into a BUNDLE ContentGroup on the
// resulting description.
TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithBundle) {
  JsepSessionDescription jdesc_with_bundle(kDummyString);
  std::string sdp_with_bundle = kSdpFullString;
  InjectAfter(kSessionTime,
              "a=group:BUNDLE audio_content_name video_content_name\r\n",
              &sdp_with_bundle);
  EXPECT_TRUE(SdpDeserialize(sdp_with_bundle, &jdesc_with_bundle));
  // Build the expected reference: same description plus the BUNDLE group.
  ContentGroup group(cricket::GROUP_TYPE_BUNDLE);
  group.AddContentName(kAudioContentName);
  group.AddContentName(kVideoContentName);
  desc_.AddGroup(group);
  ASSERT_TRUE(jdesc_.Initialize(desc_.Copy(),
                                jdesc_.session_id(),
                                jdesc_.session_version()));
  EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc_with_bundle));
}
+
// b=AS: lines (kbps) must deserialize into per-content bandwidth (bps).
TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithBandwidth) {
  JsepSessionDescription jdesc_with_bandwidth(kDummyString);
  std::string sdp_with_bandwidth = kSdpFullString;
  InjectAfter("a=mid:video_content_name\r\na=sendrecv\r\n",
              "b=AS:100\r\n",
              &sdp_with_bandwidth);
  InjectAfter("a=mid:audio_content_name\r\na=sendrecv\r\n",
              "b=AS:50\r\n",
              &sdp_with_bandwidth);
  EXPECT_TRUE(
      SdpDeserialize(sdp_with_bandwidth, &jdesc_with_bandwidth));
  // Build the expected reference with matching bandwidth values.
  VideoContentDescription* vcd = static_cast<VideoContentDescription*>(
      GetFirstVideoContent(&desc_)->description);
  vcd->set_bandwidth(100 * 1000);
  AudioContentDescription* acd = static_cast<AudioContentDescription*>(
      GetFirstAudioContent(&desc_)->description);
  acd->set_bandwidth(50 * 1000);
  ASSERT_TRUE(jdesc_.Initialize(desc_.Copy(),
                                jdesc_.session_id(),
                                jdesc_.session_version()));
  EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc_with_bandwidth));
}
+
// Session-level and media-level a=ice-options must combine per content:
// each content gets the session-level options plus its own media-level ones.
TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithIceOptions) {
  JsepSessionDescription jdesc_with_ice_options(kDummyString);
  std::string sdp_with_ice_options = kSdpFullString;
  InjectAfter(kSessionTime,
              "a=ice-options:iceoption3\r\n",
              &sdp_with_ice_options);
  InjectAfter(kAttributeIcePwdVoice,
              "a=ice-options:iceoption1\r\n",
              &sdp_with_ice_options);
  InjectAfter(kAttributeIcePwdVideo,
              "a=ice-options:iceoption2\r\n",
              &sdp_with_ice_options);
  EXPECT_TRUE(SdpDeserialize(sdp_with_ice_options, &jdesc_with_ice_options));
  // Build the expected reference: session-level option first, then the
  // media-level option, per content.
  std::vector<std::string> transport_options;
  transport_options.push_back(kIceOption3);
  transport_options.push_back(kIceOption1);
  AddIceOptions(kAudioContentName, transport_options);
  transport_options.clear();
  transport_options.push_back(kIceOption3);
  transport_options.push_back(kIceOption2);
  AddIceOptions(kVideoContentName, transport_options);
  ASSERT_TRUE(jdesc_.Initialize(desc_.Copy(),
                                jdesc_.session_id(),
                                jdesc_.session_version()));
  EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc_with_ice_options));
}
+
// Session-level ice-ufrag/ice-pwd apply to contents without their own;
// media-level credentials override the session-level ones. Audio gets
// media-level credentials here, video falls back to session-level.
TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithUfragPwd) {
  // Remove the original ice-ufrag and ice-pwd
  JsepSessionDescription jdesc_with_ufrag_pwd(kDummyString);
  std::string sdp_with_ufrag_pwd = kSdpFullString;
  EXPECT_TRUE(RemoveCandidateUfragPwd(&sdp_with_ufrag_pwd));
  // Add session level ufrag and pwd
  InjectAfter(kSessionTime,
      "a=ice-pwd:session+level+icepwd\r\n"
      "a=ice-ufrag:session+level+iceufrag\r\n",
      &sdp_with_ufrag_pwd);
  // Add media level ufrag and pwd for audio
  InjectAfter("a=mid:audio_content_name\r\n",
      "a=ice-pwd:media+level+icepwd\r\na=ice-ufrag:media+level+iceufrag\r\n",
      &sdp_with_ufrag_pwd);
  // Update the candidate ufrag and pwd to the expected ones.
  EXPECT_TRUE(UpdateCandidateUfragPwd(&jdesc_, 0,
      "media+level+iceufrag", "media+level+icepwd"));
  EXPECT_TRUE(UpdateCandidateUfragPwd(&jdesc_, 1,
      "session+level+iceufrag", "session+level+icepwd"));
  EXPECT_TRUE(SdpDeserialize(sdp_with_ufrag_pwd, &jdesc_with_ufrag_pwd));
  EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc_with_ufrag_pwd));
}
+
+
// Direction deserialization: each direction value in the SDP must map onto
// the parsed content descriptions (see TestDeserializeDirection).
TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithRecvOnlyContent) {
  EXPECT_TRUE(TestDeserializeDirection(cricket::MD_RECVONLY));
}

TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithSendOnlyContent) {
  EXPECT_TRUE(TestDeserializeDirection(cricket::MD_SENDONLY));
}

TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithInactiveContent) {
  EXPECT_TRUE(TestDeserializeDirection(cricket::MD_INACTIVE));
}

// Rejected-content deserialization: zero-port m= lines must produce rejected
// contents (see TestDeserializeRejected).
TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithRejectedAudio) {
  EXPECT_TRUE(TestDeserializeRejected(true, false));
}

TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithRejectedVideo) {
  EXPECT_TRUE(TestDeserializeRejected(false, true));
}

TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithRejectedAudioVideo) {
  EXPECT_TRUE(TestDeserializeRejected(true, true));
}
+
// Tests that we can still handle the sdp uses mslabel and label instead of
// msid for backward compatibility.
TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithoutMsid) {
  JsepSessionDescription jdesc(kDummyString);
  std::string sdp_without_msid = kSdpFullString;
  // Mangle every "msid" so the parser must fall back to mslabel/label.
  Replace("msid", "xmsid", &sdp_without_msid);
  // Deserialize
  EXPECT_TRUE(SdpDeserialize(sdp_without_msid, &jdesc));
  // Verify
  EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc));
}
+
// Candidate deserialization: full a=candidate line, a line without the
// "generation" extension (defaults to 0), and multiple lines (only the
// first is parsed).
TEST_F(WebRtcSdpTest, DeserializeCandidate) {
  JsepIceCandidate jcandidate(kDummyMid, kDummyIndex);

  std::string sdp = kSdpOneCandidate;
  EXPECT_TRUE(SdpDeserializeCandidate(sdp, &jcandidate));
  EXPECT_EQ(kDummyMid, jcandidate.sdp_mid());
  EXPECT_EQ(kDummyIndex, jcandidate.sdp_mline_index());
  EXPECT_TRUE(jcandidate.candidate().IsEquivalent(jcandidate_->candidate()));

  // Candidate line without generation extension.
  sdp = kSdpOneCandidate;
  Replace(" generation 2", "", &sdp);
  EXPECT_TRUE(SdpDeserializeCandidate(sdp, &jcandidate));
  EXPECT_EQ(kDummyMid, jcandidate.sdp_mid());
  EXPECT_EQ(kDummyIndex, jcandidate.sdp_mline_index());
  Candidate expected = jcandidate_->candidate();
  expected.set_generation(0);
  EXPECT_TRUE(jcandidate.candidate().IsEquivalent(expected));

  // Multiple candidate lines.
  // Only the first line will be deserialized. The rest will be ignored.
  sdp = kSdpOneCandidate;
  sdp.append("a=candidate:1 2 tcp 1234 192.168.1.100 5678 typ host\r\n");
  EXPECT_TRUE(SdpDeserializeCandidate(sdp, &jcandidate));
  EXPECT_EQ(kDummyMid, jcandidate.sdp_mid());
  EXPECT_EQ(kDummyIndex, jcandidate.sdp_mline_index());
  EXPECT_TRUE(jcandidate.candidate().IsEquivalent(jcandidate_->candidate()));
}
+
// This test verifies the deserialization of candidate-attribute
// as per RFC 5245. Candidate-attribute will be of the format
// candidate:<blah>. This format will be used when candidates
// are trickled.
TEST_F(WebRtcSdpTest, DeserializeRawCandidateAttribute) {
  JsepIceCandidate jcandidate(kDummyMid, kDummyIndex);

  std::string candidate_attribute = kRawCandidate;
  EXPECT_TRUE(SdpDeserializeCandidate(candidate_attribute, &jcandidate));
  EXPECT_EQ(kDummyMid, jcandidate.sdp_mid());
  EXPECT_EQ(kDummyIndex, jcandidate.sdp_mline_index());
  EXPECT_TRUE(jcandidate.candidate().IsEquivalent(jcandidate_->candidate()));
  EXPECT_EQ(2u, jcandidate.candidate().generation());

  // Candidate line without generation extension.
  candidate_attribute = kRawCandidate;
  Replace(" generation 2", "", &candidate_attribute);
  EXPECT_TRUE(SdpDeserializeCandidate(candidate_attribute, &jcandidate));
  EXPECT_EQ(kDummyMid, jcandidate.sdp_mid());
  EXPECT_EQ(kDummyIndex, jcandidate.sdp_mline_index());
  // Missing generation must default to 0.
  Candidate expected = jcandidate_->candidate();
  expected.set_generation(0);
  EXPECT_TRUE(jcandidate.candidate().IsEquivalent(expected));

  // Candidate line without candidate:
  candidate_attribute = kRawCandidate;
  Replace("candidate:", "", &candidate_attribute);
  EXPECT_FALSE(SdpDeserializeCandidate(candidate_attribute, &jcandidate));

  // Concatenating additional candidate. Expecting deserialization to fail.
  candidate_attribute = kRawCandidate;
  candidate_attribute.append("candidate:1 2 udp 1234 192.168.1.1 typ host");
  EXPECT_FALSE(SdpDeserializeCandidate(candidate_attribute, &jcandidate));
}
+
+TEST_F(WebRtcSdpTest, DeserializeSdpWithRtpDataChannels) {
+  AddRtpDataChannel();
+  JsepSessionDescription jdesc(kDummyString);
+  ASSERT_TRUE(jdesc.Initialize(desc_.Copy(), kSessionId, kSessionVersion));
+
+  // Build an SDP blob that carries an RTP data channel section.
+  std::string sdp_with_data(kSdpString);
+  sdp_with_data += kSdpRtpDataChannelString;
+  JsepSessionDescription jdesc_output(kDummyString);
+
+  // Deserializing it must yield a description equal to the reference.
+  EXPECT_TRUE(SdpDeserialize(sdp_with_data, &jdesc_output));
+  EXPECT_TRUE(CompareSessionDescription(jdesc, jdesc_output));
+}
+
+TEST_F(WebRtcSdpTest, DeserializeSdpWithSctpDataChannels) {
+  AddSctpDataChannel();
+  JsepSessionDescription jdesc(kDummyString);
+  ASSERT_TRUE(jdesc.Initialize(desc_.Copy(), kSessionId, kSessionVersion));
+
+  // Build an SDP blob that carries an SCTP data channel section and verify
+  // it deserializes to a description equal to the reference.
+  std::string sdp_with_data(kSdpString);
+  sdp_with_data += kSdpSctpDataChannelString;
+  JsepSessionDescription jdesc_output(kDummyString);
+
+  EXPECT_TRUE(SdpDeserialize(sdp_with_data, &jdesc_output));
+  EXPECT_TRUE(CompareSessionDescription(jdesc, jdesc_output));
+}
+
+// Extmap supplied at session level only (args: session=true, media=false).
+TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithSessionLevelExtmap) {
+  TestDeserializeExtmap(true, false);
+}
+
+// Extmap supplied at media level only (args: session=false, media=true).
+TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithMediaLevelExtmap) {
+  TestDeserializeExtmap(false, true);
+}
+
+// Extmap at both session and media level; the test name indicates this
+// combination is treated as invalid (RFC 5285 forbids mixing levels) —
+// TestDeserializeExtmap presumably verifies rejection in this mode; confirm
+// against the helper's definition.
+TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithInvalidExtmap) {
+  TestDeserializeExtmap(true, true);
+}
+
+TEST_F(WebRtcSdpTest, DeserializeCandidateWithDifferentTransport) {
+  JsepIceCandidate jcandidate(kDummyMid, kDummyIndex);
+
+  // An unknown transport token must be rejected.
+  std::string new_sdp = kSdpOneCandidate;
+  Replace("udp", "unsupported_transport", &new_sdp);
+  EXPECT_FALSE(SdpDeserializeCandidate(new_sdp, &jcandidate));
+
+  // The transport token is matched case-insensitively, so "uDP" parses.
+  new_sdp = kSdpOneCandidate;
+  Replace("udp", "uDP", &new_sdp);
+  EXPECT_TRUE(SdpDeserializeCandidate(new_sdp, &jcandidate));
+  EXPECT_EQ(kDummyMid, jcandidate.sdp_mid());
+  EXPECT_EQ(kDummyIndex, jcandidate.sdp_mline_index());
+  EXPECT_TRUE(jcandidate.candidate().IsEquivalent(jcandidate_->candidate()));
+}
+
+TEST_F(WebRtcSdpTest, DeserializeCandidateOldFormat) {
+  JsepIceCandidate jcandidate(kDummyMid, kDummyIndex);
+  // Style fix: space after comma (",&jcandidate" violated the file's style).
+  EXPECT_TRUE(SdpDeserializeCandidate(kSdpOneCandidateOldFormat, &jcandidate));
+  EXPECT_EQ(kDummyMid, jcandidate.sdp_mid());
+  EXPECT_EQ(kDummyIndex, jcandidate.sdp_mline_index());
+  // The old format carries explicit ICE credentials; fold them into the
+  // reference candidate before comparing.
+  Candidate ref_candidate = jcandidate_->candidate();
+  ref_candidate.set_username("user_rtp");
+  ref_candidate.set_password("password_rtp");
+  EXPECT_TRUE(jcandidate.candidate().IsEquivalent(ref_candidate));
+}
+
+// Feeds a set of malformed lines into the parser via ReplaceAndTryToParse
+// and expects each substitution to make the full-string parse fail.
+TEST_F(WebRtcSdpTest, DeserializeBrokenSdp) {
+  const char kSdpDestroyer[] = "!@#$%^&";
+  const char kSdpInvalidLine1[] = " =candidate";
+  const char kSdpInvalidLine2[] = "a+candidate";
+  const char kSdpInvalidLine3[] = "a= candidate";
+  // Broken fingerprint: "4AAD" is not a valid two-hex-digit byte.
+  const char kSdpInvalidLine4[] = "a=fingerprint:sha-1 "
+      "4AAD:B9:B1:3F:82:18:3B:54:02:12:DF:3E:5D:49:6B:19:E5:7C:AB";
+  // Extra field after the fingerprint value.
+  const char kSdpInvalidLine5[] = "a=fingerprint:sha-1 "
+      "4A:AD:B9:B1:3F:82:18:3B:54:02:12:DF:3E:5D:49:6B:19:E5:7C:AB XXX";
+  // Missing space between hash function and fingerprint value.
+  const char kSdpInvalidLine6[] = "a=fingerprint:sha-1"
+      "4A:AD:B9:B1:3F:82:18:3B:54:02:12:DF:3E:5D:49:6B:19:E5:7C:AB";
+
+  // Broken session description.
+  ReplaceAndTryToParse("v=", kSdpDestroyer);
+  ReplaceAndTryToParse("o=", kSdpDestroyer);
+  ReplaceAndTryToParse("s=-", kSdpDestroyer);
+  // Broken time description.
+  ReplaceAndTryToParse("t=", kSdpDestroyer);
+
+  // Broken media description.
+  ReplaceAndTryToParse("m=video", kSdpDestroyer);
+
+  // Invalid lines.
+  ReplaceAndTryToParse("a=candidate", kSdpInvalidLine1);
+  ReplaceAndTryToParse("a=candidate", kSdpInvalidLine2);
+  ReplaceAndTryToParse("a=candidate", kSdpInvalidLine3);
+
+  // Bogus fingerprint replacing a=sendrecv. We selected this attribute
+  // because it's orthogonal to what we are replacing and hence
+  // safe.
+  ReplaceAndTryToParse("a=sendrecv", kSdpInvalidLine4);
+  ReplaceAndTryToParse("a=sendrecv", kSdpInvalidLine5);
+  ReplaceAndTryToParse("a=sendrecv", kSdpInvalidLine6);
+}
+
+// Verifies that codec preference follows the m-line payload type order
+// (104 listed first) rather than the order of the a=rtpmap lines.
+TEST_F(WebRtcSdpTest, DeserializeSdpWithReorderedPltypes) {
+  JsepSessionDescription jdesc_output(kDummyString);
+
+  const char kSdpWithReorderedPlTypesString[] =
+      "v=0\r\n"
+      "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n"
+      "s=-\r\n"
+      "t=0 0\r\n"
+      "m=audio 1 RTP/SAVPF 104 103\r\n"  // Pl type 104 preferred.
+      "a=rtpmap:111 opus/48000/2\r\n"  // Pltype 111 listed before 103 and 104
+                                       // in the map.
+      "a=rtpmap:103 ISAC/16000\r\n"  // Pltype 103 listed before 104 in the map.
+      "a=rtpmap:104 CELT/32000/2\r\n";
+
+  // Deserialize.
+  EXPECT_TRUE(SdpDeserialize(kSdpWithReorderedPlTypesString, &jdesc_output));
+
+  const ContentInfo* ac = GetFirstAudioContent(jdesc_output.description());
+  ASSERT_TRUE(ac != NULL);
+  const AudioContentDescription* acd =
+      static_cast<const AudioContentDescription*>(ac->description);
+  ASSERT_FALSE(acd->codecs().empty());
+  // The m-line's first payload type (104, CELT) must come out first.
+  EXPECT_EQ("CELT", acd->codecs()[0].name);
+  EXPECT_EQ(104, acd->codecs()[0].id);
+}
+
+TEST_F(WebRtcSdpTest, DeserializeSerializeCodecParams) {
+  JsepSessionDescription jdesc_output(kDummyString);
+
+  // Round-trip a description with every supported codec parameter set.
+  CodecParams params;
+  params.min_ptime = 10;
+  params.ptime = 30;
+  params.max_ptime = 40;
+  params.sprop_stereo = 1;
+  params.stereo = 1;
+  params.useinband = 1;
+  TestDeserializeCodecParams(params, &jdesc_output);
+  TestSerialize(jdesc_output);
+}
+
+TEST_F(WebRtcSdpTest, DeserializeSerializeRtcpFb) {
+  // Round-trip rtcp-fb attributes bound to an explicit payload type
+  // (wildcard disabled).
+  JsepSessionDescription jdesc_output(kDummyString);
+  TestDeserializeRtcpFb(&jdesc_output, false);
+  TestSerialize(jdesc_output);
+}
+
+TEST_F(WebRtcSdpTest, DeserializeSerializeRtcpFbWildcard) {
+  // Round-trip rtcp-fb attributes using the wildcard payload type.
+  JsepSessionDescription jdesc_output(kDummyString);
+  TestDeserializeRtcpFb(&jdesc_output, true);
+  TestSerialize(jdesc_output);
+}
+
+TEST_F(WebRtcSdpTest, DeserializeVideoFmtp) {
+  JsepSessionDescription jdesc_output(kDummyString);
+
+  const char kSdpWithFmtpString[] =
+      "v=0\r\n"
+      "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n"
+      "s=-\r\n"
+      "t=0 0\r\n"
+      "m=video 3457 RTP/SAVPF 120\r\n"
+      "a=rtpmap:120 VP8/90000\r\n"
+      "a=fmtp:120 x-google-min-bitrate=10; x-google-max-quantization=40\r\n";
+
+  // Deserialize and locate the video section.
+  SdpParseError error;
+  EXPECT_TRUE(webrtc::SdpDeserialize(kSdpWithFmtpString, &jdesc_output,
+                                     &error));
+  const ContentInfo* vc = GetFirstVideoContent(jdesc_output.description());
+  ASSERT_TRUE(vc != NULL);
+  const VideoContentDescription* vcd =
+      static_cast<const VideoContentDescription*>(vc->description);
+  ASSERT_FALSE(vcd->codecs().empty());
+
+  // Both fmtp parameters must land in the codec's parameter map.
+  cricket::VideoCodec vp8 = vcd->codecs()[0];
+  EXPECT_EQ("VP8", vp8.name);
+  EXPECT_EQ(120, vp8.id);
+  cricket::CodecParameterMap::iterator param_it =
+      vp8.params.find("x-google-min-bitrate");
+  ASSERT_TRUE(param_it != vp8.params.end());
+  EXPECT_EQ(param_it->second, "10");
+  param_it = vp8.params.find("x-google-max-quantization");
+  ASSERT_TRUE(param_it != vp8.params.end());
+  EXPECT_EQ(param_it->second, "40");
+}
+
+TEST_F(WebRtcSdpTest, SerializeVideoFmtp) {
+  VideoContentDescription* vcd = static_cast<VideoContentDescription*>(
+      GetFirstVideoContent(&desc_)->description);
+
+  // Attach one fmtp parameter to the first video codec.
+  cricket::VideoCodecs codecs = vcd->codecs();
+  codecs[0].params["x-google-min-bitrate"] = "10";
+  vcd->set_codecs(codecs);
+
+  ASSERT_TRUE(jdesc_.Initialize(desc_.Copy(),
+                                jdesc_.session_id(),
+                                jdesc_.session_version()));
+
+  // Serialization must equal the reference SDP with an a=fmtp line injected
+  // right after the matching a=rtpmap line.
+  std::string expected_sdp = kSdpFullString;
+  InjectAfter("a=rtpmap:120 VP8/90000\r\n",
+              "a=fmtp:120 x-google-min-bitrate=10\r\n",
+              &expected_sdp);
+  EXPECT_EQ(expected_sdp, webrtc::SdpSerialize(jdesc_));
+}
+
+// Verifies a session defaults to ICEMODE_FULL and that both transports
+// switch to ICEMODE_LITE once a session-level a=ice-lite line is added.
+// Fix: guard each TransportInfo pointer with ASSERT_TRUE before
+// dereferencing — the original EXPECT_EQ lines would crash the test binary
+// (NULL deref) instead of failing cleanly if the lookup ever failed.
+TEST_F(WebRtcSdpTest, DeserializeSdpWithIceLite) {
+  JsepSessionDescription jdesc_with_icelite(kDummyString);
+  std::string sdp_with_icelite = kSdpFullString;
+  EXPECT_TRUE(SdpDeserialize(sdp_with_icelite, &jdesc_with_icelite));
+  cricket::SessionDescription* desc = jdesc_with_icelite.description();
+  const cricket::TransportInfo* tinfo1 =
+      desc->GetTransportInfoByName("audio_content_name");
+  ASSERT_TRUE(tinfo1 != NULL);
+  EXPECT_EQ(cricket::ICEMODE_FULL, tinfo1->description.ice_mode);
+  const cricket::TransportInfo* tinfo2 =
+      desc->GetTransportInfoByName("video_content_name");
+  ASSERT_TRUE(tinfo2 != NULL);
+  EXPECT_EQ(cricket::ICEMODE_FULL, tinfo2->description.ice_mode);
+
+  InjectAfter(kSessionTime,
+              "a=ice-lite\r\n",
+              &sdp_with_icelite);
+  EXPECT_TRUE(SdpDeserialize(sdp_with_icelite, &jdesc_with_icelite));
+  desc = jdesc_with_icelite.description();
+  const cricket::TransportInfo* atinfo =
+      desc->GetTransportInfoByName("audio_content_name");
+  ASSERT_TRUE(atinfo != NULL);
+  EXPECT_EQ(cricket::ICEMODE_LITE, atinfo->description.ice_mode);
+  const cricket::TransportInfo* vtinfo =
+      desc->GetTransportInfoByName("video_content_name");
+  ASSERT_TRUE(vtinfo != NULL);
+  EXPECT_EQ(cricket::ICEMODE_LITE, vtinfo->description.ice_mode);
+}
+
+// Verifies that candidates embedded in the input SDP survive a
+// deserialize/serialize round trip byte for byte.
+TEST_F(WebRtcSdpTest, RoundTripSdpWithSctpDataChannelsWithCandidates) {
+  std::string sdp_with_data(kSdpString);
+  sdp_with_data += kSdpSctpDataChannelWithCandidatesString;
+
+  JsepSessionDescription jdesc_output(kDummyString);
+  EXPECT_TRUE(SdpDeserialize(sdp_with_data, &jdesc_output));
+  EXPECT_EQ(sdp_with_data, webrtc::SdpSerialize(jdesc_output));
+}
diff --git a/talk/app/webrtc/webrtcsession.cc b/talk/app/webrtc/webrtcsession.cc
new file mode 100644
index 0000000..fee8d42
--- /dev/null
+++ b/talk/app/webrtc/webrtcsession.cc
@@ -0,0 +1,1440 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/webrtcsession.h"
+
+#include <algorithm>
+#include <climits>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "talk/app/webrtc/jsepicecandidate.h"
+#include "talk/app/webrtc/jsepsessiondescription.h"
+#include "talk/app/webrtc/mediaconstraintsinterface.h"
+#include "talk/app/webrtc/mediastreamsignaling.h"
+#include "talk/app/webrtc/peerconnectioninterface.h"
+#include "talk/base/helpers.h"
+#include "talk/base/logging.h"
+#include "talk/base/stringencode.h"
+#include "talk/media/base/constants.h"
+#include "talk/media/base/videocapturer.h"
+#include "talk/session/media/channel.h"
+#include "talk/session/media/channelmanager.h"
+#include "talk/session/media/mediasession.h"
+
+using cricket::ContentInfo;
+using cricket::ContentInfos;
+using cricket::MediaContentDescription;
+using cricket::SessionDescription;
+using cricket::TransportInfo;
+
+typedef cricket::MediaSessionOptions::Stream Stream;
+typedef cricket::MediaSessionOptions::Streams Streams;
+
+namespace webrtc {
+
+// First SDP session version handed out for this session (see RFC 4566 o=).
+static const uint64 kInitSessionVersion = 2;
+
+// Constraints whose names start with this prefix are internal-only; JS
+// supplied values with this prefix are removed (see kEnableSctpDataChannels).
+const char kInternalConstraintPrefix[] = "internal";
+
+// Supported MediaConstraints.
+// DTLS-SRTP pseudo-constraints.
+const char MediaConstraintsInterface::kEnableDtlsSrtp[] =
+    "DtlsSrtpKeyAgreement";
+// DataChannel pseudo constraints.
+const char MediaConstraintsInterface::kEnableRtpDataChannels[] =
+    "RtpDataChannels";
+// This constraint is for internal use only, representing the Chrome command
+// line flag. So it is prefixed with kInternalConstraintPrefix so JS values
+// will be removed.
+const char MediaConstraintsInterface::kEnableSctpDataChannels[] =
+    "internalSctpDataChannels";
+
+// Arbitrary constant used as prefix for the identity.
+// Chosen to make the certificates more readable.
+const char kWebRTCIdentityPrefix[] = "WebRTC";
+
+// Error messages reported through |err_desc| out-parameters below.
+const char kSetLocalSdpFailed[] = "SetLocalDescription failed: ";
+const char kSetRemoteSdpFailed[] = "SetRemoteDescription failed: ";
+const char kCreateChannelFailed[] = "Failed to create channels.";
+const char kInvalidCandidates[] = "Description contains invalid candidates.";
+const char kInvalidSdp[] = "Invalid session description.";
+const char kMlineMismatch[] =
+    "Offer and answer descriptions m-lines are not matching. "
+    "Rejecting answer.";
+const char kSdpWithoutCrypto[] = "Called with a SDP without crypto enabled.";
+const char kSessionError[] = "Session error code: ";
+const char kUpdateStateFailed[] = "Failed to update session state: ";
+const char kPushDownOfferTDFailed[] =
+    "Failed to push down offer transport description.";
+const char kPushDownPranswerTDFailed[] =
+    "Failed to push down pranswer transport description.";
+const char kPushDownAnswerTDFailed[] =
+    "Failed to push down answer transport description.";
+
+// Compares |answer| against |offer|: both must contain the same number of
+// m-lines, with matching content names in the same order. Returns true on
+// a match, false otherwise.
+static bool VerifyMediaDescriptions(
+    const SessionDescription* answer, const SessionDescription* offer) {
+  if (offer->contents().size() != answer->contents().size()) {
+    return false;
+  }
+  for (size_t i = 0; i < offer->contents().size(); ++i) {
+    if (offer->contents()[i].name != answer->contents()[i].name) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Copies every candidate from |source_desc| into |dest_desc|, skipping
+// candidates the destination already holds. Media sections are matched by
+// index; sections present in only one description are ignored. A NULL
+// |source_desc| is a no-op.
+static void CopyCandidatesFromSessionDescription(
+    const SessionDescriptionInterface* source_desc,
+    SessionDescriptionInterface* dest_desc) {
+  if (!source_desc) {
+    return;
+  }
+  for (size_t m = 0;
+       m < source_desc->number_of_mediasections() &&
+           m < dest_desc->number_of_mediasections();
+       ++m) {
+    const IceCandidateCollection* source_candidates =
+        source_desc->candidates(m);
+    const IceCandidateCollection* dest_candidates = dest_desc->candidates(m);
+    for (size_t n = 0; n < source_candidates->count(); ++n) {
+      const IceCandidateInterface* candidate = source_candidates->at(n);
+      if (!dest_candidates->HasCandidate(candidate)) {
+        dest_desc->AddCandidate(candidate);
+      }
+    }
+  }
+}
+
+// Returns true only if every non-rejected content in |desc| supplies SDES
+// crypto keys or a DTLS fingerprint. Mismatches, such as replying with a
+// DTLS fingerprint to SDES keys, are caught in Transport negotiation and
+// backstopped by Channel's |secure_required| check. A NULL |desc| fails.
+static bool VerifyCrypto(const SessionDescription* desc) {
+  if (!desc) {
+    return false;
+  }
+  const ContentInfos& contents = desc->contents();
+  for (size_t index = 0; index < contents.size(); ++index) {
+    const ContentInfo& cinfo = contents[index];
+    if (cinfo.rejected) {
+      continue;
+    }
+    // If the content isn't rejected, crypto must be present.
+    const MediaContentDescription* media =
+        static_cast<const MediaContentDescription*>(cinfo.description);
+    const TransportInfo* tinfo = desc->GetTransportInfoByName(cinfo.name);
+    if (!media || !tinfo) {
+      // A content without a media description or transport is malformed.
+      LOG(LS_ERROR) << kInvalidSdp;
+      return false;
+    }
+    if (media->cryptos().empty() &&
+        !tinfo->description.identity_fingerprint) {
+      // Crypto must be supplied.
+      LOG(LS_WARNING) << "Session description must have SDES or DTLS-SRTP.";
+      return false;
+    }
+  }
+  return true;
+}
+
+// Strict weak ordering of streams by id, for use with std::sort.
+static bool CompareStream(const Stream& stream1, const Stream& stream2) {
+  return stream1.id < stream2.id;
+}
+
+// Equality of streams by id, for use with std::adjacent_find.
+static bool SameId(const Stream& stream1, const Stream& stream2) {
+  return stream1.id == stream2.id;
+}
+
+// Returns true if no two streams in |streams| share an id: sort a copy by
+// id and look for adjacent duplicates.
+static bool ValidStreams(const Streams& streams) {
+  Streams sorted_streams = streams;
+  std::sort(sorted_streams.begin(), sorted_streams.end(), CompareStream);
+  return std::adjacent_find(sorted_streams.begin(), sorted_streams.end(),
+                            SameId) == sorted_streams.end();
+}
+
+// Looks up the first SSRC of the audio stream identified by |track_id| in
+// |session_description| and writes it to |ssrc|. Returns false when the
+// session has no audio content or no stream with that id.
+static bool GetAudioSsrcByTrackId(
+    const SessionDescription* session_description,
+    const std::string& track_id, uint32 *ssrc) {
+  const cricket::ContentInfo* audio_info =
+      cricket::GetFirstAudioContent(session_description);
+  if (!audio_info) {
+    LOG(LS_ERROR) << "Audio not used in this call";
+    return false;
+  }
+  const cricket::MediaContentDescription* audio_content =
+      static_cast<const cricket::MediaContentDescription*>(
+          audio_info->description);
+  cricket::StreamParams stream;
+  const bool found = cricket::GetStreamByIds(audio_content->streams(), "",
+                                             track_id, &stream);
+  if (found) {
+    *ssrc = stream.first_ssrc();
+  }
+  return found;
+}
+
+// Finds the id of the stream carrying |ssrc| in |session_description| and
+// writes it to |track_id|. The audio content is searched first, then the
+// video content. Returns false if no stream in either content uses |ssrc|.
+static bool GetTrackIdBySsrc(const SessionDescription* session_description,
+                             uint32 ssrc, std::string* track_id) {
+  ASSERT(track_id != NULL);
+
+  cricket::StreamParams stream_out;
+  const cricket::ContentInfo* audio_info =
+      cricket::GetFirstAudioContent(session_description);
+  // BUG FIX: the original returned false outright when the session had no
+  // audio content, so SSRCs could never be resolved in video-only sessions.
+  // Fall through to the video content instead.
+  if (audio_info) {
+    const cricket::MediaContentDescription* audio_content =
+        static_cast<const cricket::MediaContentDescription*>(
+            audio_info->description);
+    if (cricket::GetStreamBySsrc(audio_content->streams(), ssrc,
+                                 &stream_out)) {
+      *track_id = stream_out.id;
+      return true;
+    }
+  }
+
+  const cricket::ContentInfo* video_info =
+      cricket::GetFirstVideoContent(session_description);
+  if (video_info) {
+    const cricket::MediaContentDescription* video_content =
+        static_cast<const cricket::MediaContentDescription*>(
+            video_info->description);
+    if (cricket::GetStreamBySsrc(video_content->streams(), ssrc,
+                                 &stream_out)) {
+      *track_id = stream_out.id;
+      return true;
+    }
+  }
+  return false;
+}
+
+static bool BadSdp(const std::string& desc, std::string* err_desc) {
+ if (err_desc) {
+ *err_desc = desc;
+ }
+ LOG(LS_ERROR) << desc;
+ return false;
+}
+
+// Reports a SetLocalDescription failure: prefixes |desc| with
+// kSetLocalSdpFailed and forwards to BadSdp(). Always returns false.
+static bool BadLocalSdp(const std::string& desc, std::string* err_desc) {
+  return BadSdp(std::string(kSetLocalSdpFailed) + desc, err_desc);
+}
+
+// Reports a SetRemoteDescription failure: prefixes |desc| with
+// kSetRemoteSdpFailed and forwards to BadSdp(). Always returns false.
+static bool BadRemoteSdp(const std::string& desc, std::string* err_desc) {
+  return BadSdp(std::string(kSetRemoteSdpFailed) + desc, err_desc);
+}
+
+// Routes |desc| to the local or remote failure helper depending on whether
+// |source| is CS_LOCAL. Always returns false.
+static bool BadSdp(cricket::ContentSource source,
+                   const std::string& desc, std::string* err_desc) {
+  return source == cricket::CS_LOCAL ? BadLocalSdp(desc, err_desc)
+                                     : BadRemoteSdp(desc, err_desc);
+}
+
+static std::string SessionErrorMsg(cricket::BaseSession::Error error) {
+ std::ostringstream desc;
+ desc << kSessionError << error;
+ return desc.str();
+}
+
+// Expands to a switch case that stringifies a BaseSession::State enumerator.
+#define GET_STRING_OF_STATE(state) \
+  case cricket::BaseSession::state: \
+    result = #state; \
+    break;
+
+// Returns the identifier name of |state| (e.g. "STATE_INIT") for logging.
+// An unknown value trips the ASSERT and yields an empty string.
+static std::string GetStateString(cricket::BaseSession::State state) {
+  std::string result;
+  switch (state) {
+    GET_STRING_OF_STATE(STATE_INIT)
+    GET_STRING_OF_STATE(STATE_SENTINITIATE)
+    GET_STRING_OF_STATE(STATE_RECEIVEDINITIATE)
+    GET_STRING_OF_STATE(STATE_SENTPRACCEPT)
+    GET_STRING_OF_STATE(STATE_SENTACCEPT)
+    GET_STRING_OF_STATE(STATE_RECEIVEDPRACCEPT)
+    GET_STRING_OF_STATE(STATE_RECEIVEDACCEPT)
+    GET_STRING_OF_STATE(STATE_SENTMODIFY)
+    GET_STRING_OF_STATE(STATE_RECEIVEDMODIFY)
+    GET_STRING_OF_STATE(STATE_SENTREJECT)
+    GET_STRING_OF_STATE(STATE_RECEIVEDREJECT)
+    GET_STRING_OF_STATE(STATE_SENTREDIRECT)
+    GET_STRING_OF_STATE(STATE_SENTTERMINATE)
+    GET_STRING_OF_STATE(STATE_RECEIVEDTERMINATE)
+    GET_STRING_OF_STATE(STATE_INPROGRESS)
+    GET_STRING_OF_STATE(STATE_DEINIT)
+    default:
+      ASSERT(false);
+      break;
+  }
+  return result;
+}
+
+// Expands to a switch case that stringifies a BaseSession::Error enumerator.
+#define GET_STRING_OF_ERROR(err) \
+  case cricket::BaseSession::err: \
+    result = #err; \
+    break;
+
+// Returns the identifier name of |err| (e.g. "ERROR_NONE") for error
+// reporting. An unknown value trips the ASSERT and yields an empty string.
+static std::string GetErrorString(cricket::BaseSession::Error err) {
+  std::string result;
+  switch (err) {
+    GET_STRING_OF_ERROR(ERROR_NONE)
+    GET_STRING_OF_ERROR(ERROR_TIME)
+    GET_STRING_OF_ERROR(ERROR_RESPONSE)
+    GET_STRING_OF_ERROR(ERROR_NETWORK)
+    GET_STRING_OF_ERROR(ERROR_CONTENT)
+    GET_STRING_OF_ERROR(ERROR_TRANSPORT)
+    default:
+      ASSERT(false);
+      break;
+  }
+  return result;
+}
+
+// Builds an "update state failed" message for |err| and reports it through
+// the local/remote BadSdp() helper. Always returns false.
+static bool SetSessionStateFailed(cricket::ContentSource source,
+                                  cricket::BaseSession::Error err,
+                                  std::string* err_desc) {
+  return BadSdp(source,
+                std::string(kUpdateStateFailed) + GetErrorString(err),
+                err_desc);
+}
+
+// Helper class used to remember whether the remote peer has requested an ice
+// restart by sending a description with new ice ufrag and password.
+class IceRestartAnswerLatch {
+ public:
+  IceRestartAnswerLatch() : ice_restart_(false) { }
+
+  // Returns true exactly once after CheckForRemoteIceRestart has detected a
+  // restart; reading the latch resets it to false.
+  bool AnswerWithIceRestartLatch() {
+    if (ice_restart_) {
+      ice_restart_ = false;
+      return true;
+    }
+    return false;
+  }
+
+  // Latches |ice_restart_| if |new_desc| is an offer whose transport
+  // descriptions carry different ICE credentials than |old_desc|. A missing
+  // |old_desc| or a non-offer |new_desc| is ignored.
+  void CheckForRemoteIceRestart(
+      const SessionDescriptionInterface* old_desc,
+      const SessionDescriptionInterface* new_desc) {
+    if (!old_desc || new_desc->type() != SessionDescriptionInterface::kOffer) {
+      return;
+    }
+    const SessionDescription* new_sd = new_desc->description();
+    const SessionDescription* old_sd = old_desc->description();
+    const ContentInfos& contents = new_sd->contents();
+    for (size_t index = 0; index < contents.size(); ++index) {
+      const ContentInfo* cinfo = &contents[index];
+      if (cinfo->rejected) {
+        continue;
+      }
+      // If the content isn't rejected, check if ufrag and password has
+      // changed.
+      const cricket::TransportDescription* new_transport_desc =
+          new_sd->GetTransportDescriptionByName(cinfo->name);
+      const cricket::TransportDescription* old_transport_desc =
+          old_sd->GetTransportDescriptionByName(cinfo->name);
+      if (!new_transport_desc || !old_transport_desc) {
+        // No transport description exist. This is not an ice restart.
+        continue;
+      }
+      // NOTE(review): this latches only when BOTH ufrag and pwd change.
+      // RFC 5245 sec. 9.1.1.1 says an offerer restarting ICE changes both,
+      // but detecting a change in either may be more robust — confirm
+      // intended semantics.
+      if (new_transport_desc->ice_pwd != old_transport_desc->ice_pwd &&
+          new_transport_desc->ice_ufrag != old_transport_desc->ice_ufrag) {
+        LOG(LS_INFO) << "Remote peer request ice restart.";
+        ice_restart_ = true;
+        break;
+      }
+    }
+  }
+
+ private:
+  // True until the next AnswerWithIceRestartLatch() call after a restart
+  // has been detected.
+  bool ice_restart_;
+};
+
+// Constructs the session. The BaseSession id doubles as the SDP session id,
+// so the random 64-bit id is masked to LLONG_MAX (see RFC 3264 note below).
+WebRtcSession::WebRtcSession(cricket::ChannelManager* channel_manager,
+                             talk_base::Thread* signaling_thread,
+                             talk_base::Thread* worker_thread,
+                             cricket::PortAllocator* port_allocator,
+                             MediaStreamSignaling* mediastream_signaling)
+    : cricket::BaseSession(signaling_thread, worker_thread, port_allocator,
+                           talk_base::ToString(talk_base::CreateRandomId64() &
+                                               LLONG_MAX),
+                           cricket::NS_JINGLE_RTP, false),
+      // RFC 3264: The numeric value of the session id and version in the
+      // o line MUST be representable with a "64 bit signed integer".
+      // Due to this constraint session id |sid_| is max limited to LLONG_MAX.
+      channel_manager_(channel_manager),
+      session_desc_factory_(channel_manager, &transport_desc_factory_),
+      mediastream_signaling_(mediastream_signaling),
+      ice_observer_(NULL),
+      ice_connection_state_(PeerConnectionInterface::kIceConnectionNew),
+      // RFC 4566 suggested a Network Time Protocol (NTP) format timestamp
+      // as the session id and session version. To simplify, it should be fine
+      // to just use a random number as session id and start version from
+      // |kInitSessionVersion|.
+      session_version_(kInitSessionVersion),
+      older_version_remote_peer_(false),
+      data_channel_type_(cricket::DCT_NONE),
+      ice_restart_latch_(new IceRestartAnswerLatch) {
+  // NOTE(review): HYBRID presumably allows negotiating either legacy Google
+  // ICE or standard ICE with the peer — confirm against transport docs.
+  transport_desc_factory_.set_protocol(cricket::ICEPROTO_HYBRID);
+}
+
+// Tears down all channels (signaling their destruction first so observers
+// can drop references), frees saved candidates, and releases the identity.
+WebRtcSession::~WebRtcSession() {
+  if (voice_channel_.get()) {
+    SignalVoiceChannelDestroyed();
+    channel_manager_->DestroyVoiceChannel(voice_channel_.release());
+  }
+  if (video_channel_.get()) {
+    SignalVideoChannelDestroyed();
+    channel_manager_->DestroyVideoChannel(video_channel_.release());
+  }
+  if (data_channel_.get()) {
+    SignalDataChannelDestroyed();
+    channel_manager_->DestroyDataChannel(data_channel_.release());
+  }
+  // Candidates that arrived before the remote description was set are owned
+  // by this session.
+  for (size_t i = 0; i < saved_candidates_.size(); ++i) {
+    delete saved_candidates_[i];
+  }
+  // The identity is shared with transport_desc_factory_ (see Initialize);
+  // delete it once and clear both raw pointers to avoid a double free.
+  delete identity();
+  set_identity(NULL);
+  transport_desc_factory_.set_identity(NULL);
+}
+
+// One-time session setup driven by |constraints|:
+//  - SDES crypto is required by default (SEC_REQUIRED);
+//  - DTLS-SRTP is enabled (and an identity generated) on kEnableDtlsSrtp;
+//  - RTP data channels are enabled on kEnableRtpDataChannels, otherwise
+//    SCTP on kEnableSctpDataChannels (SCTP additionally requires DTLS);
+//  - a default video encoder configuration is installed.
+// Currently always returns true (see TODO).
+bool WebRtcSession::Initialize(const MediaConstraintsInterface* constraints) {
+  // TODO(perkj): Take |constraints| into consideration. Return false if not all
+  // mandatory constraints can be fulfilled. Note that |constraints|
+  // can be null.
+
+  // By default SRTP-SDES is enabled in WebRtc.
+  set_secure_policy(cricket::SEC_REQUIRED);
+
+  // Enable DTLS-SRTP if the constraint is set.
+  bool value;
+  if (FindConstraint(constraints, MediaConstraintsInterface::kEnableDtlsSrtp,
+                     &value, NULL) && value) {
+    LOG(LS_INFO) << "DTLS-SRTP enabled; generating identity";
+    std::string identity_name = kWebRTCIdentityPrefix +
+        talk_base::ToString(talk_base::CreateRandomId());
+    // Identity generation happens synchronously here.
+    transport_desc_factory_.set_identity(talk_base::SSLIdentity::Generate(
+        identity_name));
+    LOG(LS_INFO) << "Finished generating identity";
+    set_identity(transport_desc_factory_.identity());
+    transport_desc_factory_.set_digest_algorithm(talk_base::DIGEST_SHA_256);
+
+    transport_desc_factory_.set_secure(cricket::SEC_ENABLED);
+  }
+
+  // Enable creation of RTP data channels if the kEnableRtpDataChannels is set.
+  // It takes precendence over the kEnableSctpDataChannels constraint.
+  if (FindConstraint(
+        constraints, MediaConstraintsInterface::kEnableRtpDataChannels,
+        &value, NULL) && value) {
+    LOG(LS_INFO) << "Allowing RTP data engine.";
+    data_channel_type_ = cricket::DCT_RTP;
+  } else if (
+      FindConstraint(
+          constraints,
+          MediaConstraintsInterface::kEnableSctpDataChannels,
+          &value, NULL) && value &&
+      // DTLS has to be enabled to use SCTP.
+      (transport_desc_factory_.secure() == cricket::SEC_ENABLED)) {
+    LOG(LS_INFO) << "Allowing SCTP data engine.";
+    data_channel_type_ = cricket::DCT_SCTP;
+  }
+  if (data_channel_type_ != cricket::DCT_NONE) {
+    // This session will create the data channels, so register as the factory.
+    mediastream_signaling_->SetDataChannelFactory(this);
+  }
+
+  // Make sure SessionDescriptions only contains the StreamParams we negotiate.
+  session_desc_factory_.set_add_legacy_streams(false);
+
+  const cricket::VideoCodec default_codec(
+      JsepSessionDescription::kDefaultVideoCodecId,
+      JsepSessionDescription::kDefaultVideoCodecName,
+      JsepSessionDescription::kMaxVideoCodecWidth,
+      JsepSessionDescription::kMaxVideoCodecHeight,
+      JsepSessionDescription::kDefaultVideoCodecFramerate,
+      JsepSessionDescription::kDefaultVideoCodecPreference);
+  channel_manager_->SetDefaultVideoEncoderConfig(
+      cricket::VideoEncoderConfig(default_codec));
+  return true;
+}
+
+// Moves the session to the terminated state and destroys all media channels
+// and transport proxies; the ASSERTs document that nothing may survive.
+void WebRtcSession::Terminate() {
+  SetState(STATE_RECEIVEDTERMINATE);
+  // Passing NULL removes every channel/transport regardless of content.
+  RemoveUnusedChannelsAndTransports(NULL);
+  ASSERT(voice_channel_.get() == NULL);
+  ASSERT(video_channel_.get() == NULL);
+  ASSERT(data_channel_.get() == NULL);
+}
+
+// Kicks off ICE candidate gathering and pushes any already-known remote
+// candidates down to the transport channels. Always returns true.
+bool WebRtcSession::StartCandidatesAllocation() {
+  // SpeculativelyConnectAllTransportChannels will call the ConnectChannels
+  // method of each TransportProxy to start gathering ice candidates.
+  SpeculativelyConnectAllTransportChannels();
+  if (!saved_candidates_.empty()) {
+    // If there are saved candidates which arrived before the local
+    // description was set, copy those to the remote description.
+    CopySavedCandidates(remote_desc_.get());
+  }
+  // Push remote candidates present in the remote description to the
+  // transport channels.
+  UseCandidatesInSessionDescription(remote_desc_.get());
+  return true;
+}
+
+// Sets the SDES policy used when the factory generates media descriptions.
+void WebRtcSession::set_secure_policy(
+    cricket::SecureMediaPolicy secure_policy) {
+  session_desc_factory_.set_secure(secure_policy);
+}
+
+// Creates an SDP offer from the current MediaStreamSignaling state and
+// |constraints|. Returns NULL on invalid constraints/streams or a failed
+// initialization; ownership of the returned description passes to the
+// caller.
+SessionDescriptionInterface* WebRtcSession::CreateOffer(
+    const MediaConstraintsInterface* constraints) {
+  cricket::MediaSessionOptions options;
+
+  if (!mediastream_signaling_->GetOptionsForOffer(constraints, &options)) {
+    LOG(LS_ERROR) << "CreateOffer called with invalid constraints.";
+    return NULL;
+  }
+
+  if (!ValidStreams(options.streams)) {
+    LOG(LS_ERROR) << "CreateOffer called with invalid media streams.";
+    return NULL;
+  }
+
+  if (data_channel_type_ == cricket::DCT_SCTP) {
+    options.data_channel_type = cricket::DCT_SCTP;
+  }
+  SessionDescription* desc(
+      session_desc_factory_.CreateOffer(options,
+                                        BaseSession::local_description()));
+  // RFC 3264
+  // When issuing an offer that modifies the session,
+  // the "o=" line of the new SDP MUST be identical to that in the
+  // previous SDP, except that the version in the origin field MUST
+  // increment by one from the previous SDP.
+
+  // Just increase the version number by one each time when a new offer
+  // is created regardless if it's identical to the previous one or not.
+  // The |session_version_| is a uint64, the wrap around should not happen.
+  ASSERT(session_version_ + 1 > session_version_);
+  JsepSessionDescription* offer(new JsepSessionDescription(
+      JsepSessionDescription::kOffer));
+  if (!offer->Initialize(desc, id(),
+                         talk_base::ToString(session_version_++))) {
+    delete offer;
+    return NULL;
+  }
+  if (local_description() && !options.transport_options.ice_restart) {
+    // Include all local ice candidates in the SessionDescription unless
+    // an ice restart has been requested.
+    CopyCandidatesFromSessionDescription(local_description(), offer);
+  }
+  return offer;
+}
+
+// Creates an SDP answer to the current remote offer. Returns NULL when no
+// remote offer is set, on invalid constraints/streams, or on a failed
+// initialization; ownership of the returned description passes to the
+// caller.
+SessionDescriptionInterface* WebRtcSession::CreateAnswer(
+    const MediaConstraintsInterface* constraints) {
+  if (!remote_description()) {
+    LOG(LS_ERROR) << "CreateAnswer can't be called before"
+                  << " SetRemoteDescription.";
+    return NULL;
+  }
+  if (remote_description()->type() != JsepSessionDescription::kOffer) {
+    LOG(LS_ERROR) << "CreateAnswer failed because remote_description is not an"
+                  << " offer.";
+    return NULL;
+  }
+
+  cricket::MediaSessionOptions options;
+  if (!mediastream_signaling_->GetOptionsForAnswer(constraints, &options)) {
+    LOG(LS_ERROR) << "CreateAnswer called with invalid constraints.";
+    return NULL;
+  }
+  if (!ValidStreams(options.streams)) {
+    LOG(LS_ERROR) << "CreateAnswer called with invalid media streams.";
+    return NULL;
+  }
+  if (data_channel_type_ == cricket::DCT_SCTP) {
+    options.data_channel_type = cricket::DCT_SCTP;
+  }
+  // According to http://tools.ietf.org/html/rfc5245#section-9.2.1.1
+  // an answer should also contain new ice ufrag and password if an offer has
+  // been received with new ufrag and password.
+  options.transport_options.ice_restart =
+      ice_restart_latch_->AnswerWithIceRestartLatch();
+  SessionDescription* desc(
+      session_desc_factory_.CreateAnswer(BaseSession::remote_description(),
+                                         options,
+                                         BaseSession::local_description()));
+  // RFC 3264
+  // If the answer is different from the offer in any way (different IP
+  // addresses, ports, etc.), the origin line MUST be different in the answer.
+  // In that case, the version number in the "o=" line of the answer is
+  // unrelated to the version number in the o line of the offer.
+  // Get a new version number by increasing the |session_version_answer_|.
+  // The |session_version_| is a uint64, the wrap around should not happen.
+  ASSERT(session_version_ + 1 > session_version_);
+  JsepSessionDescription* answer(new JsepSessionDescription(
+      JsepSessionDescription::kAnswer));
+  if (!answer->Initialize(desc, id(),
+                          talk_base::ToString(session_version_++))) {
+    delete answer;
+    return NULL;
+  }
+  if (local_description() && !options.transport_options.ice_restart) {
+    // Include all local ice candidates in the SessionDescription unless
+    // the remote peer has requested an ice restart.
+    CopyCandidatesFromSessionDescription(local_description(), answer);
+  }
+  return answer;
+}
+
+bool WebRtcSession::SetLocalDescription(SessionDescriptionInterface* desc,
+ std::string* err_desc) {
+ if (error() != cricket::BaseSession::ERROR_NONE) {
+ delete desc;
+ return BadLocalSdp(SessionErrorMsg(error()), err_desc);
+ }
+
+ if (!desc || !desc->description()) {
+ delete desc;
+ return BadLocalSdp(kInvalidSdp, err_desc);
+ }
+ Action action = GetAction(desc->type());
+ if (!ExpectSetLocalDescription(action)) {
+ std::string type = desc->type();
+ delete desc;
+ return BadLocalSdp(BadStateErrMsg(type, state()), err_desc);
+ }
+
+ if (session_desc_factory_.secure() == cricket::SEC_REQUIRED &&
+ !VerifyCrypto(desc->description())) {
+ delete desc;
+ return BadLocalSdp(kSdpWithoutCrypto, err_desc);
+ }
+
+ if (action == kAnswer && !VerifyMediaDescriptions(
+ desc->description(), remote_description()->description())) {
+ return BadLocalSdp(kMlineMismatch, err_desc);
+ }
+
+ // Update the initiator flag if this session is the initiator.
+ if (state() == STATE_INIT && action == kOffer) {
+ set_initiator(true);
+ }
+
+ // Update the MediaContentDescription crypto settings as per the policy set.
+ UpdateSessionDescriptionSecurePolicy(desc->description());
+
+ set_local_description(desc->description()->Copy());
+ local_desc_.reset(desc);
+
+ // Transport and Media channels will be created only when offer is set.
+ if (action == kOffer && !CreateChannels(desc->description())) {
+ // TODO(mallinath) - Handle CreateChannel failure, as new local description
+ // is applied. Restore back to old description.
+ return BadLocalSdp(kCreateChannelFailed, err_desc);
+ }
+
+ // Remove channel and transport proxies, if MediaContentDescription is
+ // rejected.
+ RemoveUnusedChannelsAndTransports(desc->description());
+
+ if (!UpdateSessionState(action, cricket::CS_LOCAL,
+ desc->description(), err_desc)) {
+ return false;
+ }
+ // Kick starting the ice candidates allocation.
+ StartCandidatesAllocation();
+
+ // Update state and SSRC of local MediaStreams and DataChannels based on the
+ // local session description.
+ mediastream_signaling_->OnLocalDescriptionChanged(local_desc_.get());
+
+ if (error() != cricket::BaseSession::ERROR_NONE) {
+ return BadLocalSdp(SessionErrorMsg(error()), err_desc);
+ }
+ return true;
+}
+
+bool WebRtcSession::SetRemoteDescription(SessionDescriptionInterface* desc,
+ std::string* err_desc) {
+ if (error() != cricket::BaseSession::ERROR_NONE) {
+ delete desc;
+ return BadRemoteSdp(SessionErrorMsg(error()), err_desc);
+ }
+
+ if (!desc || !desc->description()) {
+ delete desc;
+ return BadRemoteSdp(kInvalidSdp, err_desc);
+ }
+ Action action = GetAction(desc->type());
+ if (!ExpectSetRemoteDescription(action)) {
+ std::string type = desc->type();
+ delete desc;
+ return BadRemoteSdp(BadStateErrMsg(type, state()), err_desc);
+ }
+
+ if (action == kAnswer && !VerifyMediaDescriptions(
+ desc->description(), local_description()->description())) {
+ return BadRemoteSdp(kMlineMismatch, err_desc);
+ }
+
+ if (session_desc_factory_.secure() == cricket::SEC_REQUIRED &&
+ !VerifyCrypto(desc->description())) {
+ delete desc;
+ return BadRemoteSdp(kSdpWithoutCrypto, err_desc);
+ }
+
+ // Transport and Media channels will be created only when offer is set.
+ if (action == kOffer && !CreateChannels(desc->description())) {
+ // TODO(mallinath) - Handle CreateChannel failure, as new local description
+ // is applied. Restore back to old description.
+ return BadRemoteSdp(kCreateChannelFailed, err_desc);
+ }
+
+ // Remove channel and transport proxies, if MediaContentDescription is
+ // rejected.
+ RemoveUnusedChannelsAndTransports(desc->description());
+
+ // NOTE: Candidates allocation will be initiated only when SetLocalDescription
+ // is called.
+ set_remote_description(desc->description()->Copy());
+ if (!UpdateSessionState(action, cricket::CS_REMOTE,
+ desc->description(), err_desc)) {
+ return false;
+ }
+
+ // Update remote MediaStreams.
+ mediastream_signaling_->OnRemoteDescriptionChanged(desc);
+ if (local_description() && !UseCandidatesInSessionDescription(desc)) {
+ delete desc;
+ return BadRemoteSdp(kInvalidCandidates, err_desc);
+ }
+
+ // Copy all saved candidates.
+ CopySavedCandidates(desc);
+ // We retain all received candidates.
+ CopyCandidatesFromSessionDescription(remote_desc_.get(), desc);
+ // Check if this new SessionDescription contains new ice ufrag and password
+ // that indicates the remote peer requests ice restart.
+ ice_restart_latch_->CheckForRemoteIceRestart(remote_desc_.get(),
+ desc);
+ remote_desc_.reset(desc);
+ if (error() != cricket::BaseSession::ERROR_NONE) {
+ return BadRemoteSdp(SessionErrorMsg(error()), err_desc);
+ }
+ return true;
+}
+
+bool WebRtcSession::UpdateSessionState(
+ Action action, cricket::ContentSource source,
+ const cricket::SessionDescription* desc,
+ std::string* err_desc) {
+ // If there's already a pending error then no state transition should happen.
+ // But all call-sites should be verifying this before calling us!
+ ASSERT(error() == cricket::BaseSession::ERROR_NONE);
+ if (action == kOffer) {
+ if (!PushdownTransportDescription(source, cricket::CA_OFFER)) {
+ return BadSdp(source, kPushDownOfferTDFailed, err_desc);
+ }
+ SetState(source == cricket::CS_LOCAL ?
+ STATE_SENTINITIATE : STATE_RECEIVEDINITIATE);
+ if (error() != cricket::BaseSession::ERROR_NONE) {
+ return SetSessionStateFailed(source, error(), err_desc);
+ }
+ } else if (action == kPrAnswer) {
+ if (!PushdownTransportDescription(source, cricket::CA_PRANSWER)) {
+ return BadSdp(source, kPushDownPranswerTDFailed, err_desc);
+ }
+ EnableChannels();
+ SetState(source == cricket::CS_LOCAL ?
+ STATE_SENTPRACCEPT : STATE_RECEIVEDPRACCEPT);
+ if (error() != cricket::BaseSession::ERROR_NONE) {
+ return SetSessionStateFailed(source, error(), err_desc);
+ }
+ } else if (action == kAnswer) {
+ if (!PushdownTransportDescription(source, cricket::CA_ANSWER)) {
+ return BadSdp(source, kPushDownAnswerTDFailed, err_desc);
+ }
+ MaybeEnableMuxingSupport();
+ EnableChannels();
+ SetState(source == cricket::CS_LOCAL ?
+ STATE_SENTACCEPT : STATE_RECEIVEDACCEPT);
+ if (error() != cricket::BaseSession::ERROR_NONE) {
+ return SetSessionStateFailed(source, error(), err_desc);
+ }
+ }
+ return true;
+}
+
+WebRtcSession::Action WebRtcSession::GetAction(const std::string& type) {
+ if (type == SessionDescriptionInterface::kOffer) {
+ return WebRtcSession::kOffer;
+ } else if (type == SessionDescriptionInterface::kPrAnswer) {
+ return WebRtcSession::kPrAnswer;
+ } else if (type == SessionDescriptionInterface::kAnswer) {
+ return WebRtcSession::kAnswer;
+ }
+ ASSERT(false && "unknown action type");
+ return WebRtcSession::kOffer;
+}
+
+bool WebRtcSession::ProcessIceMessage(const IceCandidateInterface* candidate) {
+ if (state() == STATE_INIT) {
+ LOG(LS_ERROR) << "ProcessIceMessage: ICE candidates can't be added "
+ << "without any offer (local or remote) "
+ << "session description.";
+ return false;
+ }
+
+ if (!candidate) {
+ LOG(LS_ERROR) << "ProcessIceMessage: Candidate is NULL";
+ return false;
+ }
+
+ if (!local_description() || !remote_description()) {
+ LOG(LS_INFO) << "ProcessIceMessage: Remote description not set, "
+ << "save the candidate for later use.";
+ saved_candidates_.push_back(
+ new JsepIceCandidate(candidate->sdp_mid(), candidate->sdp_mline_index(),
+ candidate->candidate()));
+ return true;
+ }
+
+ // Add this candidate to the remote session description.
+ if (!remote_desc_->AddCandidate(candidate)) {
+ LOG(LS_ERROR) << "ProcessIceMessage: Candidate cannot be used";
+ return false;
+ }
+
+ return UseCandidatesInSessionDescription(remote_desc_.get());
+}
+
+bool WebRtcSession::GetTrackIdBySsrc(uint32 ssrc, std::string* id) {
+ if (GetLocalTrackId(ssrc, id)) {
+ if (GetRemoteTrackId(ssrc, id)) {
+ LOG(LS_WARNING) << "SSRC " << ssrc
+ << " exists in both local and remote descriptions";
+ return true; // We return the remote track id.
+ }
+ return true;
+ } else {
+ return GetRemoteTrackId(ssrc, id);
+ }
+}
+
+bool WebRtcSession::GetLocalTrackId(uint32 ssrc, std::string* track_id) {
+ if (!BaseSession::local_description())
+ return false;
+ return webrtc::GetTrackIdBySsrc(
+ BaseSession::local_description(), ssrc, track_id);
+}
+
+bool WebRtcSession::GetRemoteTrackId(uint32 ssrc, std::string* track_id) {
+ if (!BaseSession::remote_description())
+ return false;
+ return webrtc::GetTrackIdBySsrc(
+ BaseSession::remote_description(), ssrc, track_id);
+}
+
+std::string WebRtcSession::BadStateErrMsg(
+ const std::string& type, State state) {
+ std::ostringstream desc;
+ desc << "Called with type in wrong state, "
+ << "type: " << type << " state: " << GetStateString(state);
+ return desc.str();
+}
+
+void WebRtcSession::SetAudioPlayout(uint32 ssrc, bool enable) {
+ ASSERT(signaling_thread()->IsCurrent());
+ if (!voice_channel_) {
+ LOG(LS_ERROR) << "SetAudioPlayout: No audio channel exists.";
+ return;
+ }
+ if (!voice_channel_->SetOutputScaling(ssrc, enable ? 1 : 0, enable ? 1 : 0)) {
+ // Allow that SetOutputScaling fail if |enable| is false but assert
+ // otherwise. This in the normal case when the underlying media channel has
+ // already been deleted.
+ ASSERT(enable == false);
+ }
+}
+
+void WebRtcSession::SetAudioSend(uint32 ssrc, bool enable,
+ const cricket::AudioOptions& options) {
+ ASSERT(signaling_thread()->IsCurrent());
+ if (!voice_channel_) {
+ LOG(LS_ERROR) << "SetAudioSend: No audio channel exists.";
+ return;
+ }
+ if (!voice_channel_->MuteStream(ssrc, !enable)) {
+ // Allow that MuteStream fail if |enable| is false but assert otherwise.
+ // This in the normal case when the underlying media channel has already
+ // been deleted.
+ ASSERT(enable == false);
+ return;
+ }
+ if (enable)
+ voice_channel_->SetChannelOptions(options);
+}
+
+bool WebRtcSession::SetAudioRenderer(uint32 ssrc,
+ cricket::AudioRenderer* renderer) {
+ if (!voice_channel_) {
+ LOG(LS_ERROR) << "SetAudioRenderer: No audio channel exists.";
+ return false;
+ }
+
+ if (!voice_channel_->SetRenderer(ssrc, renderer)) {
+ // SetRenderer() can fail if the ssrc is not mapping to the playout channel.
+ LOG(LS_ERROR) << "SetAudioRenderer: ssrc is incorrect: " << ssrc;
+ return false;
+ }
+
+ return true;
+}
+
+bool WebRtcSession::SetCaptureDevice(uint32 ssrc,
+ cricket::VideoCapturer* camera) {
+ ASSERT(signaling_thread()->IsCurrent());
+
+ if (!video_channel_.get()) {
+ // |video_channel_| doesnt't exist. Probably because the remote end doesnt't
+ // support video.
+ LOG(LS_WARNING) << "Video not used in this call.";
+ return false;
+ }
+ if (!video_channel_->SetCapturer(ssrc, camera)) {
+ // Allow that SetCapturer fail if |camera| is NULL but assert otherwise.
+ // This in the normal case when the underlying media channel has already
+ // been deleted.
+ ASSERT(camera == NULL);
+ return false;
+ }
+ return true;
+}
+
+void WebRtcSession::SetVideoPlayout(uint32 ssrc,
+ bool enable,
+ cricket::VideoRenderer* renderer) {
+ ASSERT(signaling_thread()->IsCurrent());
+ if (!video_channel_) {
+ LOG(LS_WARNING) << "SetVideoPlayout: No video channel exists.";
+ return;
+ }
+ if (!video_channel_->SetRenderer(ssrc, enable ? renderer : NULL)) {
+ // Allow that SetRenderer fail if |renderer| is NULL but assert otherwise.
+ // This in the normal case when the underlying media channel has already
+ // been deleted.
+ ASSERT(renderer == NULL);
+ }
+}
+
+void WebRtcSession::SetVideoSend(uint32 ssrc, bool enable,
+ const cricket::VideoOptions* options) {
+ ASSERT(signaling_thread()->IsCurrent());
+ if (!video_channel_) {
+ LOG(LS_WARNING) << "SetVideoSend: No video channel exists.";
+ return;
+ }
+ if (!video_channel_->MuteStream(ssrc, !enable)) {
+ // Allow that MuteStream fail if |enable| is false but assert otherwise.
+ // This in the normal case when the underlying media channel has already
+ // been deleted.
+ ASSERT(enable == false);
+ return;
+ }
+ if (enable && options)
+ video_channel_->SetChannelOptions(*options);
+}
+
+bool WebRtcSession::CanInsertDtmf(const std::string& track_id) {
+ ASSERT(signaling_thread()->IsCurrent());
+ if (!voice_channel_) {
+ LOG(LS_ERROR) << "CanInsertDtmf: No audio channel exists.";
+ return false;
+ }
+ uint32 send_ssrc = 0;
+ // The Dtmf is negotiated per channel not ssrc, so we only check if the ssrc
+ // exists.
+ if (!GetAudioSsrcByTrackId(BaseSession::local_description(), track_id,
+ &send_ssrc)) {
+ LOG(LS_ERROR) << "CanInsertDtmf: Track does not exist: " << track_id;
+ return false;
+ }
+ return voice_channel_->CanInsertDtmf();
+}
+
+bool WebRtcSession::InsertDtmf(const std::string& track_id,
+ int code, int duration) {
+ ASSERT(signaling_thread()->IsCurrent());
+ if (!voice_channel_) {
+ LOG(LS_ERROR) << "InsertDtmf: No audio channel exists.";
+ return false;
+ }
+ uint32 send_ssrc = 0;
+ if (!VERIFY(GetAudioSsrcByTrackId(BaseSession::local_description(),
+ track_id, &send_ssrc))) {
+ LOG(LS_ERROR) << "InsertDtmf: Track does not exist: " << track_id;
+ return false;
+ }
+ if (!voice_channel_->InsertDtmf(send_ssrc, code, duration,
+ cricket::DF_SEND)) {
+ LOG(LS_ERROR) << "Failed to insert DTMF to channel.";
+ return false;
+ }
+ return true;
+}
+
// Exposes the voice-channel-destroyed signal so observers (e.g. DtmfSender)
// can track the lifetime of the underlying audio channel.
sigslot::signal0<>* WebRtcSession::GetOnDestroyedSignal() {
  return &SignalVoiceChannelDestroyed;
}
+
+talk_base::scoped_refptr<DataChannel> WebRtcSession::CreateDataChannel(
+ const std::string& label,
+ const DataChannelInit* config) {
+ if (state() == STATE_RECEIVEDTERMINATE) {
+ return NULL;
+ }
+ if (data_channel_type_ == cricket::DCT_NONE) {
+ LOG(LS_ERROR) << "CreateDataChannel: Data is not supported in this call.";
+ return NULL;
+ }
+ DataChannelInit new_config = config ? (*config) : DataChannelInit();
+
+ if (data_channel_type_ == cricket::DCT_SCTP) {
+ if (new_config.id < 0) {
+ if (!mediastream_signaling_->AllocateSctpId(&new_config.id)) {
+ LOG(LS_ERROR) << "No id can be allocated for the SCTP data channel.";
+ return NULL;
+ }
+ } else if (!mediastream_signaling_->IsSctpIdAvailable(new_config.id)) {
+ LOG(LS_ERROR) << "Failed to create a SCTP data channel "
+ << "because the id is already in use or out of range.";
+ return NULL;
+ }
+ }
+ talk_base::scoped_refptr<DataChannel> channel(
+ DataChannel::Create(this, label, &new_config));
+ if (channel == NULL)
+ return NULL;
+ if (!mediastream_signaling_->AddDataChannel(channel))
+ return NULL;
+ return channel;
+}
+
// Returns the kind of data channel (RTP, SCTP, or none) this session
// supports.
cricket::DataChannelType WebRtcSession::data_channel_type() const {
  return data_channel_type_;
}
+
// Transitions the ICE connection state to |state| and notifies the observer.
// No-op when the state is unchanged. The switch below only ASSERTs that the
// transition is one the state machine allows; in release builds any
// transition is applied.
void WebRtcSession::SetIceConnectionState(
    PeerConnectionInterface::IceConnectionState state) {
  if (ice_connection_state_ == state) {
    return;
  }

  // ASSERT that the requested transition is allowed. Note that
  // WebRtcSession does not implement "kIceConnectionClosed" (that is handled
  // within PeerConnection). This switch statement should compile away when
  // ASSERTs are disabled.
  switch (ice_connection_state_) {
    case PeerConnectionInterface::kIceConnectionNew:
      ASSERT(state == PeerConnectionInterface::kIceConnectionChecking);
      break;
    case PeerConnectionInterface::kIceConnectionChecking:
      ASSERT(state == PeerConnectionInterface::kIceConnectionFailed ||
             state == PeerConnectionInterface::kIceConnectionConnected);
      break;
    case PeerConnectionInterface::kIceConnectionConnected:
      ASSERT(state == PeerConnectionInterface::kIceConnectionDisconnected ||
             state == PeerConnectionInterface::kIceConnectionChecking ||
             state == PeerConnectionInterface::kIceConnectionCompleted);
      break;
    case PeerConnectionInterface::kIceConnectionCompleted:
      ASSERT(state == PeerConnectionInterface::kIceConnectionConnected ||
             state == PeerConnectionInterface::kIceConnectionDisconnected);
      break;
    case PeerConnectionInterface::kIceConnectionFailed:
      ASSERT(state == PeerConnectionInterface::kIceConnectionNew);
      break;
    case PeerConnectionInterface::kIceConnectionDisconnected:
      ASSERT(state == PeerConnectionInterface::kIceConnectionChecking ||
             state == PeerConnectionInterface::kIceConnectionConnected ||
             state == PeerConnectionInterface::kIceConnectionCompleted ||
             state == PeerConnectionInterface::kIceConnectionFailed);
      break;
    case PeerConnectionInterface::kIceConnectionClosed:
      // Closed is terminal; leaving it is never expected here.
      ASSERT(false);
      break;
    default:
      ASSERT(false);
      break;
  }

  ice_connection_state_ = state;
  if (ice_observer_) {
    ice_observer_->OnIceConnectionChange(ice_connection_state_);
  }
}
+
+void WebRtcSession::OnTransportRequestSignaling(
+ cricket::Transport* transport) {
+ ASSERT(signaling_thread()->IsCurrent());
+ transport->OnSignalingReady();
+ if (ice_observer_) {
+ ice_observer_->OnIceGatheringChange(
+ PeerConnectionInterface::kIceGatheringGathering);
+ }
+}
+
// Called when |transport| starts connecting; reuse the writability handler
// to seed the ICE connection state from the transport's current state.
void WebRtcSession::OnTransportConnecting(cricket::Transport* transport) {
  ASSERT(signaling_thread()->IsCurrent());
  // start monitoring for the write state of the transport.
  OnTransportWritable(transport);
}
+
+void WebRtcSession::OnTransportWritable(cricket::Transport* transport) {
+ ASSERT(signaling_thread()->IsCurrent());
+ // TODO(bemasc): Expose more API from Transport to detect when
+ // candidate selection starts or stops, due to success or failure.
+ if (transport->all_channels_writable()) {
+ if (ice_connection_state_ ==
+ PeerConnectionInterface::kIceConnectionChecking ||
+ ice_connection_state_ ==
+ PeerConnectionInterface::kIceConnectionDisconnected) {
+ SetIceConnectionState(PeerConnectionInterface::kIceConnectionConnected);
+ }
+ } else if (transport->HasChannels()) {
+ // If the current state is Connected or Completed, then there were writable
+ // channels but now there are not, so the next state must be Disconnected.
+ if (ice_connection_state_ ==
+ PeerConnectionInterface::kIceConnectionConnected ||
+ ice_connection_state_ ==
+ PeerConnectionInterface::kIceConnectionCompleted) {
+ SetIceConnectionState(
+ PeerConnectionInterface::kIceConnectionDisconnected);
+ }
+ }
+}
+
// Called when local candidates for |proxy|'s content are allocated; forwards
// them (tagged with the content name) to the observer and local description.
void WebRtcSession::OnTransportProxyCandidatesReady(
    cricket::TransportProxy* proxy, const cricket::Candidates& candidates) {
  ASSERT(signaling_thread()->IsCurrent());
  ProcessNewLocalCandidate(proxy->content_name(), candidates);
}
+
+bool WebRtcSession::ExpectSetLocalDescription(Action action) {
+ return ((action == kOffer && state() == STATE_INIT) ||
+ // update local offer
+ (action == kOffer && state() == STATE_SENTINITIATE) ||
+ // update the current ongoing session.
+ (action == kOffer && state() == STATE_RECEIVEDACCEPT) ||
+ (action == kOffer && state() == STATE_SENTACCEPT) ||
+ (action == kOffer && state() == STATE_INPROGRESS) ||
+ // accept remote offer
+ (action == kAnswer && state() == STATE_RECEIVEDINITIATE) ||
+ (action == kAnswer && state() == STATE_SENTPRACCEPT) ||
+ (action == kPrAnswer && state() == STATE_RECEIVEDINITIATE) ||
+ (action == kPrAnswer && state() == STATE_SENTPRACCEPT));
+}
+
+bool WebRtcSession::ExpectSetRemoteDescription(Action action) {
+ return ((action == kOffer && state() == STATE_INIT) ||
+ // update remote offer
+ (action == kOffer && state() == STATE_RECEIVEDINITIATE) ||
+ // update the current ongoing session
+ (action == kOffer && state() == STATE_RECEIVEDACCEPT) ||
+ (action == kOffer && state() == STATE_SENTACCEPT) ||
+ (action == kOffer && state() == STATE_INPROGRESS) ||
+ // accept local offer
+ (action == kAnswer && state() == STATE_SENTINITIATE) ||
+ (action == kAnswer && state() == STATE_RECEIVEDPRACCEPT) ||
+ (action == kPrAnswer && state() == STATE_SENTINITIATE) ||
+ (action == kPrAnswer && state() == STATE_RECEIVEDPRACCEPT));
+}
+
// Called when local candidate allocation finishes; tells the observer that
// ICE gathering is complete.
void WebRtcSession::OnCandidatesAllocationDone() {
  ASSERT(signaling_thread()->IsCurrent());
  if (ice_observer_) {
    ice_observer_->OnIceGatheringChange(
        PeerConnectionInterface::kIceGatheringComplete);
    ice_observer_->OnIceComplete();
  }
}
+
+// Enabling voice and video channel.
+void WebRtcSession::EnableChannels() {
+ if (voice_channel_ && !voice_channel_->enabled())
+ voice_channel_->Enable(true);
+
+ if (video_channel_ && !video_channel_->enabled())
+ video_channel_->Enable(true);
+
+ if (data_channel_.get() && !data_channel_->enabled())
+ data_channel_->Enable(true);
+}
+
+void WebRtcSession::ProcessNewLocalCandidate(
+ const std::string& content_name,
+ const cricket::Candidates& candidates) {
+ int sdp_mline_index;
+ if (!GetLocalCandidateMediaIndex(content_name, &sdp_mline_index)) {
+ LOG(LS_ERROR) << "ProcessNewLocalCandidate: content name "
+ << content_name << " not found";
+ return;
+ }
+
+ for (cricket::Candidates::const_iterator citer = candidates.begin();
+ citer != candidates.end(); ++citer) {
+ // Use content_name as the candidate media id.
+ JsepIceCandidate candidate(content_name, sdp_mline_index, *citer);
+ if (ice_observer_) {
+ ice_observer_->OnIceCandidate(&candidate);
+ }
+ if (local_desc_) {
+ local_desc_->AddCandidate(&candidate);
+ }
+ }
+}
+
+// Returns the media index for a local ice candidate given the content name.
+bool WebRtcSession::GetLocalCandidateMediaIndex(const std::string& content_name,
+ int* sdp_mline_index) {
+ if (!BaseSession::local_description() || !sdp_mline_index)
+ return false;
+
+ bool content_found = false;
+ const ContentInfos& contents = BaseSession::local_description()->contents();
+ for (size_t index = 0; index < contents.size(); ++index) {
+ if (contents[index].name == content_name) {
+ *sdp_mline_index = index;
+ content_found = true;
+ break;
+ }
+ }
+ return content_found;
+}
+
+bool WebRtcSession::UseCandidatesInSessionDescription(
+ const SessionDescriptionInterface* remote_desc) {
+ if (!remote_desc)
+ return true;
+ bool ret = true;
+ for (size_t m = 0; m < remote_desc->number_of_mediasections(); ++m) {
+ const IceCandidateCollection* candidates = remote_desc->candidates(m);
+ for (size_t n = 0; n < candidates->count(); ++n) {
+ ret = UseCandidate(candidates->at(n));
+ if (!ret)
+ break;
+ }
+ }
+ return ret;
+}
+
// Submits a single remote |candidate| to the transport for the media section
// it belongs to, updating the ICE connection state on success. Returns false
// only when the candidate's m-line index is out of range; a transport-level
// rejection is logged but still returns true.
bool WebRtcSession::UseCandidate(
    const IceCandidateInterface* candidate) {

  size_t mediacontent_index = static_cast<size_t>(candidate->sdp_mline_index());
  size_t remote_content_size =
      BaseSession::remote_description()->contents().size();
  if (mediacontent_index >= remote_content_size) {
    LOG(LS_ERROR)
        << "UseRemoteCandidateInSession: Invalid candidate media index.";
    return false;
  }

  cricket::ContentInfo content =
      BaseSession::remote_description()->contents()[mediacontent_index];
  std::vector<cricket::Candidate> candidates;
  candidates.push_back(candidate->candidate());
  // Invoking BaseSession method to handle remote candidates.
  std::string error;
  if (OnRemoteCandidates(content.name, candidates, &error)) {
    // Candidates successfully submitted for checking.
    if (ice_connection_state_ == PeerConnectionInterface::kIceConnectionNew ||
        ice_connection_state_ ==
            PeerConnectionInterface::kIceConnectionDisconnected) {
      // If state is New, then the session has just gotten its first remote ICE
      // candidates, so go to Checking.
      // If state is Disconnected, the session is re-using old candidates or
      // receiving additional ones, so go to Checking.
      // If state is Connected, stay Connected.
      // TODO(bemasc): If state is Connected, and the new candidates are for a
      // newly added transport, then the state actually _should_ move to
      // checking. Add a way to distinguish that case.
      SetIceConnectionState(PeerConnectionInterface::kIceConnectionChecking);
    }
    // TODO(bemasc): If state is Completed, go back to Connected.
  } else {
    // A rejected candidate is not fatal: log the transport's reason and
    // carry on.
    LOG(LS_WARNING) << error;
  }
  return true;
}
+
// Destroys any media channel (and its transport proxy) whose content is
// absent from |desc| or explicitly rejected, notifying signaling and firing
// the corresponding destroyed-signal first. The content name is copied
// before release() because the channel object owns the string.
void WebRtcSession::RemoveUnusedChannelsAndTransports(
    const SessionDescription* desc) {
  const cricket::ContentInfo* voice_info =
      cricket::GetFirstAudioContent(desc);
  if ((!voice_info || voice_info->rejected) && voice_channel_) {
    mediastream_signaling_->OnAudioChannelClose();
    SignalVoiceChannelDestroyed();
    const std::string content_name = voice_channel_->content_name();
    channel_manager_->DestroyVoiceChannel(voice_channel_.release());
    DestroyTransportProxy(content_name);
  }

  const cricket::ContentInfo* video_info =
      cricket::GetFirstVideoContent(desc);
  if ((!video_info || video_info->rejected) && video_channel_) {
    mediastream_signaling_->OnVideoChannelClose();
    SignalVideoChannelDestroyed();
    const std::string content_name = video_channel_->content_name();
    channel_manager_->DestroyVideoChannel(video_channel_.release());
    DestroyTransportProxy(content_name);
  }

  const cricket::ContentInfo* data_info =
      cricket::GetFirstDataContent(desc);
  if ((!data_info || data_info->rejected) && data_channel_) {
    mediastream_signaling_->OnDataChannelClose();
    SignalDataChannelDestroyed();
    const std::string content_name = data_channel_->content_name();
    channel_manager_->DestroyDataChannel(data_channel_.release());
    DestroyTransportProxy(content_name);
  }
}
+
+bool WebRtcSession::CreateChannels(const SessionDescription* desc) {
+ // Disabling the BUNDLE flag in PortAllocator if offer disabled it.
+ if (state() == STATE_INIT && !desc->HasGroup(cricket::GROUP_TYPE_BUNDLE)) {
+ port_allocator()->set_flags(port_allocator()->flags() &
+ ~cricket::PORTALLOCATOR_ENABLE_BUNDLE);
+ }
+
+ // Creating the media channels and transport proxies.
+ const cricket::ContentInfo* voice = cricket::GetFirstAudioContent(desc);
+ if (voice && !voice->rejected && !voice_channel_) {
+ if (!CreateVoiceChannel(desc)) {
+ LOG(LS_ERROR) << "Failed to create voice channel.";
+ return false;
+ }
+ }
+
+ const cricket::ContentInfo* video = cricket::GetFirstVideoContent(desc);
+ if (video && !video->rejected && !video_channel_) {
+ if (!CreateVideoChannel(desc)) {
+ LOG(LS_ERROR) << "Failed to create video channel.";
+ return false;
+ }
+ }
+
+ const cricket::ContentInfo* data = cricket::GetFirstDataContent(desc);
+ if (data_channel_type_ != cricket::DCT_NONE &&
+ data && !data->rejected && !data_channel_.get()) {
+ if (!CreateDataChannel(desc)) {
+ LOG(LS_ERROR) << "Failed to create data channel.";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool WebRtcSession::CreateVoiceChannel(const SessionDescription* desc) {
+ const cricket::ContentInfo* voice = cricket::GetFirstAudioContent(desc);
+ voice_channel_.reset(channel_manager_->CreateVoiceChannel(
+ this, voice->name, true));
+ return voice_channel_ ? true : false;
+}
+
+bool WebRtcSession::CreateVideoChannel(const SessionDescription* desc) {
+ const cricket::ContentInfo* video = cricket::GetFirstVideoContent(desc);
+ video_channel_.reset(channel_manager_->CreateVideoChannel(
+ this, video->name, true, voice_channel_.get()));
+ return video_channel_ ? true : false;
+}
+
+bool WebRtcSession::CreateDataChannel(const SessionDescription* desc) {
+ const cricket::ContentInfo* data = cricket::GetFirstDataContent(desc);
+ bool rtcp = (data_channel_type_ == cricket::DCT_RTP);
+ data_channel_.reset(channel_manager_->CreateDataChannel(
+ this, data->name, rtcp, data_channel_type_));
+ if (!data_channel_.get()) {
+ return false;
+ }
+ return true;
+}
+
+void WebRtcSession::CopySavedCandidates(
+ SessionDescriptionInterface* dest_desc) {
+ if (!dest_desc) {
+ ASSERT(false);
+ return;
+ }
+ for (size_t i = 0; i < saved_candidates_.size(); ++i) {
+ dest_desc->AddCandidate(saved_candidates_[i]);
+ delete saved_candidates_[i];
+ }
+ saved_candidates_.clear();
+}
+
+void WebRtcSession::UpdateSessionDescriptionSecurePolicy(
+ SessionDescription* sdesc) {
+ if (!sdesc) {
+ return;
+ }
+
+ // Updating the |crypto_required_| in MediaContentDescription to the
+ // appropriate state based on the current security policy.
+ for (cricket::ContentInfos::iterator iter = sdesc->contents().begin();
+ iter != sdesc->contents().end(); ++iter) {
+ if (cricket::IsMediaContent(&*iter)) {
+ MediaContentDescription* mdesc =
+ static_cast<MediaContentDescription*> (iter->description);
+ if (mdesc) {
+ mdesc->set_crypto_required(
+ session_desc_factory_.secure() == cricket::SEC_REQUIRED);
+ }
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/webrtcsession.h b/talk/app/webrtc/webrtcsession.h
new file mode 100644
index 0000000..045d347
--- /dev/null
+++ b/talk/app/webrtc/webrtcsession.h
@@ -0,0 +1,295 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_WEBRTCSESSION_H_
+#define TALK_APP_WEBRTC_WEBRTCSESSION_H_
+
+#include <string>
+
+#include "talk/app/webrtc/peerconnectioninterface.h"
+#include "talk/app/webrtc/dtmfsender.h"
+#include "talk/app/webrtc/mediastreamprovider.h"
+#include "talk/app/webrtc/datachannel.h"
+#include "talk/app/webrtc/statstypes.h"
+#include "talk/base/sigslot.h"
+#include "talk/base/thread.h"
+#include "talk/media/base/mediachannel.h"
+#include "talk/p2p/base/session.h"
+#include "talk/p2p/base/transportdescriptionfactory.h"
+#include "talk/session/media/mediasession.h"
+
+namespace cricket {
+
+class ChannelManager;
+class DataChannel;
+class StatsReport;
+class Transport;
+class VideoCapturer;
+class BaseChannel;
+class VideoChannel;
+class VoiceChannel;
+
+} // namespace cricket
+
+namespace webrtc {
+
+class IceRestartAnswerLatch;
+class MediaStreamSignaling;
+
+extern const char kSetLocalSdpFailed[];
+extern const char kSetRemoteSdpFailed[];
+extern const char kCreateChannelFailed[];
+extern const char kInvalidCandidates[];
+extern const char kInvalidSdp[];
+extern const char kMlineMismatch[];
+extern const char kSdpWithoutCrypto[];
+extern const char kSessionError[];
+extern const char kUpdateStateFailed[];
+extern const char kPushDownOfferTDFailed[];
+extern const char kPushDownPranswerTDFailed[];
+extern const char kPushDownAnswerTDFailed[];
+
+// ICE state callback interface.
+class IceObserver {
+ public:
+ // Called any time the IceConnectionState changes
+ virtual void OnIceConnectionChange(
+ PeerConnectionInterface::IceConnectionState new_state) {}
+ // Called any time the IceGatheringState changes
+ virtual void OnIceGatheringChange(
+ PeerConnectionInterface::IceGatheringState new_state) {}
+  // A new ICE candidate has been found.
+ virtual void OnIceCandidate(const IceCandidateInterface* candidate) = 0;
+ // All Ice candidates have been found.
+ // TODO(bemasc): Remove this once callers transition to OnIceGatheringChange.
+ // (via PeerConnectionObserver)
+ virtual void OnIceComplete() {}
+
+ protected:
+ ~IceObserver() {}
+};
+
+class WebRtcSession : public cricket::BaseSession,
+ public AudioProviderInterface,
+ public DataChannelFactory,
+ public VideoProviderInterface,
+ public DtmfProviderInterface {
+ public:
+ WebRtcSession(cricket::ChannelManager* channel_manager,
+ talk_base::Thread* signaling_thread,
+ talk_base::Thread* worker_thread,
+ cricket::PortAllocator* port_allocator,
+ MediaStreamSignaling* mediastream_signaling);
+ virtual ~WebRtcSession();
+
+ bool Initialize(const MediaConstraintsInterface* constraints);
+ // Deletes the voice, video and data channel and changes the session state
+ // to STATE_RECEIVEDTERMINATE.
+ void Terminate();
+
+ void RegisterIceObserver(IceObserver* observer) {
+ ice_observer_ = observer;
+ }
+
+ virtual cricket::VoiceChannel* voice_channel() {
+ return voice_channel_.get();
+ }
+ virtual cricket::VideoChannel* video_channel() {
+ return video_channel_.get();
+ }
+ virtual cricket::DataChannel* data_channel() {
+ return data_channel_.get();
+ }
+
+ void set_secure_policy(cricket::SecureMediaPolicy secure_policy);
+ cricket::SecureMediaPolicy secure_policy() const {
+ return session_desc_factory_.secure();
+ }
+
+ // Generic error message callback from WebRtcSession.
+ // TODO - It may be necessary to supply error code as well.
+ sigslot::signal0<> SignalError;
+
+ SessionDescriptionInterface* CreateOffer(
+ const MediaConstraintsInterface* constraints);
+
+ SessionDescriptionInterface* CreateAnswer(
+ const MediaConstraintsInterface* constraints);
+
+ bool SetLocalDescription(SessionDescriptionInterface* desc,
+ std::string* err_desc);
+ bool SetRemoteDescription(SessionDescriptionInterface* desc,
+ std::string* err_desc);
+ bool ProcessIceMessage(const IceCandidateInterface* ice_candidate);
+ const SessionDescriptionInterface* local_description() const {
+ return local_desc_.get();
+ }
+ const SessionDescriptionInterface* remote_description() const {
+ return remote_desc_.get();
+ }
+
+ // Get the id used as a media stream track's "id" field from ssrc.
+ virtual bool GetTrackIdBySsrc(uint32 ssrc, std::string* id);
+
+ // AudioMediaProviderInterface implementation.
+ virtual void SetAudioPlayout(uint32 ssrc, bool enable) OVERRIDE;
+ virtual void SetAudioSend(uint32 ssrc, bool enable,
+ const cricket::AudioOptions& options) OVERRIDE;
+ virtual bool SetAudioRenderer(uint32 ssrc,
+ cricket::AudioRenderer* renderer) OVERRIDE;
+
+ // Implements VideoMediaProviderInterface.
+ virtual bool SetCaptureDevice(uint32 ssrc,
+ cricket::VideoCapturer* camera) OVERRIDE;
+ virtual void SetVideoPlayout(uint32 ssrc,
+ bool enable,
+ cricket::VideoRenderer* renderer) OVERRIDE;
+ virtual void SetVideoSend(uint32 ssrc, bool enable,
+ const cricket::VideoOptions* options) OVERRIDE;
+
+ // Implements DtmfProviderInterface.
+ virtual bool CanInsertDtmf(const std::string& track_id);
+ virtual bool InsertDtmf(const std::string& track_id,
+ int code, int duration);
+ virtual sigslot::signal0<>* GetOnDestroyedSignal();
+
+ talk_base::scoped_refptr<DataChannel> CreateDataChannel(
+ const std::string& label,
+ const DataChannelInit* config);
+
+ cricket::DataChannelType data_channel_type() const;
+
+ private:
+ // Indicates the type of SessionDescription in a call to SetLocalDescription
+ // and SetRemoteDescription.
+ enum Action {
+ kOffer,
+ kPrAnswer,
+ kAnswer,
+ };
+  // Invokes ConnectChannels() on the transport proxies, which initiates ICE
+  // candidate allocation.
+ bool StartCandidatesAllocation();
+ bool UpdateSessionState(Action action, cricket::ContentSource source,
+ const cricket::SessionDescription* desc,
+ std::string* err_desc);
+ static Action GetAction(const std::string& type);
+
+ // Transport related callbacks, override from cricket::BaseSession.
+ virtual void OnTransportRequestSignaling(cricket::Transport* transport);
+ virtual void OnTransportConnecting(cricket::Transport* transport);
+ virtual void OnTransportWritable(cricket::Transport* transport);
+ virtual void OnTransportProxyCandidatesReady(
+ cricket::TransportProxy* proxy,
+ const cricket::Candidates& candidates);
+ virtual void OnCandidatesAllocationDone();
+
+ // Check if a call to SetLocalDescription is acceptable with |action|.
+ bool ExpectSetLocalDescription(Action action);
+ // Check if a call to SetRemoteDescription is acceptable with |action|.
+ bool ExpectSetRemoteDescription(Action action);
+ // Creates local session description with audio and video contents.
+ bool CreateDefaultLocalDescription();
+ // Enables media channels to allow sending of media.
+ void EnableChannels();
+ // Creates a JsepIceCandidate and adds it to the local session description
+  // and notifies observers. Called when a new local candidate has been found.
+ void ProcessNewLocalCandidate(const std::string& content_name,
+ const cricket::Candidates& candidates);
+ // Returns the media index for a local ice candidate given the content name.
+ // Returns false if the local session description does not have a media
+ // content called |content_name|.
+ bool GetLocalCandidateMediaIndex(const std::string& content_name,
+ int* sdp_mline_index);
+ // Uses all remote candidates in |remote_desc| in this session.
+ bool UseCandidatesInSessionDescription(
+ const SessionDescriptionInterface* remote_desc);
+ // Uses |candidate| in this session.
+ bool UseCandidate(const IceCandidateInterface* candidate);
+ // Deletes the corresponding channel of contents that don't exist in |desc|.
+ // |desc| can be null. This means that all channels are deleted.
+ void RemoveUnusedChannelsAndTransports(
+ const cricket::SessionDescription* desc);
+
+ // Allocates media channels based on the |desc|. If |desc| doesn't have
+ // the BUNDLE option, this method will disable BUNDLE in PortAllocator.
+ // This method will also delete any existing media channels before creating.
+ bool CreateChannels(const cricket::SessionDescription* desc);
+
+ // Helper methods to create media channels.
+ bool CreateVoiceChannel(const cricket::SessionDescription* desc);
+ bool CreateVideoChannel(const cricket::SessionDescription* desc);
+ bool CreateDataChannel(const cricket::SessionDescription* desc);
+ // Copy the candidates from |saved_candidates_| to |dest_desc|.
+ // The |saved_candidates_| will be cleared after this function call.
+ void CopySavedCandidates(SessionDescriptionInterface* dest_desc);
+
+ // Forces |desc->crypto_required| to the appropriate state based on the
+ // current security policy, to ensure a failure occurs if there is an error
+ // in crypto negotiation.
+ // Called when processing the local session description.
+ void UpdateSessionDescriptionSecurePolicy(cricket::SessionDescription* desc);
+
+ bool GetLocalTrackId(uint32 ssrc, std::string* track_id);
+ bool GetRemoteTrackId(uint32 ssrc, std::string* track_id);
+
+ std::string BadStateErrMsg(const std::string& type, State state);
+ void SetIceConnectionState(PeerConnectionInterface::IceConnectionState state);
+
+ talk_base::scoped_ptr<cricket::VoiceChannel> voice_channel_;
+ talk_base::scoped_ptr<cricket::VideoChannel> video_channel_;
+ talk_base::scoped_ptr<cricket::DataChannel> data_channel_;
+ cricket::ChannelManager* channel_manager_;
+ cricket::TransportDescriptionFactory transport_desc_factory_;
+ cricket::MediaSessionDescriptionFactory session_desc_factory_;
+ MediaStreamSignaling* mediastream_signaling_;
+ IceObserver* ice_observer_;
+ PeerConnectionInterface::IceConnectionState ice_connection_state_;
+ talk_base::scoped_ptr<SessionDescriptionInterface> local_desc_;
+ talk_base::scoped_ptr<SessionDescriptionInterface> remote_desc_;
+ // Candidates that arrived before the remote description was set.
+ std::vector<IceCandidateInterface*> saved_candidates_;
+ uint64 session_version_;
+  // If the remote peer is using an older version of the implementation.
+ bool older_version_remote_peer_;
+ // Specifies which kind of data channel is allowed. This is controlled
+ // by the chrome command-line flag and constraints:
+ // 1. If chrome command-line switch 'enable-sctp-data-channels' is enabled,
+ // constraint kEnableDtlsSrtp is true, and constaint kEnableRtpDataChannels is
+ // not set or false, SCTP is allowed (DCT_SCTP);
+ // 2. If constraint kEnableRtpDataChannels is true, RTP is allowed (DCT_RTP);
+ // 3. If both 1&2 are false, data channel is not allowed (DCT_NONE).
+ cricket::DataChannelType data_channel_type_;
+ talk_base::scoped_ptr<IceRestartAnswerLatch> ice_restart_latch_;
+ sigslot::signal0<> SignalVoiceChannelDestroyed;
+ sigslot::signal0<> SignalVideoChannelDestroyed;
+ sigslot::signal0<> SignalDataChannelDestroyed;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_WEBRTCSESSION_H_
diff --git a/talk/app/webrtc/webrtcsession_unittest.cc b/talk/app/webrtc/webrtcsession_unittest.cc
new file mode 100644
index 0000000..55b2950
--- /dev/null
+++ b/talk/app/webrtc/webrtcsession_unittest.cc
@@ -0,0 +1,2473 @@
+/*
+ * libjingle
+ * Copyright 2012, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/audiotrack.h"
+#include "talk/app/webrtc/jsepicecandidate.h"
+#include "talk/app/webrtc/jsepsessiondescription.h"
+#include "talk/app/webrtc/mediastreamsignaling.h"
+#include "talk/app/webrtc/streamcollection.h"
+#include "talk/app/webrtc/videotrack.h"
+#include "talk/app/webrtc/test/fakeconstraints.h"
+#include "talk/app/webrtc/webrtcsession.h"
+#include "talk/base/fakenetwork.h"
+#include "talk/base/firewallsocketserver.h"
+#include "talk/base/gunit.h"
+#include "talk/base/logging.h"
+#include "talk/base/network.h"
+#include "talk/base/physicalsocketserver.h"
+#include "talk/base/sslstreamadapter.h"
+#include "talk/base/stringutils.h"
+#include "talk/base/thread.h"
+#include "talk/base/virtualsocketserver.h"
+#include "talk/media/base/fakemediaengine.h"
+#include "talk/media/base/fakevideorenderer.h"
+#include "talk/media/base/mediachannel.h"
+#include "talk/media/devices/fakedevicemanager.h"
+#include "talk/p2p/base/stunserver.h"
+#include "talk/p2p/base/teststunserver.h"
+#include "talk/p2p/client/basicportallocator.h"
+#include "talk/session/media/channelmanager.h"
+#include "talk/session/media/mediasession.h"
+
+#define MAYBE_SKIP_TEST(feature) \
+ if (!(feature())) { \
+ LOG(LS_INFO) << "Feature disabled... skipping"; \
+ return; \
+ }
+
+using cricket::BaseSession;
+using cricket::DF_PLAY;
+using cricket::DF_SEND;
+using cricket::FakeVoiceMediaChannel;
+using cricket::NS_GINGLE_P2P;
+using cricket::NS_JINGLE_ICE_UDP;
+using cricket::TransportInfo;
+using cricket::kDtmfDelay;
+using cricket::kDtmfReset;
+using talk_base::SocketAddress;
+using talk_base::scoped_ptr;
+using webrtc::CreateSessionDescription;
+using webrtc::FakeConstraints;
+using webrtc::IceCandidateCollection;
+using webrtc::JsepIceCandidate;
+using webrtc::JsepSessionDescription;
+using webrtc::PeerConnectionInterface;
+using webrtc::SessionDescriptionInterface;
+using webrtc::StreamCollection;
+using webrtc::kMlineMismatch;
+using webrtc::kSdpWithoutCrypto;
+using webrtc::kSessionError;
+using webrtc::kSetLocalSdpFailed;
+using webrtc::kSetRemoteSdpFailed;
+using webrtc::kPushDownAnswerTDFailed;
+using webrtc::kPushDownPranswerTDFailed;
+
+static const SocketAddress kClientAddr1("11.11.11.11", 0);
+static const SocketAddress kClientAddr2("22.22.22.22", 0);
+static const SocketAddress kStunAddr("99.99.99.1", cricket::STUN_SERVER_PORT);
+
+static const char kSessionVersion[] = "1";
+
+static const char kStream1[] = "stream1";
+static const char kVideoTrack1[] = "video1";
+static const char kAudioTrack1[] = "audio1";
+
+static const char kStream2[] = "stream2";
+static const char kVideoTrack2[] = "video2";
+static const char kAudioTrack2[] = "audio2";
+
+// Media index of candidates belonging to the first media content.
+static const int kMediaContentIndex0 = 0;
+static const char kMediaContentName0[] = "audio";
+
+// Media index of candidates belonging to the second media content.
+static const int kMediaContentIndex1 = 1;
+static const char kMediaContentName1[] = "video";
+
+static const int kIceCandidatesTimeout = 10000;
+
+static const cricket::AudioCodec
+ kTelephoneEventCodec(106, "telephone-event", 8000, 0, 1, 0);
+static const cricket::AudioCodec kCNCodec1(102, "CN", 8000, 0, 1, 0);
+static const cricket::AudioCodec kCNCodec2(103, "CN", 16000, 0, 1, 0);
+
+// Add some extra |newlines| to the |message| after |line|.
+static void InjectAfter(const std::string& line,
+ const std::string& newlines,
+ std::string* message) {
+ const std::string tmp = line + newlines;
+ talk_base::replace_substrs(line.c_str(), line.length(),
+ tmp.c_str(), tmp.length(), message);
+}
+
+class MockIceObserver : public webrtc::IceObserver {
+ public:
+ MockIceObserver()
+ : oncandidatesready_(false),
+ ice_connection_state_(PeerConnectionInterface::kIceConnectionNew),
+ ice_gathering_state_(PeerConnectionInterface::kIceGatheringNew) {
+ }
+
+ virtual void OnIceConnectionChange(
+ PeerConnectionInterface::IceConnectionState new_state) {
+ ice_connection_state_ = new_state;
+ }
+ virtual void OnIceGatheringChange(
+ PeerConnectionInterface::IceGatheringState new_state) {
+ // We can never transition back to "new".
+ EXPECT_NE(PeerConnectionInterface::kIceGatheringNew, new_state);
+ ice_gathering_state_ = new_state;
+
+ // oncandidatesready_ really means "ICE gathering is complete".
+ // This if statement ensures that this value remains correct when we
+ // transition from kIceGatheringComplete to kIceGatheringGathering.
+ if (new_state == PeerConnectionInterface::kIceGatheringGathering) {
+ oncandidatesready_ = false;
+ }
+ }
+
+ // Found a new candidate.
+ virtual void OnIceCandidate(const webrtc::IceCandidateInterface* candidate) {
+ if (candidate->sdp_mline_index() == kMediaContentIndex0) {
+ mline_0_candidates_.push_back(candidate->candidate());
+ } else if (candidate->sdp_mline_index() == kMediaContentIndex1) {
+ mline_1_candidates_.push_back(candidate->candidate());
+ }
+ // The ICE gathering state should always be Gathering when a candidate is
+ // received (or possibly Completed in the case of the final candidate).
+ EXPECT_NE(PeerConnectionInterface::kIceGatheringNew, ice_gathering_state_);
+ }
+
+ // TODO(bemasc): Remove this once callers transition to OnIceGatheringChange.
+ virtual void OnIceComplete() {
+ EXPECT_FALSE(oncandidatesready_);
+ oncandidatesready_ = true;
+
+ // OnIceGatheringChange(IceGatheringCompleted) and OnIceComplete() should
+ // be called approximately simultaneously. For ease of testing, this
+ // check additionally requires that they be called in the above order.
+ EXPECT_EQ(PeerConnectionInterface::kIceGatheringComplete,
+ ice_gathering_state_);
+ }
+
+ bool oncandidatesready_;
+ std::vector<cricket::Candidate> mline_0_candidates_;
+ std::vector<cricket::Candidate> mline_1_candidates_;
+ PeerConnectionInterface::IceConnectionState ice_connection_state_;
+ PeerConnectionInterface::IceGatheringState ice_gathering_state_;
+};
+
+class WebRtcSessionForTest : public webrtc::WebRtcSession {
+ public:
+ WebRtcSessionForTest(cricket::ChannelManager* cmgr,
+ talk_base::Thread* signaling_thread,
+ talk_base::Thread* worker_thread,
+ cricket::PortAllocator* port_allocator,
+ webrtc::IceObserver* ice_observer,
+ webrtc::MediaStreamSignaling* mediastream_signaling)
+ : WebRtcSession(cmgr, signaling_thread, worker_thread, port_allocator,
+ mediastream_signaling) {
+ RegisterIceObserver(ice_observer);
+ }
+ virtual ~WebRtcSessionForTest() {}
+
+ using cricket::BaseSession::GetTransportProxy;
+ using webrtc::WebRtcSession::SetAudioPlayout;
+ using webrtc::WebRtcSession::SetAudioSend;
+ using webrtc::WebRtcSession::SetCaptureDevice;
+ using webrtc::WebRtcSession::SetVideoPlayout;
+ using webrtc::WebRtcSession::SetVideoSend;
+};
+
+class FakeMediaStreamSignaling : public webrtc::MediaStreamSignaling,
+ public webrtc::MediaStreamSignalingObserver {
+ public:
+ FakeMediaStreamSignaling() :
+ webrtc::MediaStreamSignaling(talk_base::Thread::Current(), this) {
+ }
+
+ void SendAudioVideoStream1() {
+ ClearLocalStreams();
+ AddLocalStream(CreateStream(kStream1, kAudioTrack1, kVideoTrack1));
+ }
+
+ void SendAudioVideoStream2() {
+ ClearLocalStreams();
+ AddLocalStream(CreateStream(kStream2, kAudioTrack2, kVideoTrack2));
+ }
+
+ void SendAudioVideoStream1And2() {
+ ClearLocalStreams();
+ AddLocalStream(CreateStream(kStream1, kAudioTrack1, kVideoTrack1));
+ AddLocalStream(CreateStream(kStream2, kAudioTrack2, kVideoTrack2));
+ }
+
+ void SendNothing() {
+ ClearLocalStreams();
+ }
+
+ void UseOptionsAudioOnly() {
+ ClearLocalStreams();
+ AddLocalStream(CreateStream(kStream2, kAudioTrack2, ""));
+ }
+
+ void UseOptionsVideoOnly() {
+ ClearLocalStreams();
+ AddLocalStream(CreateStream(kStream2, "", kVideoTrack2));
+ }
+
+ void ClearLocalStreams() {
+ while (local_streams()->count() != 0) {
+ RemoveLocalStream(local_streams()->at(0));
+ }
+ }
+
+ // Implements MediaStreamSignalingObserver.
+ virtual void OnAddRemoteStream(webrtc::MediaStreamInterface* stream) {
+ }
+ virtual void OnRemoveRemoteStream(webrtc::MediaStreamInterface* stream) {
+ }
+ virtual void OnAddDataChannel(webrtc::DataChannelInterface* data_channel) {
+ }
+ virtual void OnAddLocalAudioTrack(webrtc::MediaStreamInterface* stream,
+ webrtc::AudioTrackInterface* audio_track,
+ uint32 ssrc) {
+ }
+ virtual void OnAddLocalVideoTrack(webrtc::MediaStreamInterface* stream,
+ webrtc::VideoTrackInterface* video_track,
+ uint32 ssrc) {
+ }
+ virtual void OnAddRemoteAudioTrack(webrtc::MediaStreamInterface* stream,
+ webrtc::AudioTrackInterface* audio_track,
+ uint32 ssrc) {
+ }
+
+ virtual void OnAddRemoteVideoTrack(webrtc::MediaStreamInterface* stream,
+ webrtc::VideoTrackInterface* video_track,
+ uint32 ssrc) {
+ }
+
+ virtual void OnRemoveRemoteAudioTrack(
+ webrtc::MediaStreamInterface* stream,
+ webrtc::AudioTrackInterface* audio_track) {
+ }
+
+ virtual void OnRemoveRemoteVideoTrack(
+ webrtc::MediaStreamInterface* stream,
+ webrtc::VideoTrackInterface* video_track) {
+ }
+
+ virtual void OnRemoveLocalAudioTrack(
+ webrtc::MediaStreamInterface* stream,
+ webrtc::AudioTrackInterface* audio_track) {
+ }
+ virtual void OnRemoveLocalVideoTrack(
+ webrtc::MediaStreamInterface* stream,
+ webrtc::VideoTrackInterface* video_track) {
+ }
+ virtual void OnRemoveLocalStream(webrtc::MediaStreamInterface* stream) {
+ }
+
+ private:
+ talk_base::scoped_refptr<webrtc::MediaStreamInterface> CreateStream(
+ const std::string& stream_label,
+ const std::string& audio_track_id,
+ const std::string& video_track_id) {
+ talk_base::scoped_refptr<webrtc::MediaStreamInterface> stream(
+ webrtc::MediaStream::Create(stream_label));
+
+ if (!audio_track_id.empty()) {
+ talk_base::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
+ webrtc::AudioTrack::Create(audio_track_id, NULL));
+ stream->AddTrack(audio_track);
+ }
+
+ if (!video_track_id.empty()) {
+ talk_base::scoped_refptr<webrtc::VideoTrackInterface> video_track(
+ webrtc::VideoTrack::Create(video_track_id, NULL));
+ stream->AddTrack(video_track);
+ }
+ return stream;
+ }
+
+ cricket::MediaSessionOptions options_;
+};
+
+class WebRtcSessionTest : public testing::Test {
+ protected:
+ // TODO Investigate why ChannelManager crashes, if it's created
+ // after stun_server.
+ WebRtcSessionTest()
+ : media_engine_(new cricket::FakeMediaEngine()),
+ data_engine_(new cricket::FakeDataEngine()),
+ device_manager_(new cricket::FakeDeviceManager()),
+ channel_manager_(new cricket::ChannelManager(
+ media_engine_, data_engine_, device_manager_,
+ new cricket::CaptureManager(), talk_base::Thread::Current())),
+ tdesc_factory_(new cricket::TransportDescriptionFactory()),
+ desc_factory_(new cricket::MediaSessionDescriptionFactory(
+ channel_manager_.get(), tdesc_factory_.get())),
+ pss_(new talk_base::PhysicalSocketServer),
+ vss_(new talk_base::VirtualSocketServer(pss_.get())),
+ fss_(new talk_base::FirewallSocketServer(vss_.get())),
+ ss_scope_(fss_.get()),
+ stun_server_(talk_base::Thread::Current(), kStunAddr),
+ allocator_(&network_manager_, kStunAddr,
+ SocketAddress(), SocketAddress(), SocketAddress()) {
+ tdesc_factory_->set_protocol(cricket::ICEPROTO_HYBRID);
+ allocator_.set_flags(cricket::PORTALLOCATOR_DISABLE_TCP |
+ cricket::PORTALLOCATOR_DISABLE_RELAY |
+ cricket::PORTALLOCATOR_ENABLE_BUNDLE);
+ EXPECT_TRUE(channel_manager_->Init());
+ desc_factory_->set_add_legacy_streams(false);
+ }
+
+ void AddInterface(const SocketAddress& addr) {
+ network_manager_.AddInterface(addr);
+ }
+
+ void Init() {
+ ASSERT_TRUE(session_.get() == NULL);
+ session_.reset(new WebRtcSessionForTest(
+ channel_manager_.get(), talk_base::Thread::Current(),
+ talk_base::Thread::Current(), &allocator_,
+ &observer_,
+ &mediastream_signaling_));
+
+ EXPECT_EQ(PeerConnectionInterface::kIceConnectionNew,
+ observer_.ice_connection_state_);
+ EXPECT_EQ(PeerConnectionInterface::kIceGatheringNew,
+ observer_.ice_gathering_state_);
+
+ EXPECT_TRUE(session_->Initialize(constraints_.get()));
+ }
+
+ void InitWithDtmfCodec() {
+ // Add kTelephoneEventCodec for dtmf test.
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kTelephoneEventCodec);
+ media_engine_->SetAudioCodecs(codecs);
+ desc_factory_->set_audio_codecs(codecs);
+ Init();
+ }
+
+ void InitWithDtls() {
+ constraints_.reset(new FakeConstraints());
+ constraints_->AddOptional(
+ webrtc::MediaConstraintsInterface::kEnableDtlsSrtp, true);
+
+ Init();
+ }
+
+ // Creates a local offer and applies it. Starts ice.
+ // Call mediastream_signaling_.UseOptionsWithStreamX() before this function
+ // to decide which streams to create.
+ void InitiateCall() {
+ SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
+ SetLocalDescriptionWithoutError(offer);
+ EXPECT_TRUE_WAIT(PeerConnectionInterface::kIceGatheringNew !=
+ observer_.ice_gathering_state_,
+ kIceCandidatesTimeout);
+ }
+
+ bool ChannelsExist() {
+ return (session_->voice_channel() != NULL &&
+ session_->video_channel() != NULL);
+ }
+
+ void CheckTransportChannels() {
+ EXPECT_TRUE(session_->GetChannel(cricket::CN_AUDIO, 1) != NULL);
+ EXPECT_TRUE(session_->GetChannel(cricket::CN_AUDIO, 2) != NULL);
+ EXPECT_TRUE(session_->GetChannel(cricket::CN_VIDEO, 1) != NULL);
+ EXPECT_TRUE(session_->GetChannel(cricket::CN_VIDEO, 2) != NULL);
+ }
+
+ void VerifyCryptoParams(const cricket::SessionDescription* sdp) {
+ ASSERT_TRUE(session_.get() != NULL);
+ const cricket::ContentInfo* content = cricket::GetFirstAudioContent(sdp);
+ ASSERT_TRUE(content != NULL);
+ const cricket::AudioContentDescription* audio_content =
+ static_cast<const cricket::AudioContentDescription*>(
+ content->description);
+ ASSERT_TRUE(audio_content != NULL);
+ ASSERT_EQ(1U, audio_content->cryptos().size());
+ ASSERT_EQ(47U, audio_content->cryptos()[0].key_params.size());
+ ASSERT_EQ("AES_CM_128_HMAC_SHA1_80",
+ audio_content->cryptos()[0].cipher_suite);
+ EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf),
+ audio_content->protocol());
+
+ content = cricket::GetFirstVideoContent(sdp);
+ ASSERT_TRUE(content != NULL);
+ const cricket::VideoContentDescription* video_content =
+ static_cast<const cricket::VideoContentDescription*>(
+ content->description);
+ ASSERT_TRUE(video_content != NULL);
+ ASSERT_EQ(1U, video_content->cryptos().size());
+ ASSERT_EQ("AES_CM_128_HMAC_SHA1_80",
+ video_content->cryptos()[0].cipher_suite);
+ ASSERT_EQ(47U, video_content->cryptos()[0].key_params.size());
+ EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf),
+ video_content->protocol());
+ }
+
+ void VerifyNoCryptoParams(const cricket::SessionDescription* sdp, bool dtls) {
+ const cricket::ContentInfo* content = cricket::GetFirstAudioContent(sdp);
+ ASSERT_TRUE(content != NULL);
+ const cricket::AudioContentDescription* audio_content =
+ static_cast<const cricket::AudioContentDescription*>(
+ content->description);
+ ASSERT_TRUE(audio_content != NULL);
+ ASSERT_EQ(0U, audio_content->cryptos().size());
+
+ content = cricket::GetFirstVideoContent(sdp);
+ ASSERT_TRUE(content != NULL);
+ const cricket::VideoContentDescription* video_content =
+ static_cast<const cricket::VideoContentDescription*>(
+ content->description);
+ ASSERT_TRUE(video_content != NULL);
+ ASSERT_EQ(0U, video_content->cryptos().size());
+
+ if (dtls) {
+ EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf),
+ audio_content->protocol());
+ EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf),
+ video_content->protocol());
+ } else {
+ EXPECT_EQ(std::string(cricket::kMediaProtocolAvpf),
+ audio_content->protocol());
+ EXPECT_EQ(std::string(cricket::kMediaProtocolAvpf),
+ video_content->protocol());
+ }
+ }
+
+ // Set the internal fake description factories to do DTLS-SRTP.
+ void SetFactoryDtlsSrtp() {
+ desc_factory_->set_secure(cricket::SEC_ENABLED);
+ std::string identity_name = "WebRTC" +
+ talk_base::ToString(talk_base::CreateRandomId());
+ tdesc_factory_->set_identity(talk_base::SSLIdentity::Generate(
+ identity_name));
+ tdesc_factory_->set_digest_algorithm(talk_base::DIGEST_SHA_256);
+ tdesc_factory_->set_secure(cricket::SEC_REQUIRED);
+ }
+
+ void VerifyFingerprintStatus(const cricket::SessionDescription* sdp,
+ bool expected) {
+ const TransportInfo* audio = sdp->GetTransportInfoByName("audio");
+ ASSERT_TRUE(audio != NULL);
+ ASSERT_EQ(expected, audio->description.identity_fingerprint.get() != NULL);
+ if (expected) {
+ ASSERT_EQ(std::string(talk_base::DIGEST_SHA_256), audio->description.
+ identity_fingerprint->algorithm);
+ }
+ const TransportInfo* video = sdp->GetTransportInfoByName("video");
+ ASSERT_TRUE(video != NULL);
+ ASSERT_EQ(expected, video->description.identity_fingerprint.get() != NULL);
+ if (expected) {
+ ASSERT_EQ(std::string(talk_base::DIGEST_SHA_256), video->description.
+ identity_fingerprint->algorithm);
+ }
+ }
+
+ void VerifyAnswerFromNonCryptoOffer() {
+ // Create a SDP without Crypto.
+ cricket::MediaSessionOptions options;
+ options.has_video = true;
+ scoped_ptr<JsepSessionDescription> offer(
+ CreateRemoteOffer(options, cricket::SEC_DISABLED));
+ ASSERT_TRUE(offer.get() != NULL);
+ VerifyNoCryptoParams(offer->description(), false);
+ SetRemoteDescriptionExpectError("Called with a SDP without crypto enabled",
+ offer.release());
+ const webrtc::SessionDescriptionInterface* answer =
+ session_->CreateAnswer(NULL);
+ // Answer should be NULL as no crypto params in offer.
+ ASSERT_TRUE(answer == NULL);
+ }
+
+ void VerifyAnswerFromCryptoOffer() {
+ cricket::MediaSessionOptions options;
+ options.has_video = true;
+ options.bundle_enabled = true;
+ scoped_ptr<JsepSessionDescription> offer(
+ CreateRemoteOffer(options, cricket::SEC_REQUIRED));
+ ASSERT_TRUE(offer.get() != NULL);
+ VerifyCryptoParams(offer->description());
+ SetRemoteDescriptionWithoutError(offer.release());
+ scoped_ptr<SessionDescriptionInterface> answer(
+ session_->CreateAnswer(NULL));
+ ASSERT_TRUE(answer.get() != NULL);
+ VerifyCryptoParams(answer->description());
+ }
+
+ void CompareIceUfragAndPassword(const cricket::SessionDescription* desc1,
+ const cricket::SessionDescription* desc2,
+ bool expect_equal) {
+ if (desc1->contents().size() != desc2->contents().size()) {
+ EXPECT_FALSE(expect_equal);
+ return;
+ }
+
+ const cricket::ContentInfos& contents = desc1->contents();
+ cricket::ContentInfos::const_iterator it = contents.begin();
+
+ for (; it != contents.end(); ++it) {
+ const cricket::TransportDescription* transport_desc1 =
+ desc1->GetTransportDescriptionByName(it->name);
+ const cricket::TransportDescription* transport_desc2 =
+ desc2->GetTransportDescriptionByName(it->name);
+ if (!transport_desc1 || !transport_desc2) {
+ EXPECT_FALSE(expect_equal);
+ return;
+ }
+ if (transport_desc1->ice_pwd != transport_desc2->ice_pwd ||
+ transport_desc1->ice_ufrag != transport_desc2->ice_ufrag) {
+ EXPECT_FALSE(expect_equal);
+ return;
+ }
+ }
+ EXPECT_TRUE(expect_equal);
+ }
  // Creates a remote offer and applies it as a remote description,
  // creates a local answer and applies it as a local description.
  // Call mediastream_signaling_.UseOptionsWithStreamX() before this function
  // to decide which local and remote streams to create.
  void CreateAndSetRemoteOfferAndLocalAnswer() {
    SessionDescriptionInterface* offer = CreateRemoteOffer();
    SetRemoteDescriptionWithoutError(offer);
    SessionDescriptionInterface* answer = session_->CreateAnswer(NULL);
    SetLocalDescriptionWithoutError(answer);
  }
  // Applies |desc| as the local description and expects success.
  // Takes ownership of |desc|.
  void SetLocalDescriptionWithoutError(SessionDescriptionInterface* desc) {
    EXPECT_TRUE(session_->SetLocalDescription(desc, NULL));
  }
  // Applies |desc| locally and verifies the session reaches |expected_state|.
  void SetLocalDescriptionExpectState(SessionDescriptionInterface* desc,
                                      BaseSession::State expected_state) {
    SetLocalDescriptionWithoutError(desc);
    EXPECT_EQ(expected_state, session_->state());
  }
  // Applies |desc| locally, expects failure, and verifies the error string
  // contains both the generic failure text and |expected_error|.
  void SetLocalDescriptionExpectError(const std::string& expected_error,
                                      SessionDescriptionInterface* desc) {
    std::string error;
    EXPECT_FALSE(session_->SetLocalDescription(desc, &error));
    EXPECT_NE(std::string::npos, error.find(kSetLocalSdpFailed));
    EXPECT_NE(std::string::npos, error.find(expected_error));
  }
  // Remote-side counterparts of the three helpers above.
  void SetRemoteDescriptionWithoutError(SessionDescriptionInterface* desc) {
    EXPECT_TRUE(session_->SetRemoteDescription(desc, NULL));
  }
  void SetRemoteDescriptionExpectState(SessionDescriptionInterface* desc,
                                       BaseSession::State expected_state) {
    SetRemoteDescriptionWithoutError(desc);
    EXPECT_EQ(expected_state, session_->state());
  }
  void SetRemoteDescriptionExpectError(const std::string& expected_error,
                                       SessionDescriptionInterface* desc) {
    std::string error;
    EXPECT_FALSE(session_->SetRemoteDescription(desc, &error));
    EXPECT_NE(std::string::npos, error.find(kSetRemoteSdpFailed));
    EXPECT_NE(std::string::npos, error.find(expected_error));
  }
+
  // Creates a crypto-enabled offer and a matching answer without crypto.
  // Ownership of both descriptions passes to the caller.
  void CreateCryptoOfferAndNonCryptoAnswer(SessionDescriptionInterface** offer,
      SessionDescriptionInterface** nocrypto_answer) {
    // Create a SDP without Crypto.
    cricket::MediaSessionOptions options;
    options.has_video = true;
    options.bundle_enabled = true;
    *offer = CreateRemoteOffer(options, cricket::SEC_ENABLED);
    ASSERT_TRUE(*offer != NULL);
    VerifyCryptoParams((*offer)->description());

    *nocrypto_answer = CreateRemoteAnswer(*offer, options,
                                          cricket::SEC_DISABLED);
    EXPECT_TRUE(*nocrypto_answer != NULL);
  }
+
+ JsepSessionDescription* CreateRemoteOfferWithVersion(
+ cricket::MediaSessionOptions options,
+ cricket::SecurePolicy secure_policy,
+ const std::string& session_version,
+ const SessionDescriptionInterface* current_desc) {
+ std::string session_id = talk_base::ToString(talk_base::CreateRandomId64());
+ const cricket::SessionDescription* cricket_desc = NULL;
+ if (current_desc) {
+ cricket_desc = current_desc->description();
+ session_id = current_desc->session_id();
+ }
+
+ desc_factory_->set_secure(secure_policy);
+ JsepSessionDescription* offer(
+ new JsepSessionDescription(JsepSessionDescription::kOffer));
+ if (!offer->Initialize(desc_factory_->CreateOffer(options, cricket_desc),
+ session_id, session_version)) {
+ delete offer;
+ offer = NULL;
+ }
+ return offer;
+ }
  // Convenience overload: secure offer with the default session version.
  JsepSessionDescription* CreateRemoteOffer(
      cricket::MediaSessionOptions options) {
    return CreateRemoteOfferWithVersion(options, cricket::SEC_ENABLED,
                                        kSessionVersion, NULL);
  }
  // Convenience overload: explicit secure policy, default session version.
  JsepSessionDescription* CreateRemoteOffer(
      cricket::MediaSessionOptions options, cricket::SecurePolicy policy) {
    return CreateRemoteOfferWithVersion(options, policy, kSessionVersion, NULL);
  }
  // Convenience overload: secure offer based on |current_desc|.
  JsepSessionDescription* CreateRemoteOffer(
      cricket::MediaSessionOptions options,
      const SessionDescriptionInterface* current_desc) {
    return CreateRemoteOfferWithVersion(options, cricket::SEC_ENABLED,
                                        kSessionVersion, current_desc);
  }

  // Create a remote offer. Call mediastream_signaling_.UseOptionsWithStreamX()
  // before this function to decide which streams to create.
  JsepSessionDescription* CreateRemoteOffer() {
    cricket::MediaSessionOptions options;
    mediastream_signaling_.GetOptionsForAnswer(NULL, &options);
    return CreateRemoteOffer(options, session_->remote_description());
  }
+
+ JsepSessionDescription* CreateRemoteAnswer(
+ const SessionDescriptionInterface* offer,
+ cricket::MediaSessionOptions options,
+ cricket::SecurePolicy policy) {
+ desc_factory_->set_secure(policy);
+ const std::string session_id =
+ talk_base::ToString(talk_base::CreateRandomId64());
+ JsepSessionDescription* answer(
+ new JsepSessionDescription(JsepSessionDescription::kAnswer));
+ if (!answer->Initialize(desc_factory_->CreateAnswer(offer->description(),
+ options, NULL),
+ session_id, kSessionVersion)) {
+ delete answer;
+ answer = NULL;
+ }
+ return answer;
+ }
+
  // Convenience overload: answer with crypto required.
  JsepSessionDescription* CreateRemoteAnswer(
      const SessionDescriptionInterface* offer,
      cricket::MediaSessionOptions options) {
    return CreateRemoteAnswer(offer, options, cricket::SEC_REQUIRED);
  }

  // Creates an answer session description with streams based on
  // |mediastream_signaling_|. Call
  // mediastream_signaling_.UseOptionsWithStreamX() before this function
  // to decide which streams to create.
  JsepSessionDescription* CreateRemoteAnswer(
      const SessionDescriptionInterface* offer) {
    cricket::MediaSessionOptions options;
    mediastream_signaling_.GetOptionsForAnswer(NULL, &options);
    return CreateRemoteAnswer(offer, options, cricket::SEC_REQUIRED);
  }
+
  // Runs a full offer/answer exchange and verifies the number and equivalence
  // of gathered candidates for each combination of BUNDLE and rtcp-mux.
  void TestSessionCandidatesWithBundleRtcpMux(bool bundle, bool rtcp_mux) {
    AddInterface(kClientAddr1);
    Init();
    mediastream_signaling_.SendAudioVideoStream1();
    FakeConstraints constraints;
    constraints.SetMandatoryUseRtpMux(bundle);
    SessionDescriptionInterface* offer = session_->CreateOffer(&constraints);
    // SetLocalDescription and SetRemoteDescriptions takes ownership of offer
    // and answer.
    SetLocalDescriptionWithoutError(offer);

    SessionDescriptionInterface* answer = CreateRemoteAnswer(
        session_->local_description());
    std::string sdp;
    EXPECT_TRUE(answer->ToString(&sdp));

    size_t expected_candidate_num = 2;
    if (!rtcp_mux) {
      // If rtcp_mux is disabled we should expect 4 candidates - host and
      // srflx for rtp and rtcp.
      expected_candidate_num = 4;
      // Disable rtcp-mux from the answer by mangling the attribute name so
      // the session does not recognize it.
      const std::string kRtcpMux = "a=rtcp-mux";
      const std::string kXRtcpMux = "a=xrtcp-mux";
      talk_base::replace_substrs(kRtcpMux.c_str(), kRtcpMux.length(),
                                 kXRtcpMux.c_str(), kXRtcpMux.length(),
                                 &sdp);
    }

    SessionDescriptionInterface* new_answer = CreateSessionDescription(
        JsepSessionDescription::kAnswer, sdp, NULL);
    delete answer;
    answer = new_answer;

    // SetRemoteDescription to enable rtcp mux.
    SetRemoteDescriptionWithoutError(answer);
    EXPECT_TRUE_WAIT(observer_.oncandidatesready_, kIceCandidatesTimeout);
    EXPECT_EQ(expected_candidate_num, observer_.mline_0_candidates_.size());
    EXPECT_EQ(expected_candidate_num, observer_.mline_1_candidates_.size());
    // With BUNDLE the two m-lines must share equivalent candidates.
    for (size_t i = 0; i < observer_.mline_0_candidates_.size(); ++i) {
      cricket::Candidate c0 = observer_.mline_0_candidates_[i];
      cricket::Candidate c1 = observer_.mline_1_candidates_[i];
      if (bundle) {
        EXPECT_TRUE(c0.IsEquivalent(c1));
      } else {
        EXPECT_FALSE(c0.IsEquivalent(c1));
      }
    }
  }
  // Tests that we can only send DTMF when the dtmf codec is supported.
  void TestCanInsertDtmf(bool can) {
    if (can) {
      InitWithDtmfCodec();
    } else {
      Init();
    }
    mediastream_signaling_.SendAudioVideoStream1();
    CreateAndSetRemoteOfferAndLocalAnswer();
    // An empty track id must always be rejected.
    EXPECT_FALSE(session_->CanInsertDtmf(""));
    EXPECT_EQ(can, session_->CanInsertDtmf(kAudioTrack1));
  }
+
  // The method sets up a call from the session to itself, in a loopback
  // arrangement. It also uses a firewall rule to create a temporary
  // disconnection. This code is placed as a method so that it can be invoked
  // by multiple tests with different allocators (e.g. with and without BUNDLE).
  // While running the call, this method also checks if the session goes through
  // the correct sequence of ICE states when a connection is established,
  // broken, and re-established.
  // The Connection state should go:
  // New -> Checking -> Connected -> Disconnected -> Connected.
  // The Gathering state should go: New -> Gathering -> Completed.
  void TestLoopbackCall() {
    AddInterface(kClientAddr1);
    Init();
    mediastream_signaling_.SendAudioVideoStream1();
    SessionDescriptionInterface* offer = session_->CreateOffer(NULL);

    EXPECT_EQ(PeerConnectionInterface::kIceGatheringNew,
              observer_.ice_gathering_state_);
    SetLocalDescriptionWithoutError(offer);
    EXPECT_EQ(PeerConnectionInterface::kIceConnectionNew,
              observer_.ice_connection_state_);
    EXPECT_EQ_WAIT(PeerConnectionInterface::kIceGatheringGathering,
                   observer_.ice_gathering_state_,
                   kIceCandidatesTimeout);
    EXPECT_TRUE_WAIT(observer_.oncandidatesready_, kIceCandidatesTimeout);
    EXPECT_EQ_WAIT(PeerConnectionInterface::kIceGatheringComplete,
                   observer_.ice_gathering_state_,
                   kIceCandidatesTimeout);

    // Feed the local offer back as the remote answer to create the loopback.
    std::string sdp;
    offer->ToString(&sdp);
    SessionDescriptionInterface* desc =
        webrtc::CreateSessionDescription(JsepSessionDescription::kAnswer, sdp);
    ASSERT_TRUE(desc != NULL);
    SetRemoteDescriptionWithoutError(desc);

    EXPECT_EQ_WAIT(PeerConnectionInterface::kIceConnectionChecking,
                   observer_.ice_connection_state_,
                   kIceCandidatesTimeout);
    EXPECT_EQ_WAIT(PeerConnectionInterface::kIceConnectionConnected,
                   observer_.ice_connection_state_,
                   kIceCandidatesTimeout);
    // TODO(bemasc): EXPECT(Completed) once the details are standardized.

    // Adding firewall rule to block ping requests, which should cause
    // transport channel failure.
    fss_->AddRule(false, talk_base::FP_ANY, talk_base::FD_ANY, kClientAddr1);
    EXPECT_EQ_WAIT(PeerConnectionInterface::kIceConnectionDisconnected,
                   observer_.ice_connection_state_,
                   kIceCandidatesTimeout);

    // Clearing the rules, session should move back to completed state.
    fss_->ClearRules();
    // Session is automatically calling OnSignalingReady after creation of
    // new portallocator session which will allocate new set of candidates.

    // TODO(bemasc): Change this to Completed once the details are standardized.
    EXPECT_EQ_WAIT(PeerConnectionInterface::kIceConnectionConnected,
                   observer_.ice_connection_state_,
                   kIceCandidatesTimeout);
  }
+
  // Verifies that the transport for |content_name| uses |protocol|.
  void VerifyTransportType(const std::string& content_name,
                           cricket::TransportProtocol protocol) {
    const cricket::Transport* transport = session_->GetTransport(content_name);
    ASSERT_TRUE(transport != NULL);
    EXPECT_EQ(protocol, transport->protocol());
  }
+
+ // Adds CN codecs to FakeMediaEngine and MediaDescriptionFactory.
+ void AddCNCodecs() {
+ // Add kTelephoneEventCodec for dtmf test.
+ std::vector<cricket::AudioCodec> codecs = media_engine_->audio_codecs();;
+ codecs.push_back(kCNCodec1);
+ codecs.push_back(kCNCodec2);
+ media_engine_->SetAudioCodecs(codecs);
+ desc_factory_->set_audio_codecs(codecs);
+ }
+
+ bool VerifyNoCNCodecs(const cricket::ContentInfo* content) {
+ const cricket::ContentDescription* description = content->description;
+ ASSERT(description != NULL);
+ const cricket::AudioContentDescription* audio_content_desc =
+ static_cast<const cricket::AudioContentDescription*> (description);
+ ASSERT(audio_content_desc != NULL);
+ for (size_t i = 0; i < audio_content_desc->codecs().size(); ++i) {
+ if (audio_content_desc->codecs()[i].name == "CN")
+ return false;
+ }
+ return true;
+ }
+
  // Creates an unreliable data channel, then creates and applies a local
  // offer that includes it.
  void SetLocalDescriptionWithDataChannel() {
    webrtc::DataChannelInit dci;
    dci.reliable = false;
    session_->CreateDataChannel("datachannel", &dci);
    SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
    SetLocalDescriptionWithoutError(offer);
  }
+
  // Fake engines and device manager injected into the channel manager.
  cricket::FakeMediaEngine* media_engine_;
  cricket::FakeDataEngine* data_engine_;
  cricket::FakeDeviceManager* device_manager_;
  talk_base::scoped_ptr<cricket::ChannelManager> channel_manager_;
  talk_base::scoped_ptr<cricket::TransportDescriptionFactory> tdesc_factory_;
  talk_base::scoped_ptr<cricket::MediaSessionDescriptionFactory> desc_factory_;
  // Layered socket servers simulating the network (physical -> virtual ->
  // firewalled), plus a STUN server and fake network manager for the
  // port allocator.
  talk_base::scoped_ptr<talk_base::PhysicalSocketServer> pss_;
  talk_base::scoped_ptr<talk_base::VirtualSocketServer> vss_;
  talk_base::scoped_ptr<talk_base::FirewallSocketServer> fss_;
  talk_base::SocketServerScope ss_scope_;
  cricket::TestStunServer stun_server_;
  talk_base::FakeNetworkManager network_manager_;
  cricket::BasicPortAllocator allocator_;
  talk_base::scoped_ptr<FakeConstraints> constraints_;
  FakeMediaStreamSignaling mediastream_signaling_;
  talk_base::scoped_ptr<WebRtcSessionForTest> session_;
  MockIceObserver observer_;
  // Cached raw channel pointers fetched from |media_engine_| by tests.
  cricket::FakeVideoMediaChannel* video_channel_;
  cricket::FakeVoiceMediaChannel* voice_channel_;
};
+
// Sanity check that plain initialization succeeds.
TEST_F(WebRtcSessionTest, TestInitialize) {
  Init();
}

// Sanity check that DTLS-enabled initialization succeeds.
TEST_F(WebRtcSessionTest, TestInitializeWithDtls) {
  InitWithDtls();
}

TEST_F(WebRtcSessionTest, TestSessionCandidates) {
  TestSessionCandidatesWithBundleRtcpMux(false, false);
}

// Below test cases (TestSessionCandidatesWith*) verify the candidates gathered
// with rtcp-mux and/or bundle.
TEST_F(WebRtcSessionTest, TestSessionCandidatesWithRtcpMux) {
  TestSessionCandidatesWithBundleRtcpMux(false, true);
}

TEST_F(WebRtcSessionTest, TestSessionCandidatesWithBundle) {
  TestSessionCandidatesWithBundleRtcpMux(true, false);
}

TEST_F(WebRtcSessionTest, TestSessionCandidatesWithBundleRtcpMux) {
  TestSessionCandidatesWithBundleRtcpMux(true, true);
}
+
// With two interfaces, each m-line should gather candidates for both
// (host + srflx, rtp + rtcp => 8 per m-line).
TEST_F(WebRtcSessionTest, TestMultihomeCandidates) {
  AddInterface(kClientAddr1);
  AddInterface(kClientAddr2);
  Init();
  mediastream_signaling_.SendAudioVideoStream1();
  InitiateCall();
  EXPECT_TRUE_WAIT(observer_.oncandidatesready_, kIceCandidatesTimeout);
  EXPECT_EQ(8u, observer_.mline_0_candidates_.size());
  EXPECT_EQ(8u, observer_.mline_1_candidates_.size());
}

// Blocking UDP on one interface should suppress its STUN candidates.
TEST_F(WebRtcSessionTest, TestStunError) {
  AddInterface(kClientAddr1);
  AddInterface(kClientAddr2);
  fss_->AddRule(false, talk_base::FP_UDP, talk_base::FD_ANY, kClientAddr1);
  Init();
  mediastream_signaling_.SendAudioVideoStream1();
  InitiateCall();
  // Since kClientAddr1 is blocked, not expecting stun candidates for it.
  EXPECT_TRUE_WAIT(observer_.oncandidatesready_, kIceCandidatesTimeout);
  EXPECT_EQ(6u, observer_.mline_0_candidates_.size());
  EXPECT_EQ(6u, observer_.mline_1_candidates_.size());
}
+
// Test creating offers and receive answers and make sure the
// media engine creates the expected send and receive streams.
TEST_F(WebRtcSessionTest, TestCreateOfferReceiveAnswer) {
  Init();
  mediastream_signaling_.SendAudioVideoStream1();
  SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
  const std::string session_id_orig = offer->session_id();
  const std::string session_version_orig = offer->session_version();
  SetLocalDescriptionWithoutError(offer);

  mediastream_signaling_.SendAudioVideoStream2();
  SessionDescriptionInterface* answer =
      CreateRemoteAnswer(session_->local_description());
  SetRemoteDescriptionWithoutError(answer);

  video_channel_ = media_engine_->GetVideoChannel(0);
  voice_channel_ = media_engine_->GetVoiceChannel(0);

  // We send stream 1 and receive stream 2.
  ASSERT_EQ(1u, video_channel_->recv_streams().size());
  EXPECT_TRUE(kVideoTrack2 == video_channel_->recv_streams()[0].id);

  ASSERT_EQ(1u, voice_channel_->recv_streams().size());
  EXPECT_TRUE(kAudioTrack2 == voice_channel_->recv_streams()[0].id);

  ASSERT_EQ(1u, video_channel_->send_streams().size());
  EXPECT_TRUE(kVideoTrack1 == video_channel_->send_streams()[0].id);
  ASSERT_EQ(1u, voice_channel_->send_streams().size());
  EXPECT_TRUE(kAudioTrack1 == voice_channel_->send_streams()[0].id);

  // Create new offer without send streams.
  mediastream_signaling_.SendNothing();
  offer = session_->CreateOffer(NULL);

  // Verify the session id is the same and the session version is
  // increased.
  EXPECT_EQ(session_id_orig, offer->session_id());
  EXPECT_LT(talk_base::FromString<uint64>(session_version_orig),
            talk_base::FromString<uint64>(offer->session_version()));

  SetLocalDescriptionWithoutError(offer);

  mediastream_signaling_.SendAudioVideoStream2();
  answer = CreateRemoteAnswer(session_->local_description());
  SetRemoteDescriptionWithoutError(answer);

  // Dropping our send streams must remove them from the channels.
  EXPECT_EQ(0u, video_channel_->send_streams().size());
  EXPECT_EQ(0u, voice_channel_->send_streams().size());

  // Make sure the receive streams have not changed.
  ASSERT_EQ(1u, video_channel_->recv_streams().size());
  EXPECT_TRUE(kVideoTrack2 == video_channel_->recv_streams()[0].id);
  ASSERT_EQ(1u, voice_channel_->recv_streams().size());
  EXPECT_TRUE(kAudioTrack2 == voice_channel_->recv_streams()[0].id);
}
+
// Test receiving offers and creating answers and make sure the
// media engine creates the expected send and receive streams.
TEST_F(WebRtcSessionTest, TestReceiveOfferCreateAnswer) {
  Init();
  mediastream_signaling_.SendAudioVideoStream2();
  SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
  SetRemoteDescriptionWithoutError(offer);

  mediastream_signaling_.SendAudioVideoStream1();
  SessionDescriptionInterface* answer = session_->CreateAnswer(NULL);
  SetLocalDescriptionWithoutError(answer);

  const std::string session_id_orig = answer->session_id();
  const std::string session_version_orig = answer->session_version();

  video_channel_ = media_engine_->GetVideoChannel(0);
  voice_channel_ = media_engine_->GetVoiceChannel(0);

  // Remote sends stream 2; we send stream 1.
  ASSERT_EQ(1u, video_channel_->recv_streams().size());
  EXPECT_TRUE(kVideoTrack2 == video_channel_->recv_streams()[0].id);

  ASSERT_EQ(1u, voice_channel_->recv_streams().size());
  EXPECT_TRUE(kAudioTrack2 == voice_channel_->recv_streams()[0].id);

  ASSERT_EQ(1u, video_channel_->send_streams().size());
  EXPECT_TRUE(kVideoTrack1 == video_channel_->send_streams()[0].id);
  ASSERT_EQ(1u, voice_channel_->send_streams().size());
  EXPECT_TRUE(kAudioTrack1 == voice_channel_->send_streams()[0].id);

  // Remote now offers both streams.
  mediastream_signaling_.SendAudioVideoStream1And2();
  offer = session_->CreateOffer(NULL);
  SetRemoteDescriptionWithoutError(offer);

  // Answer by turning off all send streams.
  mediastream_signaling_.SendNothing();
  answer = session_->CreateAnswer(NULL);

  // Verify the session id is the same and the session version is
  // increased.
  EXPECT_EQ(session_id_orig, answer->session_id());
  EXPECT_LT(talk_base::FromString<uint64>(session_version_orig),
            talk_base::FromString<uint64>(answer->session_version()));
  SetLocalDescriptionWithoutError(answer);

  ASSERT_EQ(2u, video_channel_->recv_streams().size());
  EXPECT_TRUE(kVideoTrack1 == video_channel_->recv_streams()[0].id);
  EXPECT_TRUE(kVideoTrack2 == video_channel_->recv_streams()[1].id);
  ASSERT_EQ(2u, voice_channel_->recv_streams().size());
  EXPECT_TRUE(kAudioTrack1 == voice_channel_->recv_streams()[0].id);
  EXPECT_TRUE(kAudioTrack2 == voice_channel_->recv_streams()[1].id);

  // Make sure we have no send streams.
  EXPECT_EQ(0u, video_channel_->send_streams().size());
  EXPECT_EQ(0u, voice_channel_->send_streams().size());
}
+
// Test that applying an offer without crypto enabled fails, both as a
// remote and as a local description.
TEST_F(WebRtcSessionTest, SetNonCryptoOffer) {
  Init();
  cricket::MediaSessionOptions options;
  options.has_video = true;
  JsepSessionDescription* offer = CreateRemoteOffer(
      options, cricket::SEC_DISABLED);
  ASSERT_TRUE(offer != NULL);
  VerifyNoCryptoParams(offer->description(), false);
  // SetRemoteDescription and SetLocalDescription will take the ownership of
  // the offer.
  SetRemoteDescriptionExpectError(kSdpWithoutCrypto, offer);
  offer = CreateRemoteOffer(options, cricket::SEC_DISABLED);
  ASSERT_TRUE(offer != NULL);
  SetLocalDescriptionExpectError(kSdpWithoutCrypto, offer);
}

// Test that applying a local answer without crypto enabled fails.
TEST_F(WebRtcSessionTest, SetLocalNonCryptoAnswer) {
  Init();
  SessionDescriptionInterface* offer = NULL;
  SessionDescriptionInterface* answer = NULL;
  CreateCryptoOfferAndNonCryptoAnswer(&offer, &answer);
  // SetRemoteDescription and SetLocalDescription will take the ownership of
  // the offer and the answer.
  SetRemoteDescriptionWithoutError(offer);
  SetLocalDescriptionExpectError(kSdpWithoutCrypto, answer);
}

// Test that applying a remote answer without crypto enabled fails.
TEST_F(WebRtcSessionTest, SetRemoteNonCryptoAnswer) {
  Init();
  SessionDescriptionInterface* offer = NULL;
  SessionDescriptionInterface* answer = NULL;
  CreateCryptoOfferAndNonCryptoAnswer(&offer, &answer);
  // SetRemoteDescription and SetLocalDescription will take the ownership of
  // the offer and the answer.
  SetLocalDescriptionWithoutError(offer);
  SetRemoteDescriptionExpectError(kSdpWithoutCrypto, answer);
}
+
// Test that we can create and set an offer with a DTLS fingerprint.
TEST_F(WebRtcSessionTest, CreateSetDtlsOffer) {
  MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
  InitWithDtls();
  mediastream_signaling_.SendAudioVideoStream1();
  SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
  ASSERT_TRUE(offer != NULL);
  VerifyFingerprintStatus(offer->description(), true);
  // SetLocalDescription will take the ownership of the offer.
  SetLocalDescriptionWithoutError(offer);
}

// Test that we can process an offer with a DTLS fingerprint
// and that we return an answer with a fingerprint.
TEST_F(WebRtcSessionTest, ReceiveDtlsOfferCreateAnswer) {
  MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
  InitWithDtls();
  SetFactoryDtlsSrtp();
  cricket::MediaSessionOptions options;
  options.has_video = true;
  JsepSessionDescription* offer = CreateRemoteOffer(options);
  ASSERT_TRUE(offer != NULL);
  VerifyFingerprintStatus(offer->description(), true);

  // SetRemoteDescription will take the ownership of the offer.
  SetRemoteDescriptionWithoutError(offer);

  // Verify that we get a crypto fingerprint in the answer.
  SessionDescriptionInterface* answer = session_->CreateAnswer(NULL);
  ASSERT_TRUE(answer != NULL);
  VerifyFingerprintStatus(answer->description(), true);
  // Check that we don't have an a=crypto line in the answer.
  VerifyNoCryptoParams(answer->description(), true);

  // Now set the local description, which should work, even without a=crypto.
  SetLocalDescriptionWithoutError(answer);
}

// Test that even if we support DTLS, if the other side didn't offer a
// fingerprint, we don't either.
TEST_F(WebRtcSessionTest, ReceiveNoDtlsOfferCreateAnswer) {
  MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);
  InitWithDtls();
  cricket::MediaSessionOptions options;
  options.has_video = true;
  JsepSessionDescription* offer = CreateRemoteOffer(
      options, cricket::SEC_REQUIRED);
  ASSERT_TRUE(offer != NULL);
  VerifyFingerprintStatus(offer->description(), false);

  // SetRemoteDescription will take the ownership of
  // the offer.
  SetRemoteDescriptionWithoutError(offer);

  // Verify that we don't get a crypto fingerprint in the answer.
  SessionDescriptionInterface* answer = session_->CreateAnswer(NULL);
  ASSERT_TRUE(answer != NULL);
  VerifyFingerprintStatus(answer->description(), false);

  // Now set the local description.
  SetLocalDescriptionWithoutError(answer);
}
+
// Setting a second local offer before an answer arrives must succeed.
TEST_F(WebRtcSessionTest, TestSetLocalOfferTwice) {
  Init();
  mediastream_signaling_.SendNothing();
  // SetLocalDescription takes ownership of offer.
  SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
  SetLocalDescriptionWithoutError(offer);

  // SetLocalDescription takes ownership of offer.
  SessionDescriptionInterface* offer2 = session_->CreateOffer(NULL);
  SetLocalDescriptionWithoutError(offer2);
}

// Setting a second remote offer before answering must succeed.
TEST_F(WebRtcSessionTest, TestSetRemoteOfferTwice) {
  Init();
  mediastream_signaling_.SendNothing();
  // SetRemoteDescription takes ownership of offer.
  SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
  SetRemoteDescriptionWithoutError(offer);

  SessionDescriptionInterface* offer2 = session_->CreateOffer(NULL);
  SetRemoteDescriptionWithoutError(offer2);
}

// After a local offer, a remote *offer* (instead of an answer) is an error.
TEST_F(WebRtcSessionTest, TestSetLocalAndRemoteOffer) {
  Init();
  mediastream_signaling_.SendNothing();
  SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
  SetLocalDescriptionWithoutError(offer);
  offer = session_->CreateOffer(NULL);
  SetRemoteDescriptionExpectError(
      "Called with type in wrong state, type: offer state: STATE_SENTINITIATE",
      offer);
}

// After a remote offer, a local *offer* (instead of an answer) is an error.
TEST_F(WebRtcSessionTest, TestSetRemoteAndLocalOffer) {
  Init();
  mediastream_signaling_.SendNothing();
  SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
  SetRemoteDescriptionWithoutError(offer);
  offer = session_->CreateOffer(NULL);
  SetLocalDescriptionExpectError(
      "Called with type in wrong state, type: "
      "offer state: STATE_RECEIVEDINITIATE",
      offer);
}
+
// Provisional local answers keep the session in SENTPRACCEPT until a final
// answer moves it to SENTACCEPT.
TEST_F(WebRtcSessionTest, TestSetLocalPrAnswer) {
  Init();
  mediastream_signaling_.SendNothing();
  SessionDescriptionInterface* offer = CreateRemoteOffer();
  SetRemoteDescriptionExpectState(offer, BaseSession::STATE_RECEIVEDINITIATE);

  JsepSessionDescription* pranswer = static_cast<JsepSessionDescription*>(
      session_->CreateAnswer(NULL));
  pranswer->set_type(SessionDescriptionInterface::kPrAnswer);
  SetLocalDescriptionExpectState(pranswer, BaseSession::STATE_SENTPRACCEPT);

  mediastream_signaling_.SendAudioVideoStream1();
  JsepSessionDescription* pranswer2 = static_cast<JsepSessionDescription*>(
      session_->CreateAnswer(NULL));
  pranswer2->set_type(SessionDescriptionInterface::kPrAnswer);

  SetLocalDescriptionExpectState(pranswer2, BaseSession::STATE_SENTPRACCEPT);

  mediastream_signaling_.SendAudioVideoStream2();
  SessionDescriptionInterface* answer = session_->CreateAnswer(NULL);
  SetLocalDescriptionExpectState(answer, BaseSession::STATE_SENTACCEPT);
}

// Provisional remote answers keep the session in RECEIVEDPRACCEPT until a
// final answer moves it to RECEIVEDACCEPT.
TEST_F(WebRtcSessionTest, TestSetRemotePrAnswer) {
  Init();
  mediastream_signaling_.SendNothing();
  SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
  SetLocalDescriptionExpectState(offer, BaseSession::STATE_SENTINITIATE);

  JsepSessionDescription* pranswer =
      CreateRemoteAnswer(session_->local_description());
  pranswer->set_type(SessionDescriptionInterface::kPrAnswer);

  SetRemoteDescriptionExpectState(pranswer,
                                  BaseSession::STATE_RECEIVEDPRACCEPT);

  mediastream_signaling_.SendAudioVideoStream1();
  JsepSessionDescription* pranswer2 =
      CreateRemoteAnswer(session_->local_description());
  pranswer2->set_type(SessionDescriptionInterface::kPrAnswer);

  SetRemoteDescriptionExpectState(pranswer2,
                                  BaseSession::STATE_RECEIVEDPRACCEPT);

  mediastream_signaling_.SendAudioVideoStream2();
  SessionDescriptionInterface* answer =
      CreateRemoteAnswer(session_->local_description());
  SetRemoteDescriptionExpectState(answer, BaseSession::STATE_RECEIVEDACCEPT);
}
+
// An answer is rejected as a local description when no offer was applied.
TEST_F(WebRtcSessionTest, TestSetLocalAnswerWithoutOffer) {
  Init();
  mediastream_signaling_.SendNothing();
  talk_base::scoped_ptr<SessionDescriptionInterface> offer(
      session_->CreateOffer(NULL));
  SessionDescriptionInterface* answer =
      CreateRemoteAnswer(offer.get());
  SetLocalDescriptionExpectError(
      "Called with type in wrong state, type: answer state: STATE_INIT",
      answer);
}

// An answer is rejected as a remote description when no offer was applied.
TEST_F(WebRtcSessionTest, TestSetRemoteAnswerWithoutOffer) {
  Init();
  mediastream_signaling_.SendNothing();
  talk_base::scoped_ptr<SessionDescriptionInterface> offer(
      session_->CreateOffer(NULL));
  SessionDescriptionInterface* answer =
      CreateRemoteAnswer(offer.get());
  SetRemoteDescriptionExpectError(
      "Called with type in wrong state, type: answer state: STATE_INIT",
      answer);
}
+
// Verifies remote ICE candidate processing: rejected before any description,
// accepted after the local offer, and copied into the remote description.
TEST_F(WebRtcSessionTest, TestAddRemoteCandidate) {
  Init();
  mediastream_signaling_.SendAudioVideoStream1();

  cricket::Candidate candidate;
  candidate.set_component(1);
  JsepIceCandidate ice_candidate1(kMediaContentName0, 0, candidate);

  // Fail since we have not set a offer description.
  EXPECT_FALSE(session_->ProcessIceMessage(&ice_candidate1));

  SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
  SetLocalDescriptionWithoutError(offer);
  // Candidate should be allowed to add before remote description.
  EXPECT_TRUE(session_->ProcessIceMessage(&ice_candidate1));
  candidate.set_component(2);
  JsepIceCandidate ice_candidate2(kMediaContentName0, 0, candidate);
  EXPECT_TRUE(session_->ProcessIceMessage(&ice_candidate2));

  SessionDescriptionInterface* answer = CreateRemoteAnswer(
      session_->local_description());
  SetRemoteDescriptionWithoutError(answer);

  // Verifying the candidates are copied properly from internal vector.
  const SessionDescriptionInterface* remote_desc =
      session_->remote_description();
  ASSERT_TRUE(remote_desc != NULL);
  ASSERT_EQ(2u, remote_desc->number_of_mediasections());
  const IceCandidateCollection* candidates =
      remote_desc->candidates(kMediaContentIndex0);
  ASSERT_EQ(2u, candidates->count());
  EXPECT_EQ(kMediaContentIndex0, candidates->at(0)->sdp_mline_index());
  EXPECT_EQ(kMediaContentName0, candidates->at(0)->sdp_mid());
  EXPECT_EQ(1, candidates->at(0)->candidate().component());
  EXPECT_EQ(2, candidates->at(1)->candidate().component());

  // A further candidate arriving after the remote description is applied
  // should also be appended to it.
  candidate.set_component(2);
  JsepIceCandidate ice_candidate3(kMediaContentName0, 0, candidate);
  EXPECT_TRUE(session_->ProcessIceMessage(&ice_candidate3));
  ASSERT_EQ(3u, candidates->count());

  // A candidate for an unknown content name / m-line index is rejected.
  JsepIceCandidate bad_ice_candidate("bad content name", 99, candidate);
  EXPECT_FALSE(session_->ProcessIceMessage(&bad_ice_candidate));
}
+
// Test that a remote candidate is added to the remote session description and
// that it is retained if the remote session description is changed.
TEST_F(WebRtcSessionTest, TestRemoteCandidatesAddedToSessionDescription) {
  Init();
  cricket::Candidate candidate1;
  candidate1.set_component(1);
  JsepIceCandidate ice_candidate1(kMediaContentName0, kMediaContentIndex0,
                                  candidate1);
  mediastream_signaling_.SendAudioVideoStream1();
  CreateAndSetRemoteOfferAndLocalAnswer();

  EXPECT_TRUE(session_->ProcessIceMessage(&ice_candidate1));
  const SessionDescriptionInterface* remote_desc =
      session_->remote_description();
  ASSERT_TRUE(remote_desc != NULL);
  ASSERT_EQ(2u, remote_desc->number_of_mediasections());
  const IceCandidateCollection* candidates =
      remote_desc->candidates(kMediaContentIndex0);
  ASSERT_EQ(1u, candidates->count());
  EXPECT_EQ(kMediaContentIndex0, candidates->at(0)->sdp_mline_index());

  // Update the RemoteSessionDescription with a new session description and
  // a candidate and check that the new remote session description contains both
  // candidates.
  SessionDescriptionInterface* offer = CreateRemoteOffer();
  cricket::Candidate candidate2;
  JsepIceCandidate ice_candidate2(kMediaContentName0, kMediaContentIndex0,
                                  candidate2);
  EXPECT_TRUE(offer->AddCandidate(&ice_candidate2));
  SetRemoteDescriptionWithoutError(offer);

  remote_desc = session_->remote_description();
  ASSERT_TRUE(remote_desc != NULL);
  ASSERT_EQ(2u, remote_desc->number_of_mediasections());
  candidates = remote_desc->candidates(kMediaContentIndex0);
  ASSERT_EQ(2u, candidates->count());
  EXPECT_EQ(kMediaContentIndex0, candidates->at(0)->sdp_mline_index());
  // Username and password have been updated with the TransportInfo of the
  // SessionDescription, so they won't be equal to the original one.
  candidate2.set_username(candidates->at(0)->candidate().username());
  candidate2.set_password(candidates->at(0)->candidate().password());
  EXPECT_TRUE(candidate2.IsEquivalent(candidates->at(0)->candidate()));
  EXPECT_EQ(kMediaContentIndex0, candidates->at(1)->sdp_mline_index());
  // No need to verify the username and password.
  candidate1.set_username(candidates->at(1)->candidate().username());
  candidate1.set_password(candidates->at(1)->candidate().password());
  EXPECT_TRUE(candidate1.IsEquivalent(candidates->at(1)->candidate()));

  // Adding the same candidate again should be ignored without error.
  EXPECT_TRUE(session_->ProcessIceMessage(&ice_candidate2));
}
+
+// Test that local candidates are added to the local session description and
+// that they are retained if the local session description is changed.
+TEST_F(WebRtcSessionTest, TestLocalCandidatesAddedToSessionDescription) {
+ AddInterface(kClientAddr1);
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ CreateAndSetRemoteOfferAndLocalAnswer();
+
+ // Right after negotiation no local candidates have been gathered yet.
+ const SessionDescriptionInterface* desc = session_->local_description();
+ const IceCandidateCollection* collection =
+ desc->candidates(kMediaContentIndex0);
+ ASSERT_TRUE(collection != NULL);
+ EXPECT_EQ(0u, collection->count());
+
+ // Let candidate gathering finish.
+ EXPECT_TRUE_WAIT(observer_.oncandidatesready_, kIceCandidatesTimeout);
+
+ // Both media sections should now carry at least one candidate.
+ desc = session_->local_description();
+ collection = desc->candidates(kMediaContentIndex0);
+ ASSERT_TRUE(collection != NULL);
+ EXPECT_LT(0u, collection->count());
+ collection = desc->candidates(1);
+ ASSERT_TRUE(collection != NULL);
+ EXPECT_LT(0u, collection->count());
+
+ // Renegotiate and verify the gathered candidates are retained in the new
+ // local description.
+ mediastream_signaling_.SendAudioVideoStream1();
+ CreateAndSetRemoteOfferAndLocalAnswer();
+
+ desc = session_->local_description();
+ collection = desc->candidates(kMediaContentIndex0);
+ ASSERT_TRUE(collection != NULL);
+ EXPECT_LT(0u, collection->count());
+ collection = desc->candidates(1);
+ ASSERT_TRUE(collection != NULL);
+ EXPECT_LT(0u, collection->count());
+}
+
+// Test that we can set a remote session description with remote candidates.
+TEST_F(WebRtcSessionTest, TestSetRemoteSessionDescriptionWithCandidates) {
+ Init();
+
+ // Build a candidate and attach it to a freshly created offer before that
+ // offer is applied as the remote description.
+ cricket::Candidate rtp_candidate;
+ rtp_candidate.set_component(1);
+ JsepIceCandidate jsep_candidate(kMediaContentName0, kMediaContentIndex0,
+ rtp_candidate);
+ mediastream_signaling_.SendAudioVideoStream1();
+ SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
+
+ EXPECT_TRUE(offer->AddCandidate(&jsep_candidate));
+ SetRemoteDescriptionWithoutError(offer);
+
+ // The applied remote description must contain the attached candidate.
+ const SessionDescriptionInterface* remote_desc =
+ session_->remote_description();
+ ASSERT_TRUE(remote_desc != NULL);
+ ASSERT_EQ(2u, remote_desc->number_of_mediasections());
+ const IceCandidateCollection* collection =
+ remote_desc->candidates(kMediaContentIndex0);
+ ASSERT_EQ(1u, collection->count());
+ EXPECT_EQ(kMediaContentIndex0, collection->at(0)->sdp_mline_index());
+
+ // Answering must still succeed.
+ SessionDescriptionInterface* answer = session_->CreateAnswer(NULL);
+ SetLocalDescriptionWithoutError(answer);
+}
+
+// Test that offers and answers contain ICE candidates when ICE candidates
+// have been gathered.
+TEST_F(WebRtcSessionTest, TestSetLocalAndRemoteDescriptionWithCandidates) {
+ AddInterface(kClientAddr1);
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ // Ice is started but candidates are not provided until SetLocalDescription
+ // is called.
+ EXPECT_EQ(0u, observer_.mline_0_candidates_.size());
+ EXPECT_EQ(0u, observer_.mline_1_candidates_.size());
+ CreateAndSetRemoteOfferAndLocalAnswer();
+ // Wait until at least one local candidate has been collected.
+ EXPECT_TRUE_WAIT(0u < observer_.mline_0_candidates_.size(),
+ kIceCandidatesTimeout);
+ EXPECT_TRUE_WAIT(0u < observer_.mline_1_candidates_.size(),
+ kIceCandidatesTimeout);
+
+ // A subsequent offer must include the gathered candidates for both m-lines.
+ talk_base::scoped_ptr<SessionDescriptionInterface> local_offer(
+ session_->CreateOffer(NULL));
+ ASSERT_TRUE(local_offer->candidates(kMediaContentIndex0) != NULL);
+ EXPECT_LT(0u, local_offer->candidates(kMediaContentIndex0)->count());
+ ASSERT_TRUE(local_offer->candidates(kMediaContentIndex1) != NULL);
+ EXPECT_LT(0u, local_offer->candidates(kMediaContentIndex1)->count());
+
+ // The same holds for an answer created after a new remote offer.
+ SessionDescriptionInterface* remote_offer(CreateRemoteOffer());
+ SetRemoteDescriptionWithoutError(remote_offer);
+ SessionDescriptionInterface* answer = session_->CreateAnswer(NULL);
+ ASSERT_TRUE(answer->candidates(kMediaContentIndex0) != NULL);
+ EXPECT_LT(0u, answer->candidates(kMediaContentIndex0)->count());
+ ASSERT_TRUE(answer->candidates(kMediaContentIndex1) != NULL);
+ EXPECT_LT(0u, answer->candidates(kMediaContentIndex1)->count());
+ SetLocalDescriptionWithoutError(answer);
+}
+
+// Verifies TransportProxy and media channels are created with content names
+// present in the SessionDescription.
+TEST_F(WebRtcSessionTest, TestChannelCreationsWithContentNames) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ talk_base::scoped_ptr<SessionDescriptionInterface> offer(
+ session_->CreateOffer(NULL));
+
+ // CreateOffer creates a session description with the content names "audio"
+ // and "video". Rename those contents in the SDP and then verify the
+ // transport channel proxies in the BaseSession, since proxies are created
+ // with the content names present in the SDP.
+ std::string offer_sdp;
+ EXPECT_TRUE(offer->ToString(&offer_sdp));
+ const std::string kOriginalAudioMid = "a=mid:audio";
+ const std::string kRenamedAudioMid = "a=mid:audio_content_name";
+ const std::string kOriginalVideoMid = "a=mid:video";
+ const std::string kRenamedVideoMid = "a=mid:video_content_name";
+
+ // Rename the audio content.
+ talk_base::replace_substrs(kOriginalAudioMid.c_str(),
+ kOriginalAudioMid.length(),
+ kRenamedAudioMid.c_str(),
+ kRenamedAudioMid.length(),
+ &offer_sdp);
+ // Rename the video content.
+ talk_base::replace_substrs(kOriginalVideoMid.c_str(),
+ kOriginalVideoMid.length(),
+ kRenamedVideoMid.c_str(),
+ kRenamedVideoMid.length(),
+ &offer_sdp);
+
+ SessionDescriptionInterface* modified_offer =
+ CreateSessionDescription(JsepSessionDescription::kOffer, offer_sdp, NULL);
+ SetRemoteDescriptionWithoutError(modified_offer);
+
+ SessionDescriptionInterface* answer = session_->CreateAnswer(NULL);
+ SetLocalDescriptionWithoutError(answer);
+
+ // Transport proxies must exist under the renamed content names, and the
+ // media channels must have been created.
+ EXPECT_TRUE(session_->GetTransportProxy("audio_content_name") != NULL);
+ EXPECT_TRUE(session_->GetTransportProxy("video_content_name") != NULL);
+ EXPECT_TRUE((video_channel_ = media_engine_->GetVideoChannel(0)) != NULL);
+ EXPECT_TRUE((voice_channel_ = media_engine_->GetVoiceChannel(0)) != NULL);
+}
+
+// Test that an offer contains the correct media content descriptions based on
+// the send streams when no constraints have been set.
+TEST_F(WebRtcSessionTest, CreateOfferWithoutConstraintsOrStreams) {
+ Init();
+ talk_base::scoped_ptr<SessionDescriptionInterface> offer(
+ session_->CreateOffer(NULL));
+ ASSERT_TRUE(offer != NULL);
+ // With no streams and no constraints the offer carries audio only.
+ EXPECT_TRUE(cricket::GetFirstAudioContent(offer->description()) != NULL);
+ EXPECT_TRUE(cricket::GetFirstVideoContent(offer->description()) == NULL);
+}
+
+// Test that an offer contains the correct media content descriptions based on
+// the send streams when no constraints have been set.
+TEST_F(WebRtcSessionTest, CreateOfferWithoutConstraints) {
+ Init();
+ // An audio-only stream must yield an audio-only offer.
+ mediastream_signaling_.UseOptionsAudioOnly();
+ talk_base::scoped_ptr<SessionDescriptionInterface> offer(
+ session_->CreateOffer(NULL));
+ EXPECT_TRUE(cricket::GetFirstAudioContent(offer->description()) != NULL);
+ EXPECT_TRUE(cricket::GetFirstVideoContent(offer->description()) == NULL);
+
+ // An audio/video stream must yield both content types.
+ mediastream_signaling_.SendAudioVideoStream1();
+ offer.reset(session_->CreateOffer(NULL));
+ EXPECT_TRUE(cricket::GetFirstAudioContent(offer->description()) != NULL);
+ EXPECT_TRUE(cricket::GetFirstVideoContent(offer->description()) != NULL);
+}
+
+// Test that an offer contains no media content descriptions if
+// kOfferToReceiveVideo and kOfferToReceiveAudio constraints are set to false.
+TEST_F(WebRtcSessionTest, CreateOfferWithConstraintsWithoutStreams) {
+ Init();
+ webrtc::FakeConstraints no_receive_constraints;
+ no_receive_constraints.SetMandatoryReceiveAudio(false);
+ no_receive_constraints.SetMandatoryReceiveVideo(false);
+
+ talk_base::scoped_ptr<SessionDescriptionInterface> offer(
+ session_->CreateOffer(&no_receive_constraints));
+ ASSERT_TRUE(offer != NULL);
+ // Neither media type should appear in the offer.
+ EXPECT_TRUE(cricket::GetFirstAudioContent(offer->description()) == NULL);
+ EXPECT_TRUE(cricket::GetFirstVideoContent(offer->description()) == NULL);
+}
+
+// Test that an offer contains only audio media content descriptions if
+// kOfferToReceiveAudio constraints are set to true.
+TEST_F(WebRtcSessionTest, CreateAudioOnlyOfferWithConstraints) {
+ Init();
+ webrtc::FakeConstraints audio_only_constraints;
+ audio_only_constraints.SetMandatoryReceiveAudio(true);
+ talk_base::scoped_ptr<SessionDescriptionInterface> offer(
+ session_->CreateOffer(&audio_only_constraints));
+
+ // Only an audio content description should be present.
+ EXPECT_TRUE(cricket::GetFirstAudioContent(offer->description()) != NULL);
+ EXPECT_TRUE(cricket::GetFirstVideoContent(offer->description()) == NULL);
+}
+
+// Test that an offer contains audio and video media content descriptions if
+// kOfferToReceiveAudio and kOfferToReceiveVideo constraints are set to true.
+TEST_F(WebRtcSessionTest, CreateOfferWithConstraints) {
+ Init();
+ // Request both audio and video via mandatory constraints.
+ webrtc::FakeConstraints av_constraints;
+ av_constraints.SetMandatoryReceiveAudio(true);
+ av_constraints.SetMandatoryReceiveVideo(true);
+ talk_base::scoped_ptr<SessionDescriptionInterface> offer(
+ session_->CreateOffer(&av_constraints));
+
+ // Both content descriptions should be present.
+ EXPECT_TRUE(cricket::GetFirstAudioContent(offer->description()) != NULL);
+ EXPECT_TRUE(cricket::GetFirstVideoContent(offer->description()) != NULL);
+
+ // TODO(perkj): Should the direction be set to SEND_ONLY if
+ // The constraints is set to not receive audio or video but a track is added?
+}
+
+// Test that an answer can not be created if the last remote description is not
+// an offer.
+TEST_F(WebRtcSessionTest, CreateAnswerWithoutAnOffer) {
+ Init();
+ // Complete a full offer/answer exchange so the most recent remote
+ // description is an answer, not an offer.
+ SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
+ SetLocalDescriptionWithoutError(offer);
+ SessionDescriptionInterface* answer = CreateRemoteAnswer(offer);
+ SetRemoteDescriptionWithoutError(answer);
+ // CreateAnswer must fail (return NULL) since there is no pending offer.
+ EXPECT_TRUE(session_->CreateAnswer(NULL) == NULL);
+}
+
+// Test that an answer contains the correct media content descriptions when no
+// constraints have been set.
+TEST_F(WebRtcSessionTest, CreateAnswerWithoutConstraintsOrStreams) {
+ Init();
+ // Apply a remote offer carrying both audio and video content.
+ talk_base::scoped_ptr<JsepSessionDescription> offer(CreateRemoteOffer());
+ SetRemoteDescriptionWithoutError(offer.release());
+ talk_base::scoped_ptr<SessionDescriptionInterface> answer(
+ session_->CreateAnswer(NULL));
+ // Both contents should be present and accepted.
+ const cricket::ContentInfo* audio_content =
+ cricket::GetFirstAudioContent(answer->description());
+ ASSERT_TRUE(audio_content != NULL);
+ EXPECT_FALSE(audio_content->rejected);
+
+ const cricket::ContentInfo* video_content =
+ cricket::GetFirstVideoContent(answer->description());
+ ASSERT_TRUE(video_content != NULL);
+ EXPECT_FALSE(video_content->rejected);
+}
+
+// Test that an answer contains the correct media content descriptions when no
+// constraints have been set and the offer only contains audio.
+TEST_F(WebRtcSessionTest, CreateAudioAnswerWithoutConstraintsOrStreams) {
+ Init();
+ // Build and apply a remote offer that carries audio only.
+ cricket::MediaSessionOptions audio_only;
+ audio_only.has_audio = true;
+ audio_only.has_video = false;
+ talk_base::scoped_ptr<JsepSessionDescription> offer(
+ CreateRemoteOffer(audio_only));
+ ASSERT_TRUE(cricket::GetFirstVideoContent(offer->description()) == NULL);
+ ASSERT_TRUE(cricket::GetFirstAudioContent(offer->description()) != NULL);
+
+ SetRemoteDescriptionWithoutError(offer.release());
+ talk_base::scoped_ptr<SessionDescriptionInterface> answer(
+ session_->CreateAnswer(NULL));
+ // The answer should accept audio and contain no video section at all.
+ const cricket::ContentInfo* audio_content =
+ cricket::GetFirstAudioContent(answer->description());
+ ASSERT_TRUE(audio_content != NULL);
+ EXPECT_FALSE(audio_content->rejected);
+
+ EXPECT_TRUE(cricket::GetFirstVideoContent(answer->description()) == NULL);
+}
+
+// Test that an answer contains the correct media content descriptions when no
+// constraints have been set.
+TEST_F(WebRtcSessionTest, CreateAnswerWithoutConstraints) {
+ Init();
+ // Apply a remote offer with both audio and video content.
+ talk_base::scoped_ptr<JsepSessionDescription> offer(CreateRemoteOffer());
+ SetRemoteDescriptionWithoutError(offer.release());
+ // Answer while sending a stream that has tracks.
+ mediastream_signaling_.SendAudioVideoStream1();
+ talk_base::scoped_ptr<SessionDescriptionInterface> answer(
+ session_->CreateAnswer(NULL));
+ const cricket::ContentInfo* audio_content =
+ cricket::GetFirstAudioContent(answer->description());
+ ASSERT_TRUE(audio_content != NULL);
+ EXPECT_FALSE(audio_content->rejected);
+
+ const cricket::ContentInfo* video_content =
+ cricket::GetFirstVideoContent(answer->description());
+ ASSERT_TRUE(video_content != NULL);
+ EXPECT_FALSE(video_content->rejected);
+}
+
+// Test that an answer contains the correct media content descriptions when
+// constraints have been set but no stream is sent.
+TEST_F(WebRtcSessionTest, CreateAnswerWithConstraintsWithoutStreams) {
+ Init();
+ // Apply a remote offer with both audio and video content.
+ talk_base::scoped_ptr<JsepSessionDescription> offer(CreateRemoteOffer());
+ SetRemoteDescriptionWithoutError(offer.release());
+
+ webrtc::FakeConstraints no_receive;
+ no_receive.SetMandatoryReceiveAudio(false);
+ no_receive.SetMandatoryReceiveVideo(false);
+
+ talk_base::scoped_ptr<SessionDescriptionInterface> answer(
+ session_->CreateAnswer(&no_receive));
+ // Without local streams and with receiving disabled both contents are
+ // present but rejected.
+ const cricket::ContentInfo* audio_content =
+ cricket::GetFirstAudioContent(answer->description());
+ ASSERT_TRUE(audio_content != NULL);
+ EXPECT_TRUE(audio_content->rejected);
+
+ const cricket::ContentInfo* video_content =
+ cricket::GetFirstVideoContent(answer->description());
+ ASSERT_TRUE(video_content != NULL);
+ EXPECT_TRUE(video_content->rejected);
+}
+
+// Test that an answer contains the correct media content descriptions when
+// constraints have been set and streams are sent.
+TEST_F(WebRtcSessionTest, CreateAnswerWithConstraints) {
+ Init();
+ // Apply a remote offer with both audio and video content.
+ talk_base::scoped_ptr<JsepSessionDescription> offer(CreateRemoteOffer());
+ SetRemoteDescriptionWithoutError(offer.release());
+
+ webrtc::FakeConstraints no_receive;
+ no_receive.SetMandatoryReceiveAudio(false);
+ no_receive.SetMandatoryReceiveVideo(false);
+
+ // Answer while sending a stream that has tracks.
+ mediastream_signaling_.SendAudioVideoStream1();
+ talk_base::scoped_ptr<SessionDescriptionInterface> answer(
+ session_->CreateAnswer(&no_receive));
+
+ // TODO(perkj): Should the direction be set to SEND_ONLY?
+ const cricket::ContentInfo* audio_content =
+ cricket::GetFirstAudioContent(answer->description());
+ ASSERT_TRUE(audio_content != NULL);
+ EXPECT_FALSE(audio_content->rejected);
+
+ // TODO(perkj): Should the direction be set to SEND_ONLY?
+ const cricket::ContentInfo* video_content =
+ cricket::GetFirstVideoContent(answer->description());
+ ASSERT_TRUE(video_content != NULL);
+ EXPECT_FALSE(video_content->rejected);
+}
+
+// Disabling VAD via constraints must strip comfort-noise codecs from the
+// offer's audio content.
+TEST_F(WebRtcSessionTest, CreateOfferWithoutCNCodecs) {
+ AddCNCodecs();
+ Init();
+ webrtc::FakeConstraints no_vad_constraints;
+ no_vad_constraints.SetOptionalVAD(false);
+ talk_base::scoped_ptr<SessionDescriptionInterface> offer(
+ session_->CreateOffer(&no_vad_constraints));
+ const cricket::ContentInfo* audio_content =
+ cricket::GetFirstAudioContent(offer->description());
+ EXPECT_TRUE(audio_content != NULL);
+ EXPECT_TRUE(VerifyNoCNCodecs(audio_content));
+}
+
+// Disabling VAD via constraints must strip comfort-noise codecs from the
+// answer's audio content.
+TEST_F(WebRtcSessionTest, CreateAnswerWithoutCNCodecs) {
+ AddCNCodecs();
+ Init();
+ // Apply a remote offer with both audio and video content.
+ talk_base::scoped_ptr<JsepSessionDescription> offer(CreateRemoteOffer());
+ SetRemoteDescriptionWithoutError(offer.release());
+
+ webrtc::FakeConstraints no_vad_constraints;
+ no_vad_constraints.SetOptionalVAD(false);
+ talk_base::scoped_ptr<SessionDescriptionInterface> answer(
+ session_->CreateAnswer(&no_vad_constraints));
+ const cricket::ContentInfo* audio_content =
+ cricket::GetFirstAudioContent(answer->description());
+ ASSERT_TRUE(audio_content != NULL);
+ EXPECT_TRUE(VerifyNoCNCodecs(audio_content));
+}
+
+// This test verifies the call setup when remote answer with audio only and
+// later updates with video.
+TEST_F(WebRtcSessionTest, TestAVOfferWithAudioOnlyAnswer) {
+ Init();
+ // No media channels exist before negotiation.
+ EXPECT_TRUE(media_engine_->GetVideoChannel(0) == NULL);
+ EXPECT_TRUE(media_engine_->GetVoiceChannel(0) == NULL);
+
+ mediastream_signaling_.SendAudioVideoStream1();
+ SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
+
+ // The remote side answers with audio only.
+ cricket::MediaSessionOptions options;
+ options.has_video = false;
+ SessionDescriptionInterface* answer = CreateRemoteAnswer(offer, options);
+
+ // SetLocalDescription and SetRemoteDescription take ownership of offer
+ // and answer.
+ SetLocalDescriptionWithoutError(offer);
+ SetRemoteDescriptionWithoutError(answer);
+
+ video_channel_ = media_engine_->GetVideoChannel(0);
+ voice_channel_ = media_engine_->GetVoiceChannel(0);
+
+ // Only the voice channel exists; it sends our track but receives nothing.
+ ASSERT_TRUE(video_channel_ == NULL);
+
+ ASSERT_EQ(0u, voice_channel_->recv_streams().size());
+ ASSERT_EQ(1u, voice_channel_->send_streams().size());
+ EXPECT_EQ(kAudioTrack1, voice_channel_->send_streams()[0].id);
+
+ // Let the remote end update the session descriptions, with Audio and Video.
+ mediastream_signaling_.SendAudioVideoStream2();
+ CreateAndSetRemoteOfferAndLocalAnswer();
+
+ video_channel_ = media_engine_->GetVideoChannel(0);
+ voice_channel_ = media_engine_->GetVoiceChannel(0);
+
+ ASSERT_TRUE(video_channel_ != NULL);
+ ASSERT_TRUE(voice_channel_ != NULL);
+
+ // Both channels now send and receive the stream-2 tracks.
+ ASSERT_EQ(1u, video_channel_->recv_streams().size());
+ ASSERT_EQ(1u, video_channel_->send_streams().size());
+ EXPECT_EQ(kVideoTrack2, video_channel_->recv_streams()[0].id);
+ EXPECT_EQ(kVideoTrack2, video_channel_->send_streams()[0].id);
+ ASSERT_EQ(1u, voice_channel_->recv_streams().size());
+ ASSERT_EQ(1u, voice_channel_->send_streams().size());
+ EXPECT_EQ(kAudioTrack2, voice_channel_->recv_streams()[0].id);
+ EXPECT_EQ(kAudioTrack2, voice_channel_->send_streams()[0].id);
+
+ // Change session back to audio only.
+ mediastream_signaling_.UseOptionsAudioOnly();
+ CreateAndSetRemoteOfferAndLocalAnswer();
+
+ // Video reception stops while audio continues in both directions.
+ EXPECT_EQ(0u, video_channel_->recv_streams().size());
+ ASSERT_EQ(1u, voice_channel_->recv_streams().size());
+ EXPECT_EQ(kAudioTrack2, voice_channel_->recv_streams()[0].id);
+ ASSERT_EQ(1u, voice_channel_->send_streams().size());
+ EXPECT_EQ(kAudioTrack2, voice_channel_->send_streams()[0].id);
+}
+
+// This test verifies the call setup when remote answer with video only and
+// later updates with audio.
+TEST_F(WebRtcSessionTest, TestAVOfferWithVideoOnlyAnswer) {
+ Init();
+ // No media channels exist before negotiation.
+ EXPECT_TRUE(media_engine_->GetVideoChannel(0) == NULL);
+ EXPECT_TRUE(media_engine_->GetVoiceChannel(0) == NULL);
+ mediastream_signaling_.SendAudioVideoStream1();
+ SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
+
+ // The remote side answers with video only.
+ cricket::MediaSessionOptions options;
+ options.has_audio = false;
+ options.has_video = true;
+ SessionDescriptionInterface* answer = CreateRemoteAnswer(
+ offer, options, cricket::SEC_ENABLED);
+
+ // SetLocalDescription and SetRemoteDescription take ownership of offer
+ // and answer.
+ SetLocalDescriptionWithoutError(offer);
+ SetRemoteDescriptionWithoutError(answer);
+
+ video_channel_ = media_engine_->GetVideoChannel(0);
+ voice_channel_ = media_engine_->GetVoiceChannel(0);
+
+ // Only the video channel exists; it sends our track but receives nothing.
+ ASSERT_TRUE(voice_channel_ == NULL);
+ ASSERT_TRUE(video_channel_ != NULL);
+
+ EXPECT_EQ(0u, video_channel_->recv_streams().size());
+ ASSERT_EQ(1u, video_channel_->send_streams().size());
+ EXPECT_EQ(kVideoTrack1, video_channel_->send_streams()[0].id);
+
+ // Update the session descriptions, with Audio and Video.
+ mediastream_signaling_.SendAudioVideoStream2();
+ CreateAndSetRemoteOfferAndLocalAnswer();
+
+ voice_channel_ = media_engine_->GetVoiceChannel(0);
+ ASSERT_TRUE(voice_channel_ != NULL);
+
+ // Audio now flows in both directions with the stream-2 track.
+ ASSERT_EQ(1u, voice_channel_->recv_streams().size());
+ ASSERT_EQ(1u, voice_channel_->send_streams().size());
+ EXPECT_EQ(kAudioTrack2, voice_channel_->recv_streams()[0].id);
+ EXPECT_EQ(kAudioTrack2, voice_channel_->send_streams()[0].id);
+
+ // Change session back to video only.
+ mediastream_signaling_.UseOptionsVideoOnly();
+ CreateAndSetRemoteOfferAndLocalAnswer();
+
+ video_channel_ = media_engine_->GetVideoChannel(0);
+ voice_channel_ = media_engine_->GetVoiceChannel(0);
+
+ ASSERT_EQ(1u, video_channel_->recv_streams().size());
+ EXPECT_EQ(kVideoTrack2, video_channel_->recv_streams()[0].id);
+ ASSERT_EQ(1u, video_channel_->send_streams().size());
+ EXPECT_EQ(kVideoTrack2, video_channel_->send_streams()[0].id);
+}
+
+// Verify that the session's secure policy defaults to SEC_REQUIRED after
+// initialization.
+TEST_F(WebRtcSessionTest, TestDefaultSetSecurePolicy) {
+ Init();
+ EXPECT_EQ(cricket::SEC_REQUIRED, session_->secure_policy());
+}
+
+// Verify that both the offer and the answer SDP contain crypto parameters
+// when the default (secure) policy is in effect.
+TEST_F(WebRtcSessionTest, VerifyCryptoParamsInSDP) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ scoped_ptr<SessionDescriptionInterface> offer(
+ session_->CreateOffer(NULL));
+ VerifyCryptoParams(offer->description());
+ SetRemoteDescriptionWithoutError(offer.release());
+ // Own the answer so it is not leaked: CreateAnswer transfers ownership to
+ // the caller and the answer is never handed to SetLocalDescription here.
+ scoped_ptr<SessionDescriptionInterface> answer(
+ session_->CreateAnswer(NULL));
+ VerifyCryptoParams(answer->description());
+}
+
+// With crypto disabled, the generated offer must not advertise any crypto
+// parameters.
+TEST_F(WebRtcSessionTest, VerifyNoCryptoParamsInSDP) {
+ Init();
+ session_->set_secure_policy(cricket::SEC_DISABLED);
+ mediastream_signaling_.SendAudioVideoStream1();
+ scoped_ptr<SessionDescriptionInterface> offer(session_->CreateOffer(NULL));
+ VerifyNoCryptoParams(offer->description(), false);
+}
+
+// Exercises the fixture helper VerifyAnswerFromNonCryptoOffer(), which checks
+// the answer produced for a remote offer that carries no crypto parameters.
+TEST_F(WebRtcSessionTest, VerifyAnswerFromNonCryptoOffer) {
+ Init();
+ VerifyAnswerFromNonCryptoOffer();
+}
+
+// Exercises the fixture helper VerifyAnswerFromCryptoOffer(), which checks
+// the answer produced for a remote offer that carries crypto parameters.
+TEST_F(WebRtcSessionTest, VerifyAnswerFromCryptoOffer) {
+ Init();
+ VerifyAnswerFromCryptoOffer();
+}
+
+TEST_F(WebRtcSessionTest, VerifyBundleFlagInPA) {
+ // This test verifies BUNDLE flag in PortAllocator, if BUNDLE information in
+ // local description is removed by the application, BUNDLE flag should be
+ // disabled in PortAllocator. By default BUNDLE is enabled in the WebRtc.
+ Init();
+ EXPECT_TRUE((cricket::PORTALLOCATOR_ENABLE_BUNDLE & allocator_.flags()) ==
+ cricket::PORTALLOCATOR_ENABLE_BUNDLE);
+ talk_base::scoped_ptr<SessionDescriptionInterface> offer(
+ session_->CreateOffer(NULL));
+ // Copy the offer's description and strip the BUNDLE group from the copy.
+ cricket::SessionDescription* offer_copy =
+ offer->description()->Copy();
+ offer_copy->RemoveGroupByName(cricket::GROUP_TYPE_BUNDLE);
+ // NOTE(review): Initialize() appears to take ownership of |offer_copy|, and
+ // SetLocalDescription of |modified_offer| -- confirm neither is leaked.
+ JsepSessionDescription* modified_offer =
+ new JsepSessionDescription(JsepSessionDescription::kOffer);
+ modified_offer->Initialize(offer_copy, "1", "1");
+
+ SetLocalDescriptionWithoutError(modified_offer);
+ // The allocator must drop its BUNDLE flag once the group is gone.
+ EXPECT_FALSE(allocator_.flags() & cricket::PORTALLOCATOR_ENABLE_BUNDLE);
+}
+
+// Verifies that when the remote answer has its BUNDLE group removed, the
+// PortAllocator BUNDLE flag stays enabled and media still flows on separate
+// channels with the expected send/receive tracks.
+TEST_F(WebRtcSessionTest, TestDisabledBundleInAnswer) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ // BUNDLE is enabled in the allocator by default.
+ EXPECT_TRUE((cricket::PORTALLOCATOR_ENABLE_BUNDLE & allocator_.flags()) ==
+ cricket::PORTALLOCATOR_ENABLE_BUNDLE);
+ FakeConstraints constraints;
+ constraints.SetMandatoryUseRtpMux(true);
+ SessionDescriptionInterface* offer = session_->CreateOffer(&constraints);
+ SetLocalDescriptionWithoutError(offer);
+ mediastream_signaling_.SendAudioVideoStream2();
+ // Build a remote answer with the BUNDLE group stripped out.
+ talk_base::scoped_ptr<SessionDescriptionInterface> answer(
+ CreateRemoteAnswer(session_->local_description()));
+ cricket::SessionDescription* answer_copy = answer->description()->Copy();
+ answer_copy->RemoveGroupByName(cricket::GROUP_TYPE_BUNDLE);
+ JsepSessionDescription* modified_answer =
+ new JsepSessionDescription(JsepSessionDescription::kAnswer);
+ modified_answer->Initialize(answer_copy, "1", "1");
+ SetRemoteDescriptionWithoutError(modified_answer);
+ // The allocator keeps its BUNDLE flag even though the answer disabled it.
+ EXPECT_TRUE((cricket::PORTALLOCATOR_ENABLE_BUNDLE & allocator_.flags()) ==
+ cricket::PORTALLOCATOR_ENABLE_BUNDLE);
+
+ video_channel_ = media_engine_->GetVideoChannel(0);
+ voice_channel_ = media_engine_->GetVoiceChannel(0);
+
+ // Receive streams carry the stream-2 tracks, send streams the stream-1
+ // tracks (the local offer predates SendAudioVideoStream2).
+ ASSERT_EQ(1u, video_channel_->recv_streams().size());
+ EXPECT_TRUE(kVideoTrack2 == video_channel_->recv_streams()[0].id);
+
+ ASSERT_EQ(1u, voice_channel_->recv_streams().size());
+ EXPECT_TRUE(kAudioTrack2 == voice_channel_->recv_streams()[0].id);
+
+ ASSERT_EQ(1u, video_channel_->send_streams().size());
+ EXPECT_TRUE(kVideoTrack1 == video_channel_->send_streams()[0].id);
+ ASSERT_EQ(1u, voice_channel_->send_streams().size());
+ EXPECT_TRUE(kAudioTrack1 == voice_channel_->send_streams()[0].id);
+}
+
+// SetAudioPlayout must toggle the output volume of the receive stream
+// between full scale and mute.
+TEST_F(WebRtcSessionTest, SetAudioPlayout) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ CreateAndSetRemoteOfferAndLocalAnswer();
+ cricket::FakeVoiceMediaChannel* voice_channel =
+ media_engine_->GetVoiceChannel(0);
+ ASSERT_TRUE(voice_channel != NULL);
+ ASSERT_EQ(1u, voice_channel->recv_streams().size());
+ uint32 ssrc = voice_channel->recv_streams()[0].first_ssrc();
+ // Playout starts enabled: full volume on both output channels.
+ double volume_left, volume_right;
+ EXPECT_TRUE(voice_channel->GetOutputScaling(ssrc, &volume_left,
+ &volume_right));
+ EXPECT_EQ(1, volume_left);
+ EXPECT_EQ(1, volume_right);
+ // Disabling playout mutes both output channels.
+ session_->SetAudioPlayout(ssrc, false);
+ EXPECT_TRUE(voice_channel->GetOutputScaling(ssrc, &volume_left,
+ &volume_right));
+ EXPECT_EQ(0, volume_left);
+ EXPECT_EQ(0, volume_right);
+ // Re-enabling playout restores full volume.
+ session_->SetAudioPlayout(ssrc, true);
+ EXPECT_TRUE(voice_channel->GetOutputScaling(ssrc, &volume_left,
+ &volume_right));
+ EXPECT_EQ(1, volume_left);
+ EXPECT_EQ(1, volume_right);
+}
+
+// SetAudioSend must toggle the mute state of the send stream and apply the
+// supplied audio options only while sending is enabled.
+TEST_F(WebRtcSessionTest, SetAudioSend) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ CreateAndSetRemoteOfferAndLocalAnswer();
+ cricket::FakeVoiceMediaChannel* voice_channel =
+ media_engine_->GetVoiceChannel(0);
+ ASSERT_TRUE(voice_channel != NULL);
+ ASSERT_EQ(1u, voice_channel->send_streams().size());
+ uint32 ssrc = voice_channel->send_streams()[0].first_ssrc();
+ EXPECT_FALSE(voice_channel->IsStreamMuted(ssrc));
+
+ cricket::AudioOptions audio_options;
+ audio_options.echo_cancellation.Set(true);
+
+ // Disabling send mutes the stream; the options are not applied.
+ session_->SetAudioSend(ssrc, false, audio_options);
+ EXPECT_TRUE(voice_channel->IsStreamMuted(ssrc));
+ EXPECT_FALSE(voice_channel->options().echo_cancellation.IsSet());
+
+ // Enabling send unmutes the stream and applies the options.
+ session_->SetAudioSend(ssrc, true, audio_options);
+ EXPECT_FALSE(voice_channel->IsStreamMuted(ssrc));
+ bool echo_enabled;
+ EXPECT_TRUE(voice_channel->options().echo_cancellation.Get(&echo_enabled));
+ EXPECT_TRUE(echo_enabled);
+}
+
+// SetVideoPlayout must attach the renderer when enabled and detach it when
+// disabled.
+TEST_F(WebRtcSessionTest, SetVideoPlayout) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ CreateAndSetRemoteOfferAndLocalAnswer();
+ cricket::FakeVideoMediaChannel* video_channel =
+ media_engine_->GetVideoChannel(0);
+ ASSERT_TRUE(video_channel != NULL);
+ // Initially no renderer is attached.
+ ASSERT_LT(0u, video_channel->renderers().size());
+ EXPECT_TRUE(video_channel->renderers().begin()->second == NULL);
+ ASSERT_EQ(1u, video_channel->recv_streams().size());
+ uint32 ssrc = video_channel->recv_streams()[0].first_ssrc();
+ cricket::FakeVideoRenderer renderer;
+ session_->SetVideoPlayout(ssrc, true, &renderer);
+ EXPECT_TRUE(video_channel->renderers().begin()->second == &renderer);
+ session_->SetVideoPlayout(ssrc, false, &renderer);
+ EXPECT_TRUE(video_channel->renderers().begin()->second == NULL);
+}
+
+// SetVideoSend must toggle the mute state of the video send stream.
+TEST_F(WebRtcSessionTest, SetVideoSend) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ CreateAndSetRemoteOfferAndLocalAnswer();
+ cricket::FakeVideoMediaChannel* video_channel =
+ media_engine_->GetVideoChannel(0);
+ ASSERT_TRUE(video_channel != NULL);
+ ASSERT_EQ(1u, video_channel->send_streams().size());
+ uint32 ssrc = video_channel->send_streams()[0].first_ssrc();
+ EXPECT_FALSE(video_channel->IsStreamMuted(ssrc));
+ // A NULL options pointer is legal: only the mute state changes.
+ cricket::VideoOptions* null_options = NULL;
+ session_->SetVideoSend(ssrc, false, null_options);
+ EXPECT_TRUE(video_channel->IsStreamMuted(ssrc));
+ session_->SetVideoSend(ssrc, true, null_options);
+ EXPECT_FALSE(video_channel->IsStreamMuted(ssrc));
+}
+
+// Exercises the shared TestCanInsertDtmf helper with the expectation that
+// DTMF insertion is NOT possible.
+TEST_F(WebRtcSessionTest, CanNotInsertDtmf) {
+ TestCanInsertDtmf(false);
+}
+
+// Exercises the shared TestCanInsertDtmf helper with the expectation that
+// DTMF insertion IS possible.
+TEST_F(WebRtcSessionTest, CanInsertDtmf) {
+ TestCanInsertDtmf(true);
+}
+
+// Verify that digits passed to InsertDtmf are queued on the voice channel in
+// order, with the sending ssrc, the requested duration and the DF_SEND flag.
+TEST_F(WebRtcSessionTest, InsertDtmf) {
+ // Setup
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ CreateAndSetRemoteOfferAndLocalAnswer();
+ FakeVoiceMediaChannel* channel = media_engine_->GetVoiceChannel(0);
+ EXPECT_EQ(0U, channel->dtmf_info_queue().size());
+
+ // Insert DTMF digits 0, 1 and 2.
+ const int expected_flags = DF_SEND;
+ const int expected_duration = 90;
+ session_->InsertDtmf(kAudioTrack1, 0, expected_duration);
+ session_->InsertDtmf(kAudioTrack1, 1, expected_duration);
+ session_->InsertDtmf(kAudioTrack1, 2, expected_duration);
+
+ // Verify each digit was queued in insertion order.
+ ASSERT_EQ(3U, channel->dtmf_info_queue().size());
+ const uint32 send_ssrc = channel->send_streams()[0].first_ssrc();
+ EXPECT_TRUE(CompareDtmfInfo(channel->dtmf_info_queue()[0], send_ssrc, 0,
+ expected_duration, expected_flags));
+ EXPECT_TRUE(CompareDtmfInfo(channel->dtmf_info_queue()[1], send_ssrc, 1,
+ expected_duration, expected_flags));
+ EXPECT_TRUE(CompareDtmfInfo(channel->dtmf_info_queue()[2], send_ssrc, 2,
+ expected_duration, expected_flags));
+}
+
+// This test verifies the |initiator| flag when session initiates the call.
+TEST_F(WebRtcSessionTest, TestInitiatorFlagAsOriginator) {
+ Init();
+ EXPECT_FALSE(session_->initiator());
+ SessionDescriptionInterface* local_offer = session_->CreateOffer(NULL);
+ SessionDescriptionInterface* remote_answer = CreateRemoteAnswer(local_offer);
+ // Applying our own offer marks the session as initiator...
+ SetLocalDescriptionWithoutError(local_offer);
+ EXPECT_TRUE(session_->initiator());
+ // ...and the flag remains set after the remote answer is applied.
+ SetRemoteDescriptionWithoutError(remote_answer);
+ EXPECT_TRUE(session_->initiator());
+}
+
+// This test verifies the |initiator| flag when session receives the call.
+TEST_F(WebRtcSessionTest, TestInitiatorFlagAsReceiver) {
+ Init();
+ EXPECT_FALSE(session_->initiator());
+ // Apply a remote offer and answer it locally; the flag must remain false
+ // throughout since the remote side initiated the call.
+ SessionDescriptionInterface* remote_offer = CreateRemoteOffer();
+ SetRemoteDescriptionWithoutError(remote_offer);
+ SessionDescriptionInterface* local_answer = session_->CreateAnswer(NULL);
+
+ EXPECT_FALSE(session_->initiator());
+ SetLocalDescriptionWithoutError(local_answer);
+ EXPECT_FALSE(session_->initiator());
+}
+
+// This test verifies the ice protocol type at initiator of the call
+// if |a=ice-options:google-ice| is present in answer.
+TEST_F(WebRtcSessionTest, TestInitiatorGIceInAnswer) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
+ // Own the intermediate answer so it is not leaked: only the re-parsed
+ // google-ice version below is handed to the session.
+ talk_base::scoped_ptr<SessionDescriptionInterface> answer(
+ CreateRemoteAnswer(offer));
+ SetLocalDescriptionWithoutError(offer);
+ std::string sdp;
+ EXPECT_TRUE(answer->ToString(&sdp));
+ // Adding ice-options to the session level.
+ InjectAfter("t=0 0\r\n",
+ "a=ice-options:google-ice\r\n",
+ &sdp);
+ SessionDescriptionInterface* answer_with_gice =
+ CreateSessionDescription(JsepSessionDescription::kAnswer, sdp, NULL);
+ SetRemoteDescriptionWithoutError(answer_with_gice);
+ // Both transports should use the legacy Google ICE protocol.
+ VerifyTransportType("audio", cricket::ICEPROTO_GOOGLE);
+ VerifyTransportType("video", cricket::ICEPROTO_GOOGLE);
+}
+
+// This test verifies the ice protocol type at initiator of the call
+// if ICE RFC5245 is supported in answer.
+TEST_F(WebRtcSessionTest, TestInitiatorIceInAnswer) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ SessionDescriptionInterface* local_offer = session_->CreateOffer(NULL);
+ SessionDescriptionInterface* remote_answer = CreateRemoteAnswer(local_offer);
+ SetLocalDescriptionWithoutError(local_offer);
+
+ // The unmodified answer results in standard ICE on both transports.
+ SetRemoteDescriptionWithoutError(remote_answer);
+ VerifyTransportType("audio", cricket::ICEPROTO_RFC5245);
+ VerifyTransportType("video", cricket::ICEPROTO_RFC5245);
+}
+
+// This test verifies the ice protocol type at receiver side of the call if
+// receiver decides to use google-ice.
+TEST_F(WebRtcSessionTest, TestReceiverGIceInOffer) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
+ SetRemoteDescriptionWithoutError(offer);
+ // Own the intermediate answer so it is not leaked: only the re-parsed
+ // google-ice version below is handed to the session.
+ talk_base::scoped_ptr<SessionDescriptionInterface> answer(
+ session_->CreateAnswer(NULL));
+ std::string sdp;
+ EXPECT_TRUE(answer->ToString(&sdp));
+ // Adding ice-options to the session level.
+ InjectAfter("t=0 0\r\n",
+ "a=ice-options:google-ice\r\n",
+ &sdp);
+ SessionDescriptionInterface* answer_with_gice =
+ CreateSessionDescription(JsepSessionDescription::kAnswer, sdp, NULL);
+ SetLocalDescriptionWithoutError(answer_with_gice);
+ // Both transports should use the legacy Google ICE protocol.
+ VerifyTransportType("audio", cricket::ICEPROTO_GOOGLE);
+ VerifyTransportType("video", cricket::ICEPROTO_GOOGLE);
+}
+
+// This test verifies the ice protocol type at receiver side of the call if
+// receiver decides to use ice RFC 5245.
+TEST_F(WebRtcSessionTest, TestReceiverIceInOffer) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
+ SetRemoteDescriptionWithoutError(offer);
+ SessionDescriptionInterface* answer = session_->CreateAnswer(NULL);
+ SetLocalDescriptionWithoutError(answer);
+ VerifyTransportType("audio", cricket::ICEPROTO_RFC5245);
+ VerifyTransportType("video", cricket::ICEPROTO_RFC5245);
+}
+
+// This test verifies the session state when ICE RFC5245 in offer and
+// ICE google-ice in answer.
+TEST_F(WebRtcSessionTest, TestIceOfferGIceOnlyAnswer) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ talk_base::scoped_ptr<SessionDescriptionInterface> offer(
+ session_->CreateOffer(NULL));
+ std::string offer_str;
+ offer->ToString(&offer_str);
+ // Disable google-ice
+ const std::string gice_option = "google-ice";
+ const std::string xgoogle_xice = "xgoogle-xice";
+ talk_base::replace_substrs(gice_option.c_str(), gice_option.length(),
+ xgoogle_xice.c_str(), xgoogle_xice.length(),
+ &offer_str);
+ JsepSessionDescription *ice_only_offer =
+ new JsepSessionDescription(JsepSessionDescription::kOffer);
+ EXPECT_TRUE((ice_only_offer)->Initialize(offer_str, NULL));
+ SetLocalDescriptionWithoutError(ice_only_offer);
+ std::string original_offer_sdp;
+ EXPECT_TRUE(offer->ToString(&original_offer_sdp));
+ SessionDescriptionInterface* pranswer_with_gice =
+ CreateSessionDescription(JsepSessionDescription::kPrAnswer,
+ original_offer_sdp, NULL);
+ SetRemoteDescriptionExpectError(kPushDownPranswerTDFailed,
+ pranswer_with_gice);
+ SessionDescriptionInterface* answer_with_gice =
+ CreateSessionDescription(JsepSessionDescription::kAnswer,
+ original_offer_sdp, NULL);
+ SetRemoteDescriptionExpectError(kPushDownAnswerTDFailed, answer_with_gice);
+}
+
+// Verifing local offer and remote answer have matching m-lines as per RFC 3264.
+TEST_F(WebRtcSessionTest, TestIncorrectMLinesInRemoteAnswer) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
+ SetLocalDescriptionWithoutError(offer);
+ talk_base::scoped_ptr<SessionDescriptionInterface> answer(
+ CreateRemoteAnswer(session_->local_description()));
+
+ cricket::SessionDescription* answer_copy = answer->description()->Copy();
+ answer_copy->RemoveContentByName("video");
+ JsepSessionDescription* modified_answer =
+ new JsepSessionDescription(JsepSessionDescription::kAnswer);
+
+ EXPECT_TRUE(modified_answer->Initialize(answer_copy,
+ answer->session_id(),
+ answer->session_version()));
+ SetRemoteDescriptionExpectError(kMlineMismatch, modified_answer);
+
+ // Modifying content names.
+ std::string sdp;
+ EXPECT_TRUE(answer->ToString(&sdp));
+ const std::string kAudioMid = "a=mid:audio";
+ const std::string kAudioMidReplaceStr = "a=mid:audio_content_name";
+
+ // Replacing |audio| with |audio_content_name|.
+ talk_base::replace_substrs(kAudioMid.c_str(), kAudioMid.length(),
+ kAudioMidReplaceStr.c_str(),
+ kAudioMidReplaceStr.length(),
+ &sdp);
+
+ SessionDescriptionInterface* modified_answer1 =
+ CreateSessionDescription(JsepSessionDescription::kAnswer, sdp, NULL);
+ SetRemoteDescriptionExpectError(kMlineMismatch, modified_answer1);
+
+ SetRemoteDescriptionWithoutError(answer.release());
+}
+
+// Verifying remote offer and local answer have matching m-lines as per
+// RFC 3264.
+TEST_F(WebRtcSessionTest, TestIncorrectMLinesInLocalAnswer) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ SessionDescriptionInterface* offer = CreateRemoteOffer();
+ SetRemoteDescriptionWithoutError(offer);
+ SessionDescriptionInterface* answer = session_->CreateAnswer(NULL);
+
+ cricket::SessionDescription* answer_copy = answer->description()->Copy();
+ answer_copy->RemoveContentByName("video");
+ JsepSessionDescription* modified_answer =
+ new JsepSessionDescription(JsepSessionDescription::kAnswer);
+
+ EXPECT_TRUE(modified_answer->Initialize(answer_copy,
+ answer->session_id(),
+ answer->session_version()));
+ SetLocalDescriptionExpectError(kMlineMismatch, modified_answer);
+ SetLocalDescriptionWithoutError(answer);
+}
+
+// This test verifies that WebRtcSession does not start candidate allocation
+// before SetLocalDescription is called.
+TEST_F(WebRtcSessionTest, TestIceStartAfterSetLocalDescriptionOnly) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ SessionDescriptionInterface* offer = CreateRemoteOffer();
+ cricket::Candidate candidate;
+ candidate.set_component(1);
+ JsepIceCandidate ice_candidate(kMediaContentName0, kMediaContentIndex0,
+ candidate);
+ EXPECT_TRUE(offer->AddCandidate(&ice_candidate));
+ cricket::Candidate candidate1;
+ candidate1.set_component(1);
+ JsepIceCandidate ice_candidate1(kMediaContentName1, kMediaContentIndex1,
+ candidate1);
+ EXPECT_TRUE(offer->AddCandidate(&ice_candidate1));
+ SetRemoteDescriptionWithoutError(offer);
+ ASSERT_TRUE(session_->GetTransportProxy("audio") != NULL);
+ ASSERT_TRUE(session_->GetTransportProxy("video") != NULL);
+
+ // Pump for 1 second and verify that no candidates are generated.
+ talk_base::Thread::Current()->ProcessMessages(1000);
+ EXPECT_TRUE(observer_.mline_0_candidates_.empty());
+ EXPECT_TRUE(observer_.mline_1_candidates_.empty());
+
+ SessionDescriptionInterface* answer = session_->CreateAnswer(NULL);
+ SetLocalDescriptionWithoutError(answer);
+ EXPECT_TRUE(session_->GetTransportProxy("audio")->negotiated());
+ EXPECT_TRUE(session_->GetTransportProxy("video")->negotiated());
+ EXPECT_TRUE_WAIT(observer_.oncandidatesready_, kIceCandidatesTimeout);
+}
+
+// This test verifies that crypto parameter is updated in local session
+// description as per security policy set in MediaSessionDescriptionFactory.
+TEST_F(WebRtcSessionTest, TestCryptoAfterSetLocalDescription) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ talk_base::scoped_ptr<SessionDescriptionInterface> offer(
+ session_->CreateOffer(NULL));
+
+ // Making sure SetLocalDescription correctly sets crypto value in
+ // SessionDescription object after de-serialization of sdp string. The value
+ // will be set as per MediaSessionDescriptionFactory.
+ std::string offer_str;
+ offer->ToString(&offer_str);
+ SessionDescriptionInterface* jsep_offer_str =
+ CreateSessionDescription(JsepSessionDescription::kOffer, offer_str, NULL);
+ SetLocalDescriptionWithoutError(jsep_offer_str);
+ EXPECT_TRUE(session_->voice_channel()->secure_required());
+ EXPECT_TRUE(session_->video_channel()->secure_required());
+}
+
+// This test verifies the crypto parameter when security is disabled.
+TEST_F(WebRtcSessionTest, TestCryptoAfterSetLocalDescriptionWithDisabled) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ session_->set_secure_policy(cricket::SEC_DISABLED);
+ talk_base::scoped_ptr<SessionDescriptionInterface> offer(
+ session_->CreateOffer(NULL));
+
+ // Making sure SetLocalDescription correctly sets crypto value in
+ // SessionDescription object after de-serialization of sdp string. The value
+ // will be set as per MediaSessionDescriptionFactory.
+ std::string offer_str;
+ offer->ToString(&offer_str);
+ SessionDescriptionInterface *jsep_offer_str =
+ CreateSessionDescription(JsepSessionDescription::kOffer, offer_str, NULL);
+ SetLocalDescriptionWithoutError(jsep_offer_str);
+ EXPECT_FALSE(session_->voice_channel()->secure_required());
+ EXPECT_FALSE(session_->video_channel()->secure_required());
+}
+
// This test verifies that an answer contains new ufrag and password if an offer
// with new ufrag and password is received.
TEST_F(WebRtcSessionTest, TestCreateAnswerWithNewUfragAndPassword) {
  Init();
  cricket::MediaSessionOptions options;
  options.has_audio = true;
  options.has_video = true;
  talk_base::scoped_ptr<JsepSessionDescription> offer(
      CreateRemoteOffer(options));
  SetRemoteDescriptionWithoutError(offer.release());

  // Complete the initial offer/answer exchange.
  mediastream_signaling_.SendAudioVideoStream1();
  talk_base::scoped_ptr<SessionDescriptionInterface> answer(
      session_->CreateAnswer(NULL));
  SetLocalDescriptionWithoutError(answer.release());

  // Receive an offer with new ufrag and password.
  options.transport_options.ice_restart = true;
  talk_base::scoped_ptr<JsepSessionDescription> updated_offer1(
      CreateRemoteOffer(options,
                        session_->remote_description()));
  SetRemoteDescriptionWithoutError(updated_offer1.release());

  talk_base::scoped_ptr<SessionDescriptionInterface> updated_answer1(
      session_->CreateAnswer(NULL));

  // Third argument presumably means "expect matching ufrag/password" — here
  // |false|, since the ICE-restart offer should force fresh credentials in
  // the new answer. TODO(review): confirm against the helper's definition.
  CompareIceUfragAndPassword(updated_answer1->description(),
                             session_->local_description()->description(),
                             false);

  SetLocalDescriptionWithoutError(updated_answer1.release());

  // Receive yet an offer without changed ufrag or password.
  options.transport_options.ice_restart = false;
  talk_base::scoped_ptr<JsepSessionDescription> updated_offer2(
      CreateRemoteOffer(options,
                        session_->remote_description()));
  SetRemoteDescriptionWithoutError(updated_offer2.release());

  talk_base::scoped_ptr<SessionDescriptionInterface> updated_answer2(
      session_->CreateAnswer(NULL));

  // |true|: without an ICE restart the new answer should keep the same
  // credentials as the current local description.
  CompareIceUfragAndPassword(updated_answer2->description(),
                             session_->local_description()->description(),
                             true);

  SetLocalDescriptionWithoutError(updated_answer2.release());
}
+
+TEST_F(WebRtcSessionTest, TestSessionContentError) {
+ Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ SessionDescriptionInterface* offer = session_->CreateOffer(NULL);
+ const std::string session_id_orig = offer->session_id();
+ const std::string session_version_orig = offer->session_version();
+ SetLocalDescriptionWithoutError(offer);
+
+ video_channel_ = media_engine_->GetVideoChannel(0);
+ video_channel_->set_fail_set_send_codecs(true);
+
+ mediastream_signaling_.SendAudioVideoStream2();
+ SessionDescriptionInterface* answer =
+ CreateRemoteAnswer(session_->local_description());
+ SetRemoteDescriptionExpectError("ERROR_CONTENT", answer);
+}
+
+// Runs the loopback call test with BUNDLE and STUN disabled.
+TEST_F(WebRtcSessionTest, TestIceStatesBasic) {
+ // Lets try with only UDP ports.
+ allocator_.set_flags(cricket::PORTALLOCATOR_ENABLE_SHARED_UFRAG |
+ cricket::PORTALLOCATOR_DISABLE_TCP |
+ cricket::PORTALLOCATOR_DISABLE_STUN |
+ cricket::PORTALLOCATOR_DISABLE_RELAY);
+ TestLoopbackCall();
+}
+
+// Regression-test for a crash which should have been an error.
+TEST_F(WebRtcSessionTest, TestNoStateTransitionPendingError) {
+ Init();
+ cricket::MediaSessionOptions options;
+ options.has_audio = true;
+ options.has_video = true;
+
+ session_->SetError(cricket::BaseSession::ERROR_CONTENT);
+ SessionDescriptionInterface* offer = CreateRemoteOffer(options);
+ SessionDescriptionInterface* answer =
+ CreateRemoteAnswer(offer, options);
+ SetRemoteDescriptionExpectError(kSessionError, offer);
+ SetLocalDescriptionExpectError(kSessionError, answer);
+ // Not crashing is our success.
+}
+
// Verifies that enabling the RTP data channel constraint makes the session
// create an RTP data channel.
TEST_F(WebRtcSessionTest, TestRtpDataChannel) {
  constraints_.reset(new FakeConstraints());
  constraints_->AddOptional(
      webrtc::MediaConstraintsInterface::kEnableRtpDataChannels, true);
  Init();

  SetLocalDescriptionWithDataChannel();
  // The fake data engine records the type of the last channel it created.
  EXPECT_EQ(cricket::DCT_RTP, data_engine_->last_channel_type());
}
+
// Verifies that the RTP data channel constraint takes precedence over the
// SCTP one when both are supplied, even with DTLS-SRTP enabled (which would
// otherwise make SCTP possible).
TEST_F(WebRtcSessionTest, TestRtpDataChannelConstraintTakesPrecedence) {
  // Skip on platforms without DTLS-SRTP support.
  MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);

  constraints_.reset(new FakeConstraints());
  constraints_->AddOptional(
      webrtc::MediaConstraintsInterface::kEnableRtpDataChannels, true);
  constraints_->AddOptional(
      webrtc::MediaConstraintsInterface::kEnableSctpDataChannels, true);
  constraints_->AddOptional(
      webrtc::MediaConstraintsInterface::kEnableDtlsSrtp, true);
  Init();

  SetLocalDescriptionWithDataChannel();
  // RTP wins even though SCTP was also requested.
  EXPECT_EQ(cricket::DCT_RTP, data_engine_->last_channel_type());
}
+
// Verifies that requesting SCTP data channels without enabling DTLS results
// in no data channel being created.
TEST_F(WebRtcSessionTest, TestSctpDataChannelWithoutDtls) {
  constraints_.reset(new FakeConstraints());
  constraints_->AddOptional(
      webrtc::MediaConstraintsInterface::kEnableSctpDataChannels, true);
  Init();

  SetLocalDescriptionWithDataChannel();
  // DCT_NONE: the fake data engine never saw a channel get created.
  EXPECT_EQ(cricket::DCT_NONE, data_engine_->last_channel_type());
}
+
// Verifies that an SCTP data channel is created when both the SCTP data
// channel constraint and DTLS-SRTP are enabled.
TEST_F(WebRtcSessionTest, TestSctpDataChannelWithDtls) {
  // Skip on platforms without DTLS-SRTP support.
  MAYBE_SKIP_TEST(talk_base::SSLStreamAdapter::HaveDtlsSrtp);

  constraints_.reset(new FakeConstraints());
  constraints_->AddOptional(
      webrtc::MediaConstraintsInterface::kEnableSctpDataChannels, true);
  constraints_->AddOptional(
      webrtc::MediaConstraintsInterface::kEnableDtlsSrtp, true);
  Init();

  SetLocalDescriptionWithDataChannel();
  EXPECT_EQ(cricket::DCT_SCTP, data_engine_->last_channel_type());
}
+// TODO(bemasc): Add a TestIceStatesBundle with BUNDLE enabled. That test
+// currently fails because upon disconnection and reconnection OnIceComplete is
+// called more than once without returning to IceGatheringGathering.