/*
* Copyright 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "pc/media_session.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "media/base/codec.h"
#include "media/base/test_utils.h"
#include "media/sctp/sctp_transport_internal.h"
#include "p2p/base/p2p_constants.h"
#include "p2p/base/transport_description.h"
#include "p2p/base/transport_info.h"
#include "pc/rtp_media_utils.h"
#include "pc/srtp_filter.h"
#include "rtc_base/checks.h"
#include "rtc_base/fake_ssl_identity.h"
#include "rtc_base/gunit.h"
#include "rtc_base/message_digest.h"
#include "rtc_base/ssl_adapter.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/unique_id_generator.h"
#include "test/field_trial.h"
#include "test/gmock.h"
#define ASSERT_CRYPTO(cd, s, cs) \
ASSERT_EQ(s, cd->cryptos().size()); \
ASSERT_EQ(cs, cd->cryptos()[0].cipher_suite)
typedef std::vector<cricket::Candidate> Candidates;
using cricket::AudioCodec;
using cricket::AudioContentDescription;
using cricket::ContentInfo;
using cricket::CryptoParamsVec;
using cricket::GetFirstAudioContent;
using cricket::GetFirstAudioContentDescription;
using cricket::GetFirstDataContent;
using cricket::GetFirstVideoContent;
using cricket::GetFirstVideoContentDescription;
using cricket::kAutoBandwidth;
using cricket::MEDIA_TYPE_AUDIO;
using cricket::MEDIA_TYPE_DATA;
using cricket::MEDIA_TYPE_VIDEO;
using cricket::MediaContentDescription;
using cricket::MediaDescriptionOptions;
using cricket::MediaProtocolType;
using cricket::MediaSessionDescriptionFactory;
using cricket::MediaSessionOptions;
using cricket::MediaType;
using cricket::RidDescription;
using cricket::RidDirection;
using cricket::SctpDataContentDescription;
using cricket::SEC_DISABLED;
using cricket::SEC_ENABLED;
using cricket::SEC_REQUIRED;
using cricket::SessionDescription;
using cricket::SimulcastDescription;
using cricket::SimulcastLayer;
using cricket::SimulcastLayerList;
using cricket::SsrcGroup;
using cricket::StreamParams;
using cricket::StreamParamsVec;
using cricket::TransportDescription;
using cricket::TransportDescriptionFactory;
using cricket::TransportInfo;
using cricket::VideoCodec;
using cricket::VideoContentDescription;
using rtc::kCsAeadAes128Gcm;
using rtc::kCsAeadAes256Gcm;
using rtc::kCsAesCm128HmacSha1_32;
using rtc::kCsAesCm128HmacSha1_80;
using rtc::UniqueRandomIdGenerator;
using ::testing::Contains;
using ::testing::Each;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::Field;
using ::testing::IsEmpty;
using ::testing::IsFalse;
using ::testing::Ne;
using ::testing::Not;
using ::testing::Pointwise;
using ::testing::SizeIs;
using webrtc::RtpExtension;
using webrtc::RtpTransceiverDirection;
static const AudioCodec kAudioCodecs1[] = {
AudioCodec(103, "ISAC", 16000, -1, 1),
AudioCodec(102, "iLBC", 8000, 13300, 1),
AudioCodec(0, "PCMU", 8000, 64000, 1),
AudioCodec(8, "PCMA", 8000, 64000, 1),
AudioCodec(117, "red", 8000, 0, 1),
AudioCodec(107, "CN", 48000, 0, 1)};
static const AudioCodec kAudioCodecs2[] = {
AudioCodec(126, "foo", 16000, 22000, 1),
AudioCodec(0, "PCMU", 8000, 64000, 1),
AudioCodec(127, "iLBC", 8000, 13300, 1),
};
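// The expected result of negotiating kAudioCodecs1 (offer) against
// kAudioCodecs2 (answer): the codecs common to both lists by name, with the
// offerer's payload types.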
static const AudioCodec kAudioCodecsAnswer[] = {
AudioCodec(102, "iLBC", 8000, 13300, 1),
AudioCodec(0, "PCMU", 8000, 64000, 1),
};
static const VideoCodec kVideoCodecs1[] = {VideoCodec(96, "H264-SVC"),
VideoCodec(97, "H264")};
static const VideoCodec kVideoCodecs1Reverse[] = {VideoCodec(97, "H264"),
VideoCodec(96, "H264-SVC")};
static const VideoCodec kVideoCodecs2[] = {VideoCodec(126, "H264"),
VideoCodec(127, "H263")};
static const VideoCodec kVideoCodecsAnswer[] = {VideoCodec(97, "H264")};
static const RtpExtension kAudioRtpExtension1[] = {
RtpExtension("urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8),
RtpExtension("http://google.com/testing/audio_something", 10),
};
static const RtpExtension kAudioRtpExtensionEncrypted1[] = {
RtpExtension("urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8),
RtpExtension("http://google.com/testing/audio_something", 10),
RtpExtension("urn:ietf:params:rtp-hdrext:ssrc-audio-level", 12, true),
RtpExtension("http://google.com/testing/audio_something", 11, true),
};
static const RtpExtension kAudioRtpExtension2[] = {
RtpExtension("urn:ietf:params:rtp-hdrext:ssrc-audio-level", 2),
RtpExtension("http://google.com/testing/audio_something_else", 8),
RtpExtension("http://google.com/testing/both_audio_and_video", 7),
};
static const RtpExtension kAudioRtpExtension3[] = {
RtpExtension("http://google.com/testing/audio_something", 2),
RtpExtension("http://google.com/testing/both_audio_and_video", 3),
};
static const RtpExtension kAudioRtpExtension3ForEncryption[] = {
RtpExtension("http://google.com/testing/audio_something", 2),
// Use RTP extension that supports encryption.
RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 3),
};
static const RtpExtension kAudioRtpExtension3ForEncryptionOffer[] = {
RtpExtension("http://google.com/testing/audio_something", 2),
RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 3),
RtpExtension("http://google.com/testing/audio_something", 14, true),
RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 13, true),
};
static const RtpExtension kVideoRtpExtension3ForEncryptionOffer[] = {
RtpExtension("http://google.com/testing/video_something", 4),
RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 3),
RtpExtension("http://google.com/testing/video_something", 12, true),
RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 13, true),
};
static const RtpExtension kAudioRtpExtensionAnswer[] = {
RtpExtension("urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8),
};
static const RtpExtension kAudioRtpExtensionEncryptedAnswer[] = {
RtpExtension("urn:ietf:params:rtp-hdrext:ssrc-audio-level", 12, true),
};
static const RtpExtension kVideoRtpExtension1[] = {
RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 14),
RtpExtension("http://google.com/testing/video_something", 13),
};
static const RtpExtension kVideoRtpExtensionEncrypted1[] = {
RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 14),
RtpExtension("http://google.com/testing/video_something", 13),
RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 9, true),
RtpExtension("http://google.com/testing/video_something", 7, true),
};
static const RtpExtension kVideoRtpExtension2[] = {
RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 2),
RtpExtension("http://google.com/testing/video_something_else", 14),
RtpExtension("http://google.com/testing/both_audio_and_video", 7),
};
static const RtpExtension kVideoRtpExtension3[] = {
RtpExtension("http://google.com/testing/video_something", 4),
RtpExtension("http://google.com/testing/both_audio_and_video", 5),
};
static const RtpExtension kVideoRtpExtension3ForEncryption[] = {
RtpExtension("http://google.com/testing/video_something", 4),
// Use RTP extension that supports encryption.
RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 5),
};
static const RtpExtension kVideoRtpExtensionAnswer[] = {
RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 14),
};
static const RtpExtension kVideoRtpExtensionEncryptedAnswer[] = {
RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 9, true),
};
static const RtpExtension kRtpExtensionTransportSequenceNumber01[] = {
RtpExtension("http://www.ietf.org/id/"
"draft-holmer-rmcat-transport-wide-cc-extensions-01",
1),
};
static const RtpExtension kRtpExtensionTransportSequenceNumber01And02[] = {
RtpExtension("http://www.ietf.org/id/"
"draft-holmer-rmcat-transport-wide-cc-extensions-01",
1),
RtpExtension(
"http://www.webrtc.org/experiments/rtp-hdrext/transport-wide-cc-02",
2),
};
static const RtpExtension kRtpExtensionTransportSequenceNumber02[] = {
RtpExtension(
"http://www.webrtc.org/experiments/rtp-hdrext/transport-wide-cc-02",
2),
};
static const RtpExtension kRtpExtensionGenericFrameDescriptorUri00[] = {
RtpExtension("http://www.webrtc.org/experiments/rtp-hdrext/"
"generic-frame-descriptor-00",
3),
};
static const uint32_t kSimulcastParamsSsrc[] = {10, 11, 20, 21, 30, 31};
static const uint32_t kSimSsrc[] = {10, 20, 30};
static const uint32_t kFec1Ssrc[] = {10, 11};
static const uint32_t kFec2Ssrc[] = {20, 21};
static const uint32_t kFec3Ssrc[] = {30, 31};
static const char kMediaStream1[] = "stream_1";
static const char kMediaStream2[] = "stream_2";
static const char kVideoTrack1[] = "video_1";
static const char kVideoTrack2[] = "video_2";
static const char kAudioTrack1[] = "audio_1";
static const char kAudioTrack2[] = "audio_2";
static const char kAudioTrack3[] = "audio_3";
static const char* kMediaProtocols[] = {"RTP/AVP", "RTP/SAVP", "RTP/AVPF",
"RTP/SAVPF"};
static const char* kMediaProtocolsDtls[] = {
"TCP/TLS/RTP/SAVPF", "TCP/TLS/RTP/SAVP", "UDP/TLS/RTP/SAVPF",
"UDP/TLS/RTP/SAVP"};
// SRTP cipher name negotiated by the tests. This must be updated if the
// default changes.
static const char* kDefaultSrtpCryptoSuite = kCsAesCm128HmacSha1_80;
static const char* kDefaultSrtpCryptoSuiteGcm = kCsAeadAes256Gcm;
// These constants are used to make the code using "AddMediaDescriptionOptions"
// more readable.
static constexpr bool kStopped = true;
static constexpr bool kActive = false;
static bool IsMediaContentOfType(const ContentInfo* content,
MediaType media_type) {
RTC_DCHECK(content);
return content->media_description()->type() == media_type;
}
static RtpTransceiverDirection GetMediaDirection(const ContentInfo* content) {
RTC_DCHECK(content);
return content->media_description()->direction();
}
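// Appends `rtx_codec` to `codecs` after asserting that no codec with the same
// payload type is already present.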
static void AddRtxCodec(const VideoCodec& rtx_codec,
std::vector<VideoCodec>* codecs) {
ASSERT_FALSE(cricket::FindCodecById(*codecs, rtx_codec.id));
codecs->push_back(rtx_codec);
}
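// Returns the names of `codecs`, preserving their order.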
template <class T>
static std::vector<std::string> GetCodecNames(const std::vector<T>& codecs) {
std::vector<std::string> codec_names;
codec_names.reserve(codecs.size());
for (const auto& codec : codecs) {
codec_names.push_back(codec.name);
}
return codec_names;
}
// This is used for tests only. MIDs are not the identifier of
// MediaDescriptionOptions, since some endpoints may not support MID and the
// SDP may not contain 'mid'.
std::vector<MediaDescriptionOptions>::iterator FindFirstMediaDescriptionByMid(
const std::string& mid,
MediaSessionOptions* opts) {
return absl::c_find_if(
opts->media_description_options,
[&mid](const MediaDescriptionOptions& t) { return t.mid == mid; });
}
std::vector<MediaDescriptionOptions>::const_iterator
FindFirstMediaDescriptionByMid(const std::string& mid,
const MediaSessionOptions& opts) {
return absl::c_find_if(
opts.media_description_options,
[&mid](const MediaDescriptionOptions& t) { return t.mid == mid; });
}
// Add a media section to the `session_options`.
static void AddMediaDescriptionOptions(MediaType type,
const std::string& mid,
RtpTransceiverDirection direction,
bool stopped,
MediaSessionOptions* opts) {
opts->media_description_options.push_back(
MediaDescriptionOptions(type, mid, direction, stopped));
}
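// Adds an active "audio" and an active "video" m= section, both with the given
// direction, to `opts`.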
static void AddAudioVideoSections(RtpTransceiverDirection direction,
MediaSessionOptions* opts) {
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio", direction, kActive,
opts);
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video", direction, kActive,
opts);
}
static void AddDataSection(RtpTransceiverDirection direction,
MediaSessionOptions* opts) {
AddMediaDescriptionOptions(MEDIA_TYPE_DATA, "data", direction, kActive, opts);
}
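// Attaches a sender with `track_id` and `stream_ids` (and, for video, optional
// rids and simulcast layers) to the media description identified by `mid`.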
static void AttachSenderToMediaDescriptionOptions(
const std::string& mid,
MediaType type,
const std::string& track_id,
const std::vector<std::string>& stream_ids,
const std::vector<RidDescription>& rids,
const SimulcastLayerList& simulcast_layers,
int num_sim_layer,
MediaSessionOptions* session_options) {
auto it = FindFirstMediaDescriptionByMid(mid, session_options);
switch (type) {
case MEDIA_TYPE_AUDIO:
it->AddAudioSender(track_id, stream_ids);
break;
case MEDIA_TYPE_VIDEO:
it->AddVideoSender(track_id, stream_ids, rids, simulcast_layers,
num_sim_layer);
break;
default:
RTC_DCHECK_NOTREACHED();
}
}
static void AttachSenderToMediaDescriptionOptions(
const std::string& mid,
MediaType type,
const std::string& track_id,
const std::vector<std::string>& stream_ids,
int num_sim_layer,
MediaSessionOptions* session_options) {
AttachSenderToMediaDescriptionOptions(mid, type, track_id, stream_ids, {},
SimulcastLayerList(), num_sim_layer,
session_options);
}
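// Removes the sender with `track_id` from the media section identified by
// `mid`; the sender must exist.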
static void DetachSenderFromMediaSection(const std::string& mid,
const std::string& track_id,
MediaSessionOptions* session_options) {
std::vector<cricket::SenderOptions>& sender_options_list =
FindFirstMediaDescriptionByMid(mid, session_options)->sender_options;
auto sender_it =
absl::c_find_if(sender_options_list,
[track_id](const cricket::SenderOptions& sender_options) {
return sender_options.track_id == track_id;
});
RTC_DCHECK(sender_it != sender_options_list.end());
sender_options_list.erase(sender_it);
}
// Helper function used to create a default MediaSessionOptions for Plan B SDP.
// (https://tools.ietf.org/html/draft-uberti-rtcweb-plan-00).
static MediaSessionOptions CreatePlanBMediaSessionOptions() {
MediaSessionOptions session_options;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kRecvOnly, kActive,
&session_options);
return session_options;
}
// Prefers GCM SDES crypto suites by removing the non-GCM defaults.
void PreferGcmCryptoParameters(CryptoParamsVec* cryptos) {
cryptos->erase(
std::remove_if(cryptos->begin(), cryptos->end(),
[](const cricket::CryptoParams& crypto) {
return crypto.cipher_suite != kCsAeadAes256Gcm &&
crypto.cipher_suite != kCsAeadAes128Gcm;
}),
cryptos->end());
}
// TODO(zhihuang): Most of these tests were written while MediaSessionOptions
// was designed for Plan B SDP, where only one audio "m=" section and one video
// "m=" section could be generated, and ordering couldn't be controlled. Many of
// these tests may be obsolete as a result, and should be refactored or removed.
class MediaSessionDescriptionFactoryTest : public ::testing::Test {
public:
MediaSessionDescriptionFactoryTest()
: f1_(&tdf1_, &ssrc_generator1), f2_(&tdf2_, &ssrc_generator2) {
f1_.set_audio_codecs(MAKE_VECTOR(kAudioCodecs1),
MAKE_VECTOR(kAudioCodecs1));
f1_.set_video_codecs(MAKE_VECTOR(kVideoCodecs1),
MAKE_VECTOR(kVideoCodecs1));
f2_.set_audio_codecs(MAKE_VECTOR(kAudioCodecs2),
MAKE_VECTOR(kAudioCodecs2));
f2_.set_video_codecs(MAKE_VECTOR(kVideoCodecs2),
MAKE_VECTOR(kVideoCodecs2));
tdf1_.set_certificate(rtc::RTCCertificate::Create(
std::unique_ptr<rtc::SSLIdentity>(new rtc::FakeSSLIdentity("id1"))));
tdf2_.set_certificate(rtc::RTCCertificate::Create(
std::unique_ptr<rtc::SSLIdentity>(new rtc::FakeSSLIdentity("id2"))));
}
// Create a video StreamParamsVec object with:
// - one video stream with 3 simulcast streams and FEC.
StreamParamsVec CreateComplexVideoStreamParamsVec() {
SsrcGroup sim_group("SIM", MAKE_VECTOR(kSimSsrc));
SsrcGroup fec_group1("FEC", MAKE_VECTOR(kFec1Ssrc));
SsrcGroup fec_group2("FEC", MAKE_VECTOR(kFec2Ssrc));
SsrcGroup fec_group3("FEC", MAKE_VECTOR(kFec3Ssrc));
std::vector<SsrcGroup> ssrc_groups;
ssrc_groups.push_back(sim_group);
ssrc_groups.push_back(fec_group1);
ssrc_groups.push_back(fec_group2);
ssrc_groups.push_back(fec_group3);
StreamParams simulcast_params;
simulcast_params.id = kVideoTrack1;
simulcast_params.ssrcs = MAKE_VECTOR(kSimulcastParamsSsrc);
simulcast_params.ssrc_groups = ssrc_groups;
simulcast_params.cname = "Video_SIM_FEC";
simulcast_params.set_stream_ids({kMediaStream1});
StreamParamsVec video_streams;
video_streams.push_back(simulcast_params);
return video_streams;
}
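// Returns true if the two crypto parameter vectors are element-wise equal in
// tag, cipher suite, key parameters and session parameters.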
bool CompareCryptoParams(const CryptoParamsVec& c1,
const CryptoParamsVec& c2) {
if (c1.size() != c2.size())
return false;
for (size_t i = 0; i < c1.size(); ++i)
if (c1[i].tag != c2[i].tag || c1[i].cipher_suite != c2[i].cipher_suite ||
c1[i].key_params != c2[i].key_params ||
c1[i].session_params != c2[i].session_params)
return false;
return true;
}
// Returns true if the transport info contains "renomination" as an
// ICE option.
bool GetIceRenomination(const TransportInfo* transport_info) {
return absl::c_linear_search(transport_info->description.transport_options,
"renomination");
}
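// Creates an offer (or, when `offer` is false, an offer/answer pair) from
// `options`, optionally on top of a current description with known ICE
// credentials, and verifies each m= section's ICE ufrag/pwd and renomination
// option.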
void TestTransportInfo(bool offer,
const MediaSessionOptions& options,
bool has_current_desc) {
const std::string current_audio_ufrag = "current_audio_ufrag";
const std::string current_audio_pwd = "current_audio_pwd";
const std::string current_video_ufrag = "current_video_ufrag";
const std::string current_video_pwd = "current_video_pwd";
const std::string current_data_ufrag = "current_data_ufrag";
const std::string current_data_pwd = "current_data_pwd";
std::unique_ptr<SessionDescription> current_desc;
std::unique_ptr<SessionDescription> desc;
if (has_current_desc) {
current_desc = std::make_unique<SessionDescription>();
current_desc->AddTransportInfo(TransportInfo(
"audio",
TransportDescription(current_audio_ufrag, current_audio_pwd)));
current_desc->AddTransportInfo(TransportInfo(
"video",
TransportDescription(current_video_ufrag, current_video_pwd)));
current_desc->AddTransportInfo(TransportInfo(
"data", TransportDescription(current_data_ufrag, current_data_pwd)));
}
if (offer) {
desc = f1_.CreateOffer(options, current_desc.get());
} else {
std::unique_ptr<SessionDescription> offer;
offer = f1_.CreateOffer(options, NULL);
desc = f1_.CreateAnswer(offer.get(), options, current_desc.get());
}
ASSERT_TRUE(desc.get() != NULL);
const TransportInfo* ti_audio = desc->GetTransportInfoByName("audio");
if (options.has_audio()) {
EXPECT_TRUE(ti_audio != NULL);
if (has_current_desc) {
EXPECT_EQ(current_audio_ufrag, ti_audio->description.ice_ufrag);
EXPECT_EQ(current_audio_pwd, ti_audio->description.ice_pwd);
} else {
EXPECT_EQ(static_cast<size_t>(cricket::ICE_UFRAG_LENGTH),
ti_audio->description.ice_ufrag.size());
EXPECT_EQ(static_cast<size_t>(cricket::ICE_PWD_LENGTH),
ti_audio->description.ice_pwd.size());
}
auto media_desc_options_it =
FindFirstMediaDescriptionByMid("audio", options);
EXPECT_EQ(
media_desc_options_it->transport_options.enable_ice_renomination,
GetIceRenomination(ti_audio));
} else {
EXPECT_TRUE(ti_audio == NULL);
}
const TransportInfo* ti_video = desc->GetTransportInfoByName("video");
if (options.has_video()) {
EXPECT_TRUE(ti_video != NULL);
auto media_desc_options_it =
FindFirstMediaDescriptionByMid("video", options);
if (options.bundle_enabled) {
EXPECT_EQ(ti_audio->description.ice_ufrag,
ti_video->description.ice_ufrag);
EXPECT_EQ(ti_audio->description.ice_pwd, ti_video->description.ice_pwd);
} else {
if (has_current_desc) {
EXPECT_EQ(current_video_ufrag, ti_video->description.ice_ufrag);
EXPECT_EQ(current_video_pwd, ti_video->description.ice_pwd);
} else {
EXPECT_EQ(static_cast<size_t>(cricket::ICE_UFRAG_LENGTH),
ti_video->description.ice_ufrag.size());
EXPECT_EQ(static_cast<size_t>(cricket::ICE_PWD_LENGTH),
ti_video->description.ice_pwd.size());
}
}
EXPECT_EQ(
media_desc_options_it->transport_options.enable_ice_renomination,
GetIceRenomination(ti_video));
} else {
EXPECT_TRUE(ti_video == NULL);
}
const TransportInfo* ti_data = desc->GetTransportInfoByName("data");
if (options.has_data()) {
EXPECT_TRUE(ti_data != NULL);
if (options.bundle_enabled) {
EXPECT_EQ(ti_audio->description.ice_ufrag,
ti_data->description.ice_ufrag);
EXPECT_EQ(ti_audio->description.ice_pwd, ti_data->description.ice_pwd);
} else {
if (has_current_desc) {
EXPECT_EQ(current_data_ufrag, ti_data->description.ice_ufrag);
EXPECT_EQ(current_data_pwd, ti_data->description.ice_pwd);
} else {
EXPECT_EQ(static_cast<size_t>(cricket::ICE_UFRAG_LENGTH),
ti_data->description.ice_ufrag.size());
EXPECT_EQ(static_cast<size_t>(cricket::ICE_PWD_LENGTH),
ti_data->description.ice_pwd.size());
}
}
auto media_desc_options_it =
FindFirstMediaDescriptionByMid("data", options);
EXPECT_EQ(
media_desc_options_it->transport_options.enable_ice_renomination,
GetIceRenomination(ti_data));
} else {
EXPECT_TRUE(ti_data == NULL);
}
}
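// Verifies that with BUNDLE enabled the audio and video m= sections share the
// same SDES crypto parameters, and that the negotiated crypto comes from the
// reference description's audio content.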
void TestCryptoWithBundle(bool offer) {
f1_.set_secure(SEC_ENABLED);
MediaSessionOptions options;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options);
std::unique_ptr<SessionDescription> ref_desc;
std::unique_ptr<SessionDescription> desc;
if (offer) {
options.bundle_enabled = false;
ref_desc = f1_.CreateOffer(options, NULL);
options.bundle_enabled = true;
desc = f1_.CreateOffer(options, ref_desc.get());
} else {
options.bundle_enabled = true;
ref_desc = f1_.CreateOffer(options, NULL);
desc = f1_.CreateAnswer(ref_desc.get(), options, NULL);
}
ASSERT_TRUE(desc);
const cricket::MediaContentDescription* audio_media_desc =
desc->GetContentDescriptionByName("audio");
ASSERT_TRUE(audio_media_desc);
const cricket::MediaContentDescription* video_media_desc =
desc->GetContentDescriptionByName("video");
ASSERT_TRUE(video_media_desc);
EXPECT_TRUE(CompareCryptoParams(audio_media_desc->cryptos(),
video_media_desc->cryptos()));
EXPECT_EQ(1u, audio_media_desc->cryptos().size());
EXPECT_EQ(kDefaultSrtpCryptoSuite,
audio_media_desc->cryptos()[0].cipher_suite);
// Verify the selected crypto is one from the reference audio
// media content.
const cricket::MediaContentDescription* ref_audio_media_desc =
ref_desc->GetContentDescriptionByName("audio");
bool found = false;
for (size_t i = 0; i < ref_audio_media_desc->cryptos().size(); ++i) {
if (ref_audio_media_desc->cryptos()[i].Matches(
audio_media_desc->cryptos()[0])) {
found = true;
break;
}
}
EXPECT_TRUE(found);
}
// This tests that the audio and video media direction is set to
// `expected_direction_in_answer` in an answer if the offer direction is set
// to `direction_in_offer` and the answerer is willing to both send and receive.
void TestMediaDirectionInAnswer(
RtpTransceiverDirection direction_in_offer,
RtpTransceiverDirection expected_direction_in_answer) {
MediaSessionOptions offer_opts;
AddAudioVideoSections(direction_in_offer, &offer_opts);
std::unique_ptr<SessionDescription> offer =
f1_.CreateOffer(offer_opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
ContentInfo* ac_offer = offer->GetContentByName("audio");
ASSERT_TRUE(ac_offer != NULL);
ContentInfo* vc_offer = offer->GetContentByName("video");
ASSERT_TRUE(vc_offer != NULL);
MediaSessionOptions answer_opts;
AddAudioVideoSections(RtpTransceiverDirection::kSendRecv, &answer_opts);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), answer_opts, NULL);
const AudioContentDescription* acd_answer =
GetFirstAudioContentDescription(answer.get());
EXPECT_EQ(expected_direction_in_answer, acd_answer->direction());
const VideoContentDescription* vcd_answer =
GetFirstVideoContentDescription(answer.get());
EXPECT_EQ(expected_direction_in_answer, vcd_answer->direction());
}
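// Returns true if the audio content of `content` contains no comfort-noise
// ("CN") codecs.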
bool VerifyNoCNCodecs(const cricket::ContentInfo* content) {
RTC_DCHECK(content);
RTC_CHECK(content->media_description());
const cricket::AudioContentDescription* audio_desc =
content->media_description()->as_audio();
RTC_CHECK(audio_desc);
for (const cricket::AudioCodec& codec : audio_desc->codecs()) {
if (codec.name == "CN") {
return false;
}
}
return true;
}
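// Negotiates an audio/video offer/answer with GCM crypto suites optionally
// enabled on either side and verifies the negotiated codecs and cipher suites.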
void TestVideoGcmCipher(bool gcm_offer, bool gcm_answer) {
MediaSessionOptions offer_opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &offer_opts);
offer_opts.crypto_options.srtp.enable_gcm_crypto_suites = gcm_offer;
MediaSessionOptions answer_opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &answer_opts);
answer_opts.crypto_options.srtp.enable_gcm_crypto_suites = gcm_answer;
f1_.set_secure(SEC_ENABLED);
f2_.set_secure(SEC_ENABLED);
std::unique_ptr<SessionDescription> offer =
f1_.CreateOffer(offer_opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
if (gcm_offer && gcm_answer) {
for (cricket::ContentInfo& content : offer->contents()) {
auto cryptos = content.media_description()->cryptos();
PreferGcmCryptoParameters(&cryptos);
content.media_description()->set_cryptos(cryptos);
}
}
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), answer_opts, NULL);
const ContentInfo* ac = answer->GetContentByName("audio");
const ContentInfo* vc = answer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
EXPECT_EQ(MediaProtocolType::kRtp, vc->type);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const VideoContentDescription* vcd = vc->media_description()->as_video();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_THAT(acd->codecs(), ElementsAreArray(kAudioCodecsAnswer));
EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // negotiated auto bw
EXPECT_EQ(0U, acd->first_ssrc()); // no sender is attached
EXPECT_TRUE(acd->rtcp_mux()); // negotiated rtcp-mux
if (gcm_offer && gcm_answer) {
ASSERT_CRYPTO(acd, 1U, kDefaultSrtpCryptoSuiteGcm);
} else {
ASSERT_CRYPTO(acd, 1U, kDefaultSrtpCryptoSuite);
}
EXPECT_EQ(MEDIA_TYPE_VIDEO, vcd->type());
EXPECT_THAT(vcd->codecs(), ElementsAreArray(kVideoCodecsAnswer));
EXPECT_EQ(0U, vcd->first_ssrc()); // no sender is attached
EXPECT_TRUE(vcd->rtcp_mux()); // negotiated rtcp-mux
if (gcm_offer && gcm_answer) {
ASSERT_CRYPTO(vcd, 1U, kDefaultSrtpCryptoSuiteGcm);
} else {
ASSERT_CRYPTO(vcd, 1U, kDefaultSrtpCryptoSuite);
}
EXPECT_EQ(cricket::kMediaProtocolSavpf, vcd->protocol());
}
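// Offers the `offered` RTP header extensions, answers with `local` as the
// answerer's capabilities, and expects `expectedAnswer` in both the audio and
// video answer.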
void TestTransportSequenceNumberNegotiation(
const cricket::RtpHeaderExtensions& local,
const cricket::RtpHeaderExtensions& offered,
const cricket::RtpHeaderExtensions& expectedAnswer) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
SetAudioVideoRtpHeaderExtensions(offered, offered, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
SetAudioVideoRtpHeaderExtensions(local, local, &opts);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, NULL);
EXPECT_EQ(
expectedAnswer,
GetFirstAudioContentDescription(answer.get())->rtp_header_extensions());
EXPECT_EQ(
expectedAnswer,
GetFirstVideoContentDescription(answer.get())->rtp_header_extensions());
}
std::vector<webrtc::RtpHeaderExtensionCapability>
HeaderExtensionCapabilitiesFromRtpExtensions(
cricket::RtpHeaderExtensions extensions) {
std::vector<webrtc::RtpHeaderExtensionCapability> capabilities;
for (const auto& extension : extensions) {
webrtc::RtpHeaderExtensionCapability capability(
extension.uri, extension.id,
webrtc::RtpTransceiverDirection::kSendRecv);
capabilities.push_back(capability);
}
return capabilities;
}
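// Converts `audio_exts` and `video_exts` into sendrecv header-extension
// capabilities and applies them to the matching audio and video media
// description options in `opts`.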
void SetAudioVideoRtpHeaderExtensions(cricket::RtpHeaderExtensions audio_exts,
cricket::RtpHeaderExtensions video_exts,
MediaSessionOptions* opts) {
auto audio_caps = HeaderExtensionCapabilitiesFromRtpExtensions(audio_exts);
auto video_caps = HeaderExtensionCapabilitiesFromRtpExtensions(video_exts);
for (auto& entry : opts->media_description_options) {
switch (entry.type) {
case MEDIA_TYPE_AUDIO:
entry.header_extensions = audio_caps;
break;
case MEDIA_TYPE_VIDEO:
entry.header_extensions = video_caps;
break;
default:
break;
}
}
}
protected:
UniqueRandomIdGenerator ssrc_generator1;
UniqueRandomIdGenerator ssrc_generator2;
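// f1_ and f2_ act as the two endpoints of the negotiation (f1_ typically
// creates the offers, f2_ the answers); each uses its own codec lists and a
// fake DTLS certificate.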
MediaSessionDescriptionFactory f1_;
MediaSessionDescriptionFactory f2_;
TransportDescriptionFactory tdf1_;
TransportDescriptionFactory tdf2_;
};
// Create a typical audio offer, and ensure it matches what we expect.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateAudioOffer) {
f1_.set_secure(SEC_ENABLED);
std::unique_ptr<SessionDescription> offer =
f1_.CreateOffer(CreatePlanBMediaSessionOptions(), NULL);
ASSERT_TRUE(offer.get() != NULL);
const ContentInfo* ac = offer->GetContentByName("audio");
const ContentInfo* vc = offer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc == NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
const AudioContentDescription* acd = ac->media_description()->as_audio();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_EQ(f1_.audio_sendrecv_codecs(), acd->codecs());
EXPECT_EQ(0U, acd->first_ssrc()); // no sender is attached.
EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // default bandwidth (auto)
EXPECT_TRUE(acd->rtcp_mux()); // rtcp-mux defaults on
ASSERT_CRYPTO(acd, 1U, kDefaultSrtpCryptoSuite);
EXPECT_EQ(cricket::kMediaProtocolSavpf, acd->protocol());
}
// Create a typical video offer, and ensure it matches what we expect.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoOffer) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
f1_.set_secure(SEC_ENABLED);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
const ContentInfo* ac = offer->GetContentByName("audio");
const ContentInfo* vc = offer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
EXPECT_EQ(MediaProtocolType::kRtp, vc->type);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const VideoContentDescription* vcd = vc->media_description()->as_video();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_EQ(f1_.audio_sendrecv_codecs(), acd->codecs());
EXPECT_EQ(0U, acd->first_ssrc()); // no sender is attached
EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // default bandwidth (auto)
EXPECT_TRUE(acd->rtcp_mux()); // rtcp-mux defaults on
ASSERT_CRYPTO(acd, 1U, kDefaultSrtpCryptoSuite);
EXPECT_EQ(cricket::kMediaProtocolSavpf, acd->protocol());
EXPECT_EQ(MEDIA_TYPE_VIDEO, vcd->type());
EXPECT_EQ(f1_.video_sendrecv_codecs(), vcd->codecs());
EXPECT_EQ(0U, vcd->first_ssrc()); // no sender is attached
EXPECT_EQ(kAutoBandwidth, vcd->bandwidth()); // default bandwidth (auto)
EXPECT_TRUE(vcd->rtcp_mux()); // rtcp-mux defaults on
ASSERT_CRYPTO(vcd, 1U, kDefaultSrtpCryptoSuite);
EXPECT_EQ(cricket::kMediaProtocolSavpf, vcd->protocol());
}
// Test creating an offer with bundle where the codecs have the same dynamic
// RTP payload type. The test verifies that the offer doesn't contain duplicate
// RTP payload types.
TEST_F(MediaSessionDescriptionFactoryTest, TestBundleOfferWithSameCodecPlType) {
const VideoCodec& offered_video_codec = f2_.video_sendrecv_codecs()[0];
const AudioCodec& offered_audio_codec = f2_.audio_sendrecv_codecs()[0];
ASSERT_EQ(offered_video_codec.id, offered_audio_codec.id);
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
opts.bundle_enabled = true;
std::unique_ptr<SessionDescription> offer = f2_.CreateOffer(opts, NULL);
const VideoContentDescription* vcd =
GetFirstVideoContentDescription(offer.get());
const AudioContentDescription* acd =
GetFirstAudioContentDescription(offer.get());
ASSERT_TRUE(NULL != vcd);
ASSERT_TRUE(NULL != acd);
EXPECT_NE(vcd->codecs()[0].id, acd->codecs()[0].id);
EXPECT_EQ(vcd->codecs()[0].name, offered_video_codec.name);
EXPECT_EQ(acd->codecs()[0].name, offered_audio_codec.name);
}
// Test creating an updated offer with bundle, audio, video and data
// after an audio only session has been negotiated.
TEST_F(MediaSessionDescriptionFactoryTest,
TestCreateUpdatedVideoOfferWithBundle) {
f1_.set_secure(SEC_ENABLED);
f2_.set_secure(SEC_ENABLED);
MediaSessionOptions opts;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kRecvOnly, kActive,
&opts);
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kInactive, kStopped,
&opts);
opts.bundle_enabled = true;
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, NULL);
MediaSessionOptions updated_opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &updated_opts);
updated_opts.bundle_enabled = true;
std::unique_ptr<SessionDescription> updated_offer(
f1_.CreateOffer(updated_opts, answer.get()));
const AudioContentDescription* acd =
GetFirstAudioContentDescription(updated_offer.get());
const VideoContentDescription* vcd =
GetFirstVideoContentDescription(updated_offer.get());
EXPECT_TRUE(NULL != vcd);
EXPECT_TRUE(NULL != acd);
ASSERT_CRYPTO(acd, 1U, kDefaultSrtpCryptoSuite);
EXPECT_EQ(cricket::kMediaProtocolSavpf, acd->protocol());
ASSERT_CRYPTO(vcd, 1U, kDefaultSrtpCryptoSuite);
EXPECT_EQ(cricket::kMediaProtocolSavpf, vcd->protocol());
}
// Create an SCTP data offer with bundle without error.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateSctpDataOffer) {
MediaSessionOptions opts;
opts.bundle_enabled = true;
AddDataSection(RtpTransceiverDirection::kSendRecv, &opts);
f1_.set_secure(SEC_ENABLED);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
EXPECT_TRUE(offer.get() != NULL);
EXPECT_TRUE(offer->GetContentByName("data") != NULL);
auto dcd = GetFirstSctpDataContentDescription(offer.get());
ASSERT_TRUE(dcd);
// Since this transport is insecure, the protocol should be "SCTP".
EXPECT_EQ(cricket::kMediaProtocolSctp, dcd->protocol());
}
// Create a secure SCTP data offer with bundle without error.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateSecureSctpDataOffer) {
MediaSessionOptions opts;
opts.bundle_enabled = true;
AddDataSection(RtpTransceiverDirection::kSendRecv, &opts);
f1_.set_secure(SEC_ENABLED);
tdf1_.set_secure(SEC_ENABLED);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
EXPECT_TRUE(offer.get() != NULL);
EXPECT_TRUE(offer->GetContentByName("data") != NULL);
auto dcd = GetFirstSctpDataContentDescription(offer.get());
ASSERT_TRUE(dcd);
// The protocol should now be "UDP/DTLS/SCTP"
EXPECT_EQ(cricket::kMediaProtocolUdpDtlsSctp, dcd->protocol());
}
// Test creating an SCTP data channel from an already generated offer.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateImplicitSctpDataOffer) {
MediaSessionOptions opts;
opts.bundle_enabled = true;
AddDataSection(RtpTransceiverDirection::kSendRecv, &opts);
f1_.set_secure(SEC_ENABLED);
std::unique_ptr<SessionDescription> offer1(f1_.CreateOffer(opts, NULL));
ASSERT_TRUE(offer1.get() != NULL);
const ContentInfo* data = offer1->GetContentByName("data");
ASSERT_TRUE(data != NULL);
ASSERT_EQ(cricket::kMediaProtocolSctp, data->media_description()->protocol());
std::unique_ptr<SessionDescription> offer2(
f1_.CreateOffer(opts, offer1.get()));
data = offer2->GetContentByName("data");
ASSERT_TRUE(data != NULL);
EXPECT_EQ(cricket::kMediaProtocolSctp, data->media_description()->protocol());
}
// Test that if BUNDLE is enabled and all media sections are rejected then the
// BUNDLE group is not present in the re-offer.
TEST_F(MediaSessionDescriptionFactoryTest, ReOfferNoBundleGroupIfAllRejected) {
MediaSessionOptions opts;
opts.bundle_enabled = true;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
opts.media_description_options[0].stopped = true;
std::unique_ptr<SessionDescription> reoffer =
f1_.CreateOffer(opts, offer.get());
EXPECT_FALSE(reoffer->GetGroupByName(cricket::GROUP_TYPE_BUNDLE));
}
// Test that if BUNDLE is enabled and the remote re-offer does not include a
// BUNDLE group since all media sections are rejected, then the re-answer also
// does not include a BUNDLE group.
TEST_F(MediaSessionDescriptionFactoryTest, ReAnswerNoBundleGroupIfAllRejected) {
MediaSessionOptions opts;
opts.bundle_enabled = true;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, nullptr);
opts.media_description_options[0].stopped = true;
std::unique_ptr<SessionDescription> reoffer =
f1_.CreateOffer(opts, offer.get());
std::unique_ptr<SessionDescription> reanswer =
f2_.CreateAnswer(reoffer.get(), opts, answer.get());
EXPECT_FALSE(reanswer->GetGroupByName(cricket::GROUP_TYPE_BUNDLE));
}
// Test that if BUNDLE is enabled and the previous offerer-tagged media section
// was rejected then the new offerer-tagged media section is the non-rejected
// media section.
TEST_F(MediaSessionDescriptionFactoryTest, ReOfferChangeBundleOffererTagged) {
MediaSessionOptions opts;
opts.bundle_enabled = true;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
// Reject the audio m= section and add a video m= section.
opts.media_description_options[0].stopped = true;
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
std::unique_ptr<SessionDescription> reoffer =
f1_.CreateOffer(opts, offer.get());
const cricket::ContentGroup* bundle_group =
reoffer->GetGroupByName(cricket::GROUP_TYPE_BUNDLE);
ASSERT_TRUE(bundle_group);
EXPECT_FALSE(bundle_group->HasContentName("audio"));
EXPECT_TRUE(bundle_group->HasContentName("video"));
}
// Test that if BUNDLE is enabled and the previous offerer-tagged media section
// was rejected and a new media section is added, then the re-answer BUNDLE
// group will contain only the non-rejected media section.
TEST_F(MediaSessionDescriptionFactoryTest, ReAnswerChangedBundleOffererTagged) {
MediaSessionOptions opts;
opts.bundle_enabled = true;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, nullptr);
// Reject the audio m= section and add a video m= section.
opts.media_description_options[0].stopped = true;
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
std::unique_ptr<SessionDescription> reoffer =
f1_.CreateOffer(opts, offer.get());
std::unique_ptr<SessionDescription> reanswer =
f2_.CreateAnswer(reoffer.get(), opts, answer.get());
const cricket::ContentGroup* bundle_group =
reanswer->GetGroupByName(cricket::GROUP_TYPE_BUNDLE);
ASSERT_TRUE(bundle_group);
EXPECT_FALSE(bundle_group->HasContentName("audio"));
EXPECT_TRUE(bundle_group->HasContentName("video"));
}
TEST_F(MediaSessionDescriptionFactoryTest,
CreateAnswerForOfferWithMultipleBundleGroups) {
// Create an offer with 4 m= sections, initially without BUNDLE groups.
MediaSessionOptions opts;
opts.bundle_enabled = false;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "1",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "2",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "3",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "4",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
ASSERT_TRUE(offer->groups().empty());
// Munge the offer to have two groups. Offers like these cannot be generated
// without munging, but it is valid to receive such offers from remote
// endpoints.
cricket::ContentGroup bundle_group1(cricket::GROUP_TYPE_BUNDLE);
bundle_group1.AddContentName("1");
bundle_group1.AddContentName("2");
cricket::ContentGroup bundle_group2(cricket::GROUP_TYPE_BUNDLE);
bundle_group2.AddContentName("3");
bundle_group2.AddContentName("4");
offer->AddGroup(bundle_group1);
offer->AddGroup(bundle_group2);
// If BUNDLE is enabled, the answer to this offer should accept both BUNDLE
// groups.
opts.bundle_enabled = true;
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, nullptr);
std::vector<const cricket::ContentGroup*> answer_groups =
answer->GetGroupsByName(cricket::GROUP_TYPE_BUNDLE);
ASSERT_EQ(answer_groups.size(), 2u);
EXPECT_EQ(answer_groups[0]->content_names().size(), 2u);
EXPECT_TRUE(answer_groups[0]->HasContentName("1"));
EXPECT_TRUE(answer_groups[0]->HasContentName("2"));
EXPECT_EQ(answer_groups[1]->content_names().size(), 2u);
EXPECT_TRUE(answer_groups[1]->HasContentName("3"));
EXPECT_TRUE(answer_groups[1]->HasContentName("4"));
// If BUNDLE is disabled, the answer to this offer should reject both BUNDLE
// groups.
opts.bundle_enabled = false;
answer = f2_.CreateAnswer(offer.get(), opts, nullptr);
answer_groups = answer->GetGroupsByName(cricket::GROUP_TYPE_BUNDLE);
// Rejected groups are still listed, but they are empty.
ASSERT_EQ(answer_groups.size(), 2u);
EXPECT_TRUE(answer_groups[0]->content_names().empty());
EXPECT_TRUE(answer_groups[1]->content_names().empty());
}
// Test that if the BUNDLE offerer-tagged media section is changed in a reoffer
// and there is still a non-rejected media section that was in the initial
// offer, then the ICE credentials do not change in the reoffer offerer-tagged
// media section.
TEST_F(MediaSessionDescriptionFactoryTest,
ReOfferChangeBundleOffererTaggedKeepsIceCredentials) {
MediaSessionOptions opts;
opts.bundle_enabled = true;
AddAudioVideoSections(RtpTransceiverDirection::kSendRecv, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, nullptr);
// Reject the audio m= section.
opts.media_description_options[0].stopped = true;
std::unique_ptr<SessionDescription> reoffer =
f1_.CreateOffer(opts, offer.get());
const TransportDescription* offer_tagged =
offer->GetTransportDescriptionByName("audio");
ASSERT_TRUE(offer_tagged);
const TransportDescription* reoffer_tagged =
reoffer->GetTransportDescriptionByName("video");
ASSERT_TRUE(reoffer_tagged);
EXPECT_EQ(offer_tagged->ice_ufrag, reoffer_tagged->ice_ufrag);
EXPECT_EQ(offer_tagged->ice_pwd, reoffer_tagged->ice_pwd);
}
// Test that if the BUNDLE offerer-tagged media section is changed in a reoffer
// and there is still a non-rejected media section that was in the initial
// offer, then the ICE credentials do not change in the reanswer answerer-tagged
// media section.
TEST_F(MediaSessionDescriptionFactoryTest,
ReAnswerChangeBundleOffererTaggedKeepsIceCredentials) {
MediaSessionOptions opts;
opts.bundle_enabled = true;
AddAudioVideoSections(RtpTransceiverDirection::kSendRecv, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, nullptr);
// Reject the audio m= section.
opts.media_description_options[0].stopped = true;
std::unique_ptr<SessionDescription> reoffer =
f1_.CreateOffer(opts, offer.get());
std::unique_ptr<SessionDescription> reanswer =
f2_.CreateAnswer(reoffer.get(), opts, answer.get());
const TransportDescription* answer_tagged =
answer->GetTransportDescriptionByName("audio");
ASSERT_TRUE(answer_tagged);
const TransportDescription* reanswer_tagged =
reanswer->GetTransportDescriptionByName("video");
ASSERT_TRUE(reanswer_tagged);
EXPECT_EQ(answer_tagged->ice_ufrag, reanswer_tagged->ice_ufrag);
EXPECT_EQ(answer_tagged->ice_pwd, reanswer_tagged->ice_pwd);
}
// Create an audio, video offer without legacy StreamParams.
TEST_F(MediaSessionDescriptionFactoryTest,
TestCreateOfferWithoutLegacyStreams) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
const ContentInfo* ac = offer->GetContentByName("audio");
const ContentInfo* vc = offer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const VideoContentDescription* vcd = vc->media_description()->as_video();
EXPECT_FALSE(vcd->has_ssrcs()); // No StreamParams.
EXPECT_FALSE(acd->has_ssrcs()); // No StreamParams.
}
// Creates an audio+video sendonly offer.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateSendOnlyOffer) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kSendOnly, &opts);
AttachSenderToMediaDescriptionOptions("video", MEDIA_TYPE_VIDEO, kVideoTrack1,
{kMediaStream1}, 1, &opts);
AttachSenderToMediaDescriptionOptions("audio", MEDIA_TYPE_AUDIO, kAudioTrack1,
{kMediaStream1}, 1, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
EXPECT_EQ(2u, offer->contents().size());
EXPECT_TRUE(IsMediaContentOfType(&offer->contents()[0], MEDIA_TYPE_AUDIO));
EXPECT_TRUE(IsMediaContentOfType(&offer->contents()[1], MEDIA_TYPE_VIDEO));
EXPECT_EQ(RtpTransceiverDirection::kSendOnly,
GetMediaDirection(&offer->contents()[0]));
EXPECT_EQ(RtpTransceiverDirection::kSendOnly,
GetMediaDirection(&offer->contents()[1]));
}
// Verifies that the order of the media contents in the current
// SessionDescription is preserved in the new SessionDescription.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateOfferContentOrder) {
MediaSessionOptions opts;
AddDataSection(RtpTransceiverDirection::kSendRecv, &opts);
std::unique_ptr<SessionDescription> offer1(f1_.CreateOffer(opts, NULL));
ASSERT_TRUE(offer1.get() != NULL);
EXPECT_EQ(1u, offer1->contents().size());
EXPECT_TRUE(IsMediaContentOfType(&offer1->contents()[0], MEDIA_TYPE_DATA));
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kRecvOnly, kActive,
&opts);
std::unique_ptr<SessionDescription> offer2(
f1_.CreateOffer(opts, offer1.get()));
ASSERT_TRUE(offer2.get() != NULL);
EXPECT_EQ(2u, offer2->contents().size());
EXPECT_TRUE(IsMediaContentOfType(&offer2->contents()[0], MEDIA_TYPE_DATA));
EXPECT_TRUE(IsMediaContentOfType(&offer2->contents()[1], MEDIA_TYPE_VIDEO));
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kRecvOnly, kActive,
&opts);
std::unique_ptr<SessionDescription> offer3(
f1_.CreateOffer(opts, offer2.get()));
ASSERT_TRUE(offer3.get() != NULL);
EXPECT_EQ(3u, offer3->contents().size());
EXPECT_TRUE(IsMediaContentOfType(&offer3->contents()[0], MEDIA_TYPE_DATA));
EXPECT_TRUE(IsMediaContentOfType(&offer3->contents()[1], MEDIA_TYPE_VIDEO));
EXPECT_TRUE(IsMediaContentOfType(&offer3->contents()[2], MEDIA_TYPE_AUDIO));
}
// Create a typical audio answer, and ensure it matches what we expect.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateAudioAnswer) {
f1_.set_secure(SEC_ENABLED);
f2_.set_secure(SEC_ENABLED);
std::unique_ptr<SessionDescription> offer =
f1_.CreateOffer(CreatePlanBMediaSessionOptions(), NULL);
ASSERT_TRUE(offer.get() != NULL);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), CreatePlanBMediaSessionOptions(), NULL);
const ContentInfo* ac = answer->GetContentByName("audio");
const ContentInfo* vc = answer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc == NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
const AudioContentDescription* acd = ac->media_description()->as_audio();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_THAT(acd->codecs(), ElementsAreArray(kAudioCodecsAnswer));
EXPECT_EQ(0U, acd->first_ssrc()); // no sender is attached
EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // negotiated auto bw
EXPECT_TRUE(acd->rtcp_mux()); // negotiated rtcp-mux
ASSERT_CRYPTO(acd, 1U, kDefaultSrtpCryptoSuite);
EXPECT_EQ(cricket::kMediaProtocolSavpf, acd->protocol());
}
// Create a typical audio answer with GCM ciphers enabled, and ensure it
// matches what we expect.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateAudioAnswerGcm) {
f1_.set_secure(SEC_ENABLED);
f2_.set_secure(SEC_ENABLED);
MediaSessionOptions opts = CreatePlanBMediaSessionOptions();
opts.crypto_options.srtp.enable_gcm_crypto_suites = true;
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
for (cricket::ContentInfo& content : offer->contents()) {
auto cryptos = content.media_description()->cryptos();
PreferGcmCryptoParameters(&cryptos);
content.media_description()->set_cryptos(cryptos);
}
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, NULL);
const ContentInfo* ac = answer->GetContentByName("audio");
const ContentInfo* vc = answer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc == NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
const AudioContentDescription* acd = ac->media_description()->as_audio();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_THAT(acd->codecs(), ElementsAreArray(kAudioCodecsAnswer));
EXPECT_EQ(0U, acd->first_ssrc()); // no sender is attached
EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // negotiated auto bw
EXPECT_TRUE(acd->rtcp_mux()); // negotiated rtcp-mux
ASSERT_CRYPTO(acd, 1U, kDefaultSrtpCryptoSuiteGcm);
EXPECT_EQ(cricket::kMediaProtocolSavpf, acd->protocol());
}
// Create a typical video answer, and ensure it matches what we expect.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoAnswer) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
f1_.set_secure(SEC_ENABLED);
f2_.set_secure(SEC_ENABLED);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, NULL);
const ContentInfo* ac = answer->GetContentByName("audio");
const ContentInfo* vc = answer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
EXPECT_EQ(MediaProtocolType::kRtp, ac->type);
EXPECT_EQ(MediaProtocolType::kRtp, vc->type);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const VideoContentDescription* vcd = vc->media_description()->as_video();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_THAT(acd->codecs(), ElementsAreArray(kAudioCodecsAnswer));
EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // negotiated auto bw
EXPECT_EQ(0U, acd->first_ssrc()); // no sender is attached
EXPECT_TRUE(acd->rtcp_mux()); // negotiated rtcp-mux
ASSERT_CRYPTO(acd, 1U, kDefaultSrtpCryptoSuite);
EXPECT_EQ(MEDIA_TYPE_VIDEO, vcd->type());
EXPECT_THAT(vcd->codecs(), ElementsAreArray(kVideoCodecsAnswer));
EXPECT_EQ(0U, vcd->first_ssrc()); // no sender is attached
EXPECT_TRUE(vcd->rtcp_mux()); // negotiated rtcp-mux
ASSERT_CRYPTO(vcd, 1U, kDefaultSrtpCryptoSuite);
EXPECT_EQ(cricket::kMediaProtocolSavpf, vcd->protocol());
}
// Create a typical video answer with GCM ciphers enabled, and ensure it
// matches what we expect.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoAnswerGcm) {
TestVideoGcmCipher(true, true);
}
// Create a typical video answer with GCM ciphers enabled for the offer only,
// and ensure it matches what we expect.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoAnswerGcmOffer) {
TestVideoGcmCipher(true, false);
}
// Create a typical video answer with GCM ciphers enabled for the answer only,
// and ensure it matches what we expect.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoAnswerGcmAnswer) {
TestVideoGcmCipher(false, true);
}
// The use_sctpmap flag should be set in an SctpDataContentDescription by
// default. The answer's use_sctpmap flag should match the offer's.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateDataAnswerUsesSctpmap) {
MediaSessionOptions opts;
AddDataSection(RtpTransceiverDirection::kSendRecv, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
ContentInfo* dc_offer = offer->GetContentByName("data");
ASSERT_TRUE(dc_offer != NULL);
SctpDataContentDescription* dcd_offer =
dc_offer->media_description()->as_sctp();
EXPECT_TRUE(dcd_offer->use_sctpmap());
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, NULL);
const ContentInfo* dc_answer = answer->GetContentByName("data");
ASSERT_TRUE(dc_answer != NULL);
const SctpDataContentDescription* dcd_answer =
dc_answer->media_description()->as_sctp();
EXPECT_TRUE(dcd_answer->use_sctpmap());
}
// The answer's use_sctpmap flag should match the offer's.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateDataAnswerWithoutSctpmap) {
MediaSessionOptions opts;
AddDataSection(RtpTransceiverDirection::kSendRecv, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
ContentInfo* dc_offer = offer->GetContentByName("data");
ASSERT_TRUE(dc_offer != NULL);
SctpDataContentDescription* dcd_offer =
dc_offer->media_description()->as_sctp();
dcd_offer->set_use_sctpmap(false);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, NULL);
const ContentInfo* dc_answer = answer->GetContentByName("data");
ASSERT_TRUE(dc_answer != NULL);
const SctpDataContentDescription* dcd_answer =
dc_answer->media_description()->as_sctp();
EXPECT_FALSE(dcd_answer->use_sctpmap());
}
// Test that a valid answer will be created for "DTLS/SCTP", "UDP/DTLS/SCTP"
// and "TCP/DTLS/SCTP" offers.
TEST_F(MediaSessionDescriptionFactoryTest,
TestCreateDataAnswerToDifferentOfferedProtos) {
// Need to enable DTLS offer/answer generation (disabled by default in this
// test).
f1_.set_secure(SEC_ENABLED);
f2_.set_secure(SEC_ENABLED);
tdf1_.set_secure(SEC_ENABLED);
tdf2_.set_secure(SEC_ENABLED);
MediaSessionOptions opts;
AddDataSection(RtpTransceiverDirection::kSendRecv, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
ASSERT_TRUE(offer.get() != nullptr);
ContentInfo* dc_offer = offer->GetContentByName("data");
ASSERT_TRUE(dc_offer != nullptr);
SctpDataContentDescription* dcd_offer =
dc_offer->media_description()->as_sctp();
ASSERT_TRUE(dcd_offer);
std::vector<std::string> protos = {"DTLS/SCTP", "UDP/DTLS/SCTP",
"TCP/DTLS/SCTP"};
for (const std::string& proto : protos) {
dcd_offer->set_protocol(proto);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, nullptr);
const ContentInfo* dc_answer = answer->GetContentByName("data");
ASSERT_TRUE(dc_answer != nullptr);
const SctpDataContentDescription* dcd_answer =
dc_answer->media_description()->as_sctp();
EXPECT_FALSE(dc_answer->rejected);
EXPECT_EQ(proto, dcd_answer->protocol());
}
}
TEST_F(MediaSessionDescriptionFactoryTest,
TestCreateDataAnswerToOfferWithDefinedMessageSize) {
// Need to enable DTLS offer/answer generation (disabled by default in this
// test).
f1_.set_secure(SEC_ENABLED);
f2_.set_secure(SEC_ENABLED);
tdf1_.set_secure(SEC_ENABLED);
tdf2_.set_secure(SEC_ENABLED);
MediaSessionOptions opts;
AddDataSection(RtpTransceiverDirection::kSendRecv, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
ASSERT_TRUE(offer.get() != nullptr);
ContentInfo* dc_offer = offer->GetContentByName("data");
ASSERT_TRUE(dc_offer != nullptr);
SctpDataContentDescription* dcd_offer =
dc_offer->media_description()->as_sctp();
ASSERT_TRUE(dcd_offer);
dcd_offer->set_max_message_size(1234);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, nullptr);
const ContentInfo* dc_answer = answer->GetContentByName("data");
ASSERT_TRUE(dc_answer != nullptr);
const SctpDataContentDescription* dcd_answer =
dc_answer->media_description()->as_sctp();
EXPECT_FALSE(dc_answer->rejected);
EXPECT_EQ(1234, dcd_answer->max_message_size());
}
TEST_F(MediaSessionDescriptionFactoryTest,
TestCreateDataAnswerToOfferWithZeroMessageSize) {
// Need to enable DTLS offer/answer generation (disabled by default in this
// test).
f1_.set_secure(SEC_ENABLED);
f2_.set_secure(SEC_ENABLED);
tdf1_.set_secure(SEC_ENABLED);
tdf2_.set_secure(SEC_ENABLED);
MediaSessionOptions opts;
AddDataSection(RtpTransceiverDirection::kSendRecv, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
ASSERT_TRUE(offer.get() != nullptr);
ContentInfo* dc_offer = offer->GetContentByName("data");
ASSERT_TRUE(dc_offer != nullptr);
SctpDataContentDescription* dcd_offer =
dc_offer->media_description()->as_sctp();
ASSERT_TRUE(dcd_offer);
dcd_offer->set_max_message_size(0);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, nullptr);
const ContentInfo* dc_answer = answer->GetContentByName("data");
ASSERT_TRUE(dc_answer != nullptr);
const SctpDataContentDescription* dcd_answer =
dc_answer->media_description()->as_sctp();
EXPECT_FALSE(dc_answer->rejected);
EXPECT_EQ(cricket::kSctpSendBufferSize, dcd_answer->max_message_size());
}
// Verifies that the order of the media contents in the offer is preserved in
// the answer.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateAnswerContentOrder) {
MediaSessionOptions opts;
// Creates a data only offer.
AddDataSection(RtpTransceiverDirection::kSendRecv, &opts);
std::unique_ptr<SessionDescription> offer1(f1_.CreateOffer(opts, NULL));
ASSERT_TRUE(offer1.get() != NULL);
// Appends audio to the offer.
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kRecvOnly, kActive,
&opts);
std::unique_ptr<SessionDescription> offer2(
f1_.CreateOffer(opts, offer1.get()));
ASSERT_TRUE(offer2.get() != NULL);
// Appends video to the offer.
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kRecvOnly, kActive,
&opts);
std::unique_ptr<SessionDescription> offer3(
f1_.CreateOffer(opts, offer2.get()));
ASSERT_TRUE(offer3.get() != NULL);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer3.get(), opts, NULL);
ASSERT_TRUE(answer.get() != NULL);
EXPECT_EQ(3u, answer->contents().size());
EXPECT_TRUE(IsMediaContentOfType(&answer->contents()[0], MEDIA_TYPE_DATA));
EXPECT_TRUE(IsMediaContentOfType(&answer->contents()[1], MEDIA_TYPE_AUDIO));
EXPECT_TRUE(IsMediaContentOfType(&answer->contents()[2], MEDIA_TYPE_VIDEO));
}
// TODO(deadbeef): Extend these tests to ensure the correct direction with other
// answerer settings.
// This tests that the media direction is set to send/receive in an answer if
// the offer is send/receive.
TEST_F(MediaSessionDescriptionFactoryTest, CreateAnswerToSendReceiveOffer) {
TestMediaDirectionInAnswer(RtpTransceiverDirection::kSendRecv,
RtpTransceiverDirection::kSendRecv);
}
// This tests that the media direction is set to receive-only in an answer if
// the offer is send-only.
TEST_F(MediaSessionDescriptionFactoryTest, CreateAnswerToSendOnlyOffer) {
TestMediaDirectionInAnswer(RtpTransceiverDirection::kSendOnly,
RtpTransceiverDirection::kRecvOnly);
}
// This tests that the media direction is set to send-only in an answer if
// the offer is recv-only.
TEST_F(MediaSessionDescriptionFactoryTest, CreateAnswerToRecvOnlyOffer) {
TestMediaDirectionInAnswer(RtpTransceiverDirection::kRecvOnly,
RtpTransceiverDirection::kSendOnly);
}
// This tests that the media direction is set to inactive in an answer if
// the offer is inactive.
TEST_F(MediaSessionDescriptionFactoryTest, CreateAnswerToInactiveOffer) {
TestMediaDirectionInAnswer(RtpTransceiverDirection::kInactive,
RtpTransceiverDirection::kInactive);
}
// Test that the media protocol is RTP/AVPF if DTLS and SDES are disabled.
TEST_F(MediaSessionDescriptionFactoryTest, AudioOfferAnswerWithCryptoDisabled) {
MediaSessionOptions opts = CreatePlanBMediaSessionOptions();
f1_.set_secure(SEC_DISABLED);
f2_.set_secure(SEC_DISABLED);
tdf1_.set_secure(SEC_DISABLED);
tdf2_.set_secure(SEC_DISABLED);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
const AudioContentDescription* offer_acd =
GetFirstAudioContentDescription(offer.get());
ASSERT_TRUE(offer_acd != NULL);
EXPECT_EQ(cricket::kMediaProtocolAvpf, offer_acd->protocol());
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, NULL);
const ContentInfo* ac_answer = answer->GetContentByName("audio");
ASSERT_TRUE(ac_answer != NULL);
EXPECT_FALSE(ac_answer->rejected);
const AudioContentDescription* answer_acd =
GetFirstAudioContentDescription(answer.get());
ASSERT_TRUE(answer_acd != NULL);
EXPECT_EQ(cricket::kMediaProtocolAvpf, answer_acd->protocol());
}
// Create an audio/video offer and answer and ensure the RTP header extensions
// match what we expect.
TEST_F(MediaSessionDescriptionFactoryTest, TestOfferAnswerWithRtpExtensions) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension1),
MAKE_VECTOR(kVideoRtpExtension1), &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension2),
MAKE_VECTOR(kVideoRtpExtension2), &opts);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, NULL);
EXPECT_EQ(
MAKE_VECTOR(kAudioRtpExtension1),
GetFirstAudioContentDescription(offer.get())->rtp_header_extensions());
EXPECT_EQ(
MAKE_VECTOR(kVideoRtpExtension1),
GetFirstVideoContentDescription(offer.get())->rtp_header_extensions());
EXPECT_EQ(
MAKE_VECTOR(kAudioRtpExtensionAnswer),
GetFirstAudioContentDescription(answer.get())->rtp_header_extensions());
EXPECT_EQ(
MAKE_VECTOR(kVideoRtpExtensionAnswer),
GetFirstVideoContentDescription(answer.get())->rtp_header_extensions());
}
// Create an audio/video offer and answer and ensure that the
// TransportSequenceNumber RTP header extensions are handled correctly. 02 is
// supported and should take precedence even though it is not listed among the
// locally supported extensions.
TEST_F(MediaSessionDescriptionFactoryTest,
TestOfferAnswerWithTransportSequenceNumberInOffer) {
TestTransportSequenceNumberNegotiation(
MAKE_VECTOR(kRtpExtensionTransportSequenceNumber01), // Local.
MAKE_VECTOR(kRtpExtensionTransportSequenceNumber01), // Offer.
MAKE_VECTOR(kRtpExtensionTransportSequenceNumber01)); // Expected answer.
}
TEST_F(MediaSessionDescriptionFactoryTest,
TestOfferAnswerWithTransportSequenceNumber01And02InOffer) {
TestTransportSequenceNumberNegotiation(
MAKE_VECTOR(kRtpExtensionTransportSequenceNumber01), // Local.
MAKE_VECTOR(kRtpExtensionTransportSequenceNumber01And02), // Offer.
MAKE_VECTOR(kRtpExtensionTransportSequenceNumber02)); // Expected answer.
}
TEST_F(MediaSessionDescriptionFactoryTest,
TestOfferAnswerWithTransportSequenceNumber02InOffer) {
TestTransportSequenceNumberNegotiation(
MAKE_VECTOR(kRtpExtensionTransportSequenceNumber01), // Local.
MAKE_VECTOR(kRtpExtensionTransportSequenceNumber02), // Offer.
MAKE_VECTOR(kRtpExtensionTransportSequenceNumber02)); // Expected answer.
}
TEST_F(MediaSessionDescriptionFactoryTest,
TestNegotiateFrameDescriptorWhenUnexposedLocally) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
SetAudioVideoRtpHeaderExtensions(
MAKE_VECTOR(kRtpExtensionGenericFrameDescriptorUri00),
MAKE_VECTOR(kRtpExtensionGenericFrameDescriptorUri00), &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
SetAudioVideoRtpHeaderExtensions(
MAKE_VECTOR(kRtpExtensionTransportSequenceNumber01),
MAKE_VECTOR(kRtpExtensionTransportSequenceNumber01), &opts);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, nullptr);
EXPECT_THAT(
GetFirstAudioContentDescription(answer.get())->rtp_header_extensions(),
ElementsAreArray(kRtpExtensionGenericFrameDescriptorUri00));
EXPECT_THAT(
GetFirstVideoContentDescription(answer.get())->rtp_header_extensions(),
ElementsAreArray(kRtpExtensionGenericFrameDescriptorUri00));
}
TEST_F(MediaSessionDescriptionFactoryTest,
TestNegotiateFrameDescriptorWhenExposedLocally) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
SetAudioVideoRtpHeaderExtensions(
MAKE_VECTOR(kRtpExtensionGenericFrameDescriptorUri00),
MAKE_VECTOR(kRtpExtensionGenericFrameDescriptorUri00), &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, nullptr);
EXPECT_THAT(
GetFirstAudioContentDescription(answer.get())->rtp_header_extensions(),
ElementsAreArray(kRtpExtensionGenericFrameDescriptorUri00));
EXPECT_THAT(
GetFirstVideoContentDescription(answer.get())->rtp_header_extensions(),
ElementsAreArray(kRtpExtensionGenericFrameDescriptorUri00));
}
TEST_F(MediaSessionDescriptionFactoryTest,
NegotiateDependencyDescriptorWhenUnexposedLocally) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
RtpExtension offer_dd(RtpExtension::kDependencyDescriptorUri, 7);
SetAudioVideoRtpHeaderExtensions({}, {offer_dd}, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
RtpExtension local_tsn(RtpExtension::kTransportSequenceNumberUri, 5);
SetAudioVideoRtpHeaderExtensions({}, {local_tsn}, &opts);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, nullptr);
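// The offered dependency descriptor should still be negotiated in the answer
// even though the local capability list did not expose it.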
EXPECT_THAT(
GetFirstVideoContentDescription(answer.get())->rtp_header_extensions(),
ElementsAre(offer_dd));
}
TEST_F(MediaSessionDescriptionFactoryTest,
NegotiateDependencyDescriptorWhenExposedLocally) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
RtpExtension offer_dd(RtpExtension::kDependencyDescriptorUri, 7);
RtpExtension local_dd(RtpExtension::kDependencyDescriptorUri, 5);
SetAudioVideoRtpHeaderExtensions({}, {offer_dd}, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
SetAudioVideoRtpHeaderExtensions({}, {local_dd}, &opts);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, nullptr);
EXPECT_THAT(
GetFirstVideoContentDescription(answer.get())->rtp_header_extensions(),
ElementsAre(offer_dd));
}
TEST_F(MediaSessionDescriptionFactoryTest,
NegotiateAbsoluteCaptureTimeWhenUnexposedLocally) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
const cricket::RtpHeaderExtensions offered_extensions = {
RtpExtension(RtpExtension::kAbsoluteCaptureTimeUri, 7)};
const cricket::RtpHeaderExtensions local_extensions = {
RtpExtension(RtpExtension::kTransportSequenceNumberUri, 5)};
SetAudioVideoRtpHeaderExtensions(offered_extensions, offered_extensions,
&opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
SetAudioVideoRtpHeaderExtensions(local_extensions, local_extensions, &opts);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, nullptr);
EXPECT_THAT(
GetFirstVideoContentDescription(answer.get())->rtp_header_extensions(),
ElementsAreArray(offered_extensions));
EXPECT_THAT(
GetFirstAudioContentDescription(answer.get())->rtp_header_extensions(),
ElementsAreArray(offered_extensions));
}
TEST_F(MediaSessionDescriptionFactoryTest,
NegotiateAbsoluteCaptureTimeWhenExposedLocally) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
const cricket::RtpHeaderExtensions offered_extensions = {
RtpExtension(RtpExtension::kAbsoluteCaptureTimeUri, 7)};
const cricket::RtpHeaderExtensions local_extensions = {
RtpExtension(RtpExtension::kAbsoluteCaptureTimeUri, 5)};
SetAudioVideoRtpHeaderExtensions(offered_extensions, offered_extensions,
&opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
SetAudioVideoRtpHeaderExtensions(local_extensions, local_extensions, &opts);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, nullptr);
EXPECT_THAT(
GetFirstVideoContentDescription(answer.get())->rtp_header_extensions(),
ElementsAreArray(offered_extensions));
EXPECT_THAT(
GetFirstAudioContentDescription(answer.get())->rtp_header_extensions(),
ElementsAreArray(offered_extensions));
}
TEST_F(MediaSessionDescriptionFactoryTest,
DoNotNegotiateAbsoluteCaptureTimeWhenNotOffered) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
const cricket::RtpHeaderExtensions offered_extensions = {
RtpExtension(RtpExtension::kTransportSequenceNumberUri, 7)};
const cricket::RtpHeaderExtensions local_extensions = {
RtpExtension(RtpExtension::kAbsoluteCaptureTimeUri, 5)};
SetAudioVideoRtpHeaderExtensions(offered_extensions, offered_extensions,
&opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
SetAudioVideoRtpHeaderExtensions(local_extensions, local_extensions, &opts);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, nullptr);
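// The offer does not include abs-capture-time, and the offered extension is
// not in the local list, so the answer should carry no header extensions.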
EXPECT_THAT(
GetFirstVideoContentDescription(answer.get())->rtp_header_extensions(),
IsEmpty());
EXPECT_THAT(
GetFirstAudioContentDescription(answer.get())->rtp_header_extensions(),
IsEmpty());
}
TEST_F(MediaSessionDescriptionFactoryTest,
OffersUnstoppedExtensionsWithAudioVideoExtensionStopped) {
MediaSessionOptions opts;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
opts.media_description_options.back().header_extensions = {
webrtc::RtpHeaderExtensionCapability("uri1", 1,
RtpTransceiverDirection::kStopped),
webrtc::RtpHeaderExtensionCapability("uri2", 3,
RtpTransceiverDirection::kSendOnly)};
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video1",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
opts.media_description_options.back().header_extensions = {
webrtc::RtpHeaderExtensionCapability("uri1", 1,
RtpTransceiverDirection::kStopped),
webrtc::RtpHeaderExtensionCapability("uri3", 7,
RtpTransceiverDirection::kSendOnly)};
auto offer = f1_.CreateOffer(opts, nullptr);
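// Extensions marked kStopped must not appear in the offer; each section
// should only carry its remaining non-stopped extension.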
EXPECT_THAT(
offer->contents(),
ElementsAre(
Property(&ContentInfo::media_description,
Pointee(Property(
&MediaContentDescription::rtp_header_extensions,
ElementsAre(Field(&RtpExtension::uri, "uri2"))))),
Property(&ContentInfo::media_description,
Pointee(Property(
&MediaContentDescription::rtp_header_extensions,
ElementsAre(Field(&RtpExtension::uri, "uri3")))))));
}
TEST_F(MediaSessionDescriptionFactoryTest,
OffersUnstoppedExtensionsWithAudioExtensionStopped) {
MediaSessionOptions opts;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
opts.media_description_options.back().header_extensions = {
webrtc::RtpHeaderExtensionCapability("uri1", 1,
RtpTransceiverDirection::kSendOnly),
webrtc::RtpHeaderExtensionCapability("uri2", 3,
RtpTransceiverDirection::kStopped)};
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video1",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
opts.media_description_options.back().header_extensions = {
webrtc::RtpHeaderExtensionCapability("uri42", 42,
RtpTransceiverDirection::kSendRecv),
webrtc::RtpHeaderExtensionCapability("uri3", 7,
RtpTransceiverDirection::kSendOnly)};
auto offer = f1_.CreateOffer(opts, nullptr);
EXPECT_THAT(
offer->contents(),
ElementsAre(
Property(&ContentInfo::media_description,
Pointee(Property(
&MediaContentDescription::rtp_header_extensions,
ElementsAre(Field(&RtpExtension::uri, "uri1"))))),
Property(
&ContentInfo::media_description,
Pointee(Property(
&MediaContentDescription::rtp_header_extensions,
UnorderedElementsAre(Field(&RtpExtension::uri, "uri3"),
Field(&RtpExtension::uri, "uri42")))))));
}
TEST_F(MediaSessionDescriptionFactoryTest,
OffersUnstoppedExtensionsWithVideoExtensionStopped) {
MediaSessionOptions opts;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
opts.media_description_options.back().header_extensions = {
webrtc::RtpHeaderExtensionCapability("uri1", 5,
RtpTransceiverDirection::kSendOnly),
webrtc::RtpHeaderExtensionCapability("uri2", 7,
RtpTransceiverDirection::kSendRecv)};
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video1",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
opts.media_description_options.back().header_extensions = {
webrtc::RtpHeaderExtensionCapability("uri42", 42,
RtpTransceiverDirection::kSendRecv),
webrtc::RtpHeaderExtensionCapability("uri3", 7,
RtpTransceiverDirection::kStopped)};
auto offer = f1_.CreateOffer(opts, nullptr);
EXPECT_THAT(
offer->contents(),
ElementsAre(
Property(
&ContentInfo::media_description,
Pointee(Property(
&MediaContentDescription::rtp_header_extensions,
UnorderedElementsAre(Field(&RtpExtension::uri, "uri1"),
Field(&RtpExtension::uri, "uri2"))))),
Property(&ContentInfo::media_description,
Pointee(Property(
&MediaContentDescription::rtp_header_extensions,
ElementsAre(Field(&RtpExtension::uri, "uri42")))))));
}
TEST_F(MediaSessionDescriptionFactoryTest, AnswersUnstoppedExtensions) {
MediaSessionOptions opts;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
opts.media_description_options.back().header_extensions = {
webrtc::RtpHeaderExtensionCapability("uri1", 4,
RtpTransceiverDirection::kStopped),
webrtc::RtpHeaderExtensionCapability("uri2", 3,
RtpTransceiverDirection::kSendOnly),
webrtc::RtpHeaderExtensionCapability("uri3", 2,
RtpTransceiverDirection::kRecvOnly),
webrtc::RtpHeaderExtensionCapability("uri4", 1,
RtpTransceiverDirection::kSendRecv)};
auto offer = f1_.CreateOffer(opts, nullptr);
opts.media_description_options.back().header_extensions = {
webrtc::RtpHeaderExtensionCapability("uri1", 4,
RtpTransceiverDirection::kSendOnly),
webrtc::RtpHeaderExtensionCapability("uri2", 3,
RtpTransceiverDirection::kRecvOnly),
webrtc::RtpHeaderExtensionCapability("uri3", 2,
RtpTransceiverDirection::kStopped),
webrtc::RtpHeaderExtensionCapability("uri4", 1,
RtpTransceiverDirection::kSendRecv)};
auto answer = f2_.CreateAnswer(offer.get(), opts, nullptr);
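// Only extensions that are non-stopped on both sides (uri2 and uri4) should
// survive into the answer.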
EXPECT_THAT(
answer->contents(),
ElementsAre(Property(
&ContentInfo::media_description,
Pointee(Property(&MediaContentDescription::rtp_header_extensions,
ElementsAre(Field(&RtpExtension::uri, "uri2"),
Field(&RtpExtension::uri, "uri4")))))));
}
TEST_F(MediaSessionDescriptionFactoryTest,
AppendsUnstoppedExtensionsToCurrentDescription) {
MediaSessionOptions opts;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
opts.media_description_options.back().header_extensions = {
webrtc::RtpHeaderExtensionCapability("uri1", 1,
RtpTransceiverDirection::kSendRecv)};
auto offer = f1_.CreateOffer(opts, nullptr);
opts.media_description_options.back().header_extensions = {
webrtc::RtpHeaderExtensionCapability("uri1", 2,
RtpTransceiverDirection::kSendRecv),
webrtc::RtpHeaderExtensionCapability("uri2", 3,
RtpTransceiverDirection::kRecvOnly),
webrtc::RtpHeaderExtensionCapability("uri3", 5,
RtpTransceiverDirection::kStopped),
webrtc::RtpHeaderExtensionCapability("uri4", 6,
RtpTransceiverDirection::kSendRecv)};
auto offer2 = f1_.CreateOffer(opts, offer.get());
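// The re-offer keeps the previously offered uri1 and appends the newly
// enabled uri2 and uri4, while the stopped uri3 is left out.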
EXPECT_THAT(
offer2->contents(),
ElementsAre(Property(
&ContentInfo::media_description,
Pointee(Property(&MediaContentDescription::rtp_header_extensions,
ElementsAre(Field(&RtpExtension::uri, "uri1"),
Field(&RtpExtension::uri, "uri2"),
Field(&RtpExtension::uri, "uri4")))))));
}
TEST_F(MediaSessionDescriptionFactoryTest,
AppendsStoppedExtensionIfKnownAndPresentInTheOffer) {
MediaSessionOptions opts;
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
opts.media_description_options.back().header_extensions = {
webrtc::RtpHeaderExtensionCapability("uri1", 1,
RtpTransceiverDirection::kSendRecv),
webrtc::RtpHeaderExtensionCapability("uri2", 1,
RtpTransceiverDirection::kSendRecv)};
auto offer = f1_.CreateOffer(opts, nullptr);
// Now add "uri2" as stopped to the options verify that the offer contains
// uri2 since it's already present since before.
opts.media_description_options.back().header_extensions = {
webrtc::RtpHeaderExtensionCapability("uri1", 1,
RtpTransceiverDirection::kSendRecv),
webrtc::RtpHeaderExtensionCapability("uri2", 2,
RtpTransceiverDirection::kStopped)};
auto offer2 = f1_.CreateOffer(opts, offer.get());
EXPECT_THAT(
offer2->contents(),
ElementsAre(Property(
&ContentInfo::media_description,
Pointee(Property(&MediaContentDescription::rtp_header_extensions,
ElementsAre(Field(&RtpExtension::uri, "uri1"),
Field(&RtpExtension::uri, "uri2")))))));
}
TEST_F(MediaSessionDescriptionFactoryTest,
TestOfferAnswerWithEncryptedRtpExtensionsBoth) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
f1_.set_enable_encrypted_rtp_header_extensions(true);
f2_.set_enable_encrypted_rtp_header_extensions(true);
SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension1),
MAKE_VECTOR(kVideoRtpExtension1), &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension2),
MAKE_VECTOR(kVideoRtpExtension2), &opts);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, NULL);
EXPECT_EQ(
MAKE_VECTOR(kAudioRtpExtensionEncrypted1),
GetFirstAudioContentDescription(offer.get())->rtp_header_extensions());
EXPECT_EQ(
MAKE_VECTOR(kVideoRtpExtensionEncrypted1),
GetFirstVideoContentDescription(offer.get())->rtp_header_extensions());
EXPECT_EQ(
MAKE_VECTOR(kAudioRtpExtensionEncryptedAnswer),
GetFirstAudioContentDescription(answer.get())->rtp_header_extensions());
EXPECT_EQ(
MAKE_VECTOR(kVideoRtpExtensionEncryptedAnswer),
GetFirstVideoContentDescription(answer.get())->rtp_header_extensions());
}
TEST_F(MediaSessionDescriptionFactoryTest,
TestOfferAnswerWithEncryptedRtpExtensionsOffer) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
f1_.set_enable_encrypted_rtp_header_extensions(true);
SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension1),
MAKE_VECTOR(kVideoRtpExtension1), &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension2),
MAKE_VECTOR(kVideoRtpExtension2), &opts);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, NULL);
EXPECT_EQ(
MAKE_VECTOR(kAudioRtpExtensionEncrypted1),
GetFirstAudioContentDescription(offer.get())->rtp_header_extensions());
EXPECT_EQ(
MAKE_VECTOR(kVideoRtpExtensionEncrypted1),
GetFirstVideoContentDescription(offer.get())->rtp_header_extensions());
EXPECT_EQ(
MAKE_VECTOR(kAudioRtpExtensionAnswer),
GetFirstAudioContentDescription(answer.get())->rtp_header_extensions());
EXPECT_EQ(
MAKE_VECTOR(kVideoRtpExtensionAnswer),
GetFirstVideoContentDescription(answer.get())->rtp_header_extensions());
}
TEST_F(MediaSessionDescriptionFactoryTest,
TestOfferAnswerWithEncryptedRtpExtensionsAnswer) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
f2_.set_enable_encrypted_rtp_header_extensions(true);
SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension1),
MAKE_VECTOR(kVideoRtpExtension1), &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension2),
MAKE_VECTOR(kVideoRtpExtension2), &opts);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, NULL);
EXPECT_EQ(
MAKE_VECTOR(kAudioRtpExtension1),
GetFirstAudioContentDescription(offer.get())->rtp_header_extensions());
EXPECT_EQ(
MAKE_VECTOR(kVideoRtpExtension1),
GetFirstVideoContentDescription(offer.get())->rtp_header_extensions());
EXPECT_EQ(
MAKE_VECTOR(kAudioRtpExtensionAnswer),
GetFirstAudioContentDescription(answer.get())->rtp_header_extensions());
EXPECT_EQ(
MAKE_VECTOR(kVideoRtpExtensionAnswer),
GetFirstVideoContentDescription(answer.get())->rtp_header_extensions());
}
// Create an audio and video answer without legacy StreamParams.
TEST_F(MediaSessionDescriptionFactoryTest,
TestCreateAnswerWithoutLegacyStreams) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, NULL);
const ContentInfo* ac = answer->GetContentByName("audio");
const ContentInfo* vc = answer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const VideoContentDescription* vcd = vc->media_description()->as_video();
EXPECT_FALSE(acd->has_ssrcs()); // No StreamParams.
EXPECT_FALSE(vcd->has_ssrcs()); // No StreamParams.
}
// Create audio/video offers and answers with various rtcp-mux settings, and
// ensure the negotiated rtcp-mux state matches what we expect.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoAnswerRtcpMux) {
MediaSessionOptions offer_opts;
AddAudioVideoSections(RtpTransceiverDirection::kSendRecv, &offer_opts);
MediaSessionOptions answer_opts;
AddAudioVideoSections(RtpTransceiverDirection::kSendRecv, &answer_opts);
std::unique_ptr<SessionDescription> offer;
std::unique_ptr<SessionDescription> answer;
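// Case 1: both sides enable rtcp-mux; it should be present in the offer and
// the answer.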
offer_opts.rtcp_mux_enabled = true;
answer_opts.rtcp_mux_enabled = true;
offer = f1_.CreateOffer(offer_opts, NULL);
answer = f2_.CreateAnswer(offer.get(), answer_opts, NULL);
ASSERT_TRUE(NULL != GetFirstAudioContentDescription(offer.get()));
ASSERT_TRUE(NULL != GetFirstVideoContentDescription(offer.get()));
ASSERT_TRUE(NULL != GetFirstAudioContentDescription(answer.get()));
ASSERT_TRUE(NULL != GetFirstVideoContentDescription(answer.get()));
EXPECT_TRUE(GetFirstAudioContentDescription(offer.get())->rtcp_mux());
EXPECT_TRUE(GetFirstVideoContentDescription(offer.get())->rtcp_mux());
EXPECT_TRUE(GetFirstAudioContentDescription(answer.get())->rtcp_mux());
EXPECT_TRUE(GetFirstVideoContentDescription(answer.get())->rtcp_mux());
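// Case 2: the answerer disables rtcp-mux; the offer still advertises it but
// the answer must not.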
offer_opts.rtcp_mux_enabled = true;
answer_opts.rtcp_mux_enabled = false;
offer = f1_.CreateOffer(offer_opts, NULL);
answer = f2_.CreateAnswer(offer.get(), answer_opts, NULL);
ASSERT_TRUE(NULL != GetFirstAudioContentDescription(offer.get()));
ASSERT_TRUE(NULL != GetFirstVideoContentDescription(offer.get()));
ASSERT_TRUE(NULL != GetFirstAudioContentDescription(answer.get()));
ASSERT_TRUE(NULL != GetFirstVideoContentDescription(answer.get()));
EXPECT_TRUE(GetFirstAudioContentDescription(offer.get())->rtcp_mux());
EXPECT_TRUE(GetFirstVideoContentDescription(offer.get())->rtcp_mux());
EXPECT_FALSE(GetFirstAudioContentDescription(answer.get())->rtcp_mux());
EXPECT_FALSE(GetFirstVideoContentDescription(answer.get())->rtcp_mux());
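// Case 3: the offerer disables rtcp-mux; neither offer nor answer should
// include it.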
offer_opts.rtcp_mux_enabled = false;
answer_opts.rtcp_mux_enabled = true;
offer = f1_.CreateOffer(offer_opts, NULL);
answer = f2_.CreateAnswer(offer.get(), answer_opts, NULL);
ASSERT_TRUE(NULL != GetFirstAudioContentDescription(offer.get()));
ASSERT_TRUE(NULL != GetFirstVideoContentDescription(offer.get()));
ASSERT_TRUE(NULL != GetFirstAudioContentDescription(answer.get()));
ASSERT_TRUE(NULL != GetFirstVideoContentDescription(answer.get()));
EXPECT_FALSE(GetFirstAudioContentDescription(offer.get())->rtcp_mux());
EXPECT_FALSE(GetFirstVideoContentDescription(offer.get())->rtcp_mux());
EXPECT_FALSE(GetFirstAudioContentDescription(answer.get())->rtcp_mux());
EXPECT_FALSE(GetFirstVideoContentDescription(answer.get())->rtcp_mux());
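// Case 4: both sides disable rtcp-mux; it should be absent everywhere.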
offer_opts.rtcp_mux_enabled = false;
answer_opts.rtcp_mux_enabled = false;
offer = f1_.CreateOffer(offer_opts, NULL);
answer = f2_.CreateAnswer(offer.get(), answer_opts, NULL);
ASSERT_TRUE(NULL != GetFirstAudioContentDescription(offer.get()));
ASSERT_TRUE(NULL != GetFirstVideoContentDescription(offer.get()));
ASSERT_TRUE(NULL != GetFirstAudioContentDescription(answer.get()));
ASSERT_TRUE(NULL != GetFirstVideoContentDescription(answer.get()));
EXPECT_FALSE(GetFirstAudioContentDescription(offer.get())->rtcp_mux());
EXPECT_FALSE(GetFirstVideoContentDescription(offer.get())->rtcp_mux());
EXPECT_FALSE(GetFirstAudioContentDescription(answer.get())->rtcp_mux());
EXPECT_FALSE(GetFirstVideoContentDescription(answer.get())->rtcp_mux());
}
// Create an audio-only answer to a video offer.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateAudioAnswerToVideo) {
MediaSessionOptions opts;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kRecvOnly, kActive,
&opts);
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kRecvOnly, kActive,
&opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
opts.media_description_options[1].stopped = true;
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, NULL);
const ContentInfo* ac = answer->GetContentByName("audio");
const ContentInfo* vc = answer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
ASSERT_TRUE(vc->media_description() != NULL);
EXPECT_TRUE(vc->rejected);
}
// Create an answer that rejects the contents which are rejected in the offer.
TEST_F(MediaSessionDescriptionFactoryTest,
CreateAnswerToOfferWithRejectedMedia) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
ContentInfo* ac = offer->GetContentByName("audio");
ContentInfo* vc = offer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
ac->rejected = true;
vc->rejected = true;
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), opts, NULL);
ac = answer->GetContentByName("audio");
vc = answer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
EXPECT_TRUE(ac->rejected);
EXPECT_TRUE(vc->rejected);
}
TEST_F(MediaSessionDescriptionFactoryTest,
OfferAndAnswerDoesNotHaveMixedByteSessionAttribute) {
MediaSessionOptions opts;
std::unique_ptr<SessionDescription> offer =
f1_.CreateOffer(opts, /*current_description=*/nullptr);
offer->set_extmap_allow_mixed(false);
std::unique_ptr<SessionDescription> answer(
f2_.CreateAnswer(offer.get(), opts, /*current_description=*/nullptr));
EXPECT_FALSE(answer->extmap_allow_mixed());
}
TEST_F(MediaSessionDescriptionFactoryTest,
OfferAndAnswerHaveMixedByteSessionAttribute) {
MediaSessionOptions opts;
std::unique_ptr<SessionDescription> offer =
f1_.CreateOffer(opts, /*current_description=*/nullptr);
offer->set_extmap_allow_mixed(true);
std::unique_ptr<SessionDescription> answer_support(
f2_.CreateAnswer(offer.get(), opts, /*current_description=*/nullptr));
EXPECT_TRUE(answer_support->extmap_allow_mixed());
}
TEST_F(MediaSessionDescriptionFactoryTest,
OfferAndAnswerDoesNotHaveMixedByteMediaAttributes) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kSendRecv, &opts);
std::unique_ptr<SessionDescription> offer =
f1_.CreateOffer(opts, /*current_description=*/nullptr);
offer->set_extmap_allow_mixed(false);
MediaContentDescription* audio_offer =
offer->GetContentDescriptionByName("audio");
MediaContentDescription* video_offer =
offer->GetContentDescriptionByName("video");
ASSERT_EQ(MediaContentDescription::kNo,
audio_offer->extmap_allow_mixed_enum());
ASSERT_EQ(MediaContentDescription::kNo,
video_offer->extmap_allow_mixed_enum());
std::unique_ptr<SessionDescription> answer(
f2_.CreateAnswer(offer.get(), opts, /*current_description=*/nullptr));
MediaContentDescription* audio_answer =
answer->GetContentDescriptionByName("audio");
MediaContentDescription* video_answer =
answer->GetContentDescriptionByName("video");
EXPECT_EQ(MediaContentDescription::kNo,
audio_answer->extmap_allow_mixed_enum());
EXPECT_EQ(MediaContentDescription::kNo,
video_answer->extmap_allow_mixed_enum());
}
TEST_F(MediaSessionDescriptionFactoryTest,
OfferAndAnswerHaveSameMixedByteMediaAttributes) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kSendRecv, &opts);
std::unique_ptr<SessionDescription> offer =
f1_.CreateOffer(opts, /*current_description=*/nullptr);
offer->set_extmap_allow_mixed(false);
MediaContentDescription* audio_offer =
offer->GetContentDescriptionByName("audio");
audio_offer->set_extmap_allow_mixed_enum(MediaContentDescription::kMedia);
MediaContentDescription* video_offer =
offer->GetContentDescriptionByName("video");
video_offer->set_extmap_allow_mixed_enum(MediaContentDescription::kMedia);
std::unique_ptr<SessionDescription> answer(
f2_.CreateAnswer(offer.get(), opts, /*current_description=*/nullptr));
MediaContentDescription* audio_answer =
answer->GetContentDescriptionByName("audio");
MediaContentDescription* video_answer =
answer->GetContentDescriptionByName("video");
EXPECT_EQ(MediaContentDescription::kMedia,
audio_answer->extmap_allow_mixed_enum());
EXPECT_EQ(MediaContentDescription::kMedia,
video_answer->extmap_allow_mixed_enum());
}
TEST_F(MediaSessionDescriptionFactoryTest,
OfferAndAnswerHaveDifferentMixedByteMediaAttributes) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kSendRecv, &opts);
std::unique_ptr<SessionDescription> offer =
f1_.CreateOffer(opts, /*current_description=*/nullptr);
offer->set_extmap_allow_mixed(false);
MediaContentDescription* audio_offer =
offer->GetContentDescriptionByName("audio");
audio_offer->set_extmap_allow_mixed_enum(MediaContentDescription::kNo);
MediaContentDescription* video_offer =
offer->GetContentDescriptionByName("video");
video_offer->set_extmap_allow_mixed_enum(MediaContentDescription::kMedia);
std::unique_ptr<SessionDescription> answer(
f2_.CreateAnswer(offer.get(), opts, /*current_description=*/nullptr));
MediaContentDescription* audio_answer =
answer->GetContentDescriptionByName("audio");
MediaContentDescription* video_answer =
answer->GetContentDescriptionByName("video");
EXPECT_EQ(MediaContentDescription::kNo,
audio_answer->extmap_allow_mixed_enum());
EXPECT_EQ(MediaContentDescription::kMedia,
video_answer->extmap_allow_mixed_enum());
}
// Create an audio and video offer with:
// - one video track
// - two audio tracks
// and ensure it matches what we expect. Also updates the initial offer by
// adding a new video track and replacing one of the audio tracks.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoOffer) {
MediaSessionOptions opts;
AddAudioVideoSections(RtpTransceiverDirection::kSendRecv, &opts);
AttachSenderToMediaDescriptionOptions("video", MEDIA_TYPE_VIDEO, kVideoTrack1,
{kMediaStream1}, 1, &opts);
AttachSenderToMediaDescriptionOptions("audio", MEDIA_TYPE_AUDIO, kAudioTrack1,
{kMediaStream1}, 1, &opts);
AttachSenderToMediaDescriptionOptions("audio", MEDIA_TYPE_AUDIO, kAudioTrack2,
{kMediaStream1}, 1, &opts);
f1_.set_secure(SEC_ENABLED);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
const ContentInfo* ac = offer->GetContentByName("audio");
const ContentInfo* vc = offer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const VideoContentDescription* vcd = vc->media_description()->as_video();
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
EXPECT_EQ(f1_.audio_sendrecv_codecs(), acd->codecs());
const StreamParamsVec& audio_streams = acd->streams();
ASSERT_EQ(2U, audio_streams.size());
EXPECT_EQ(audio_streams[0].cname, audio_streams[1].cname);
EXPECT_EQ(kAudioTrack1, audio_streams[0].id);
ASSERT_EQ(1U, audio_streams[0].ssrcs.size());
EXPECT_NE(0U, audio_streams[0].ssrcs[0]);
EXPECT_EQ(kAudioTrack2, audio_streams[1].id);
ASSERT_EQ(1U, audio_streams[1].ssrcs.size());
EXPECT_NE(0U, audio_streams[1].ssrcs[0]);
EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // default bandwidth (auto)
EXPECT_TRUE(acd->rtcp_mux()); // rtcp-mux defaults on
ASSERT_CRYPTO(acd, 1U, kDefaultSrtpCryptoSuite);
EXPECT_EQ(MEDIA_TYPE_VIDEO, vcd->type());
EXPECT_EQ(f1_.video_sendrecv_codecs(), vcd->codecs());
ASSERT_CRYPTO(vcd, 1U, kDefaultSrtpCryptoSuite);
const StreamParamsVec& video_streams = vcd->streams();
ASSERT_EQ(1U, video_streams.size());
EXPECT_EQ(video_streams[0].cname, audio_streams[0].cname);
EXPECT_EQ(kVideoTrack1, video_streams[0].id);
EXPECT_EQ(kAutoBandwidth, vcd->bandwidth()); // default bandwidth (auto)
EXPECT_TRUE(vcd->rtcp_mux()); // rtcp-mux defaults on
// Update the offer. Add a new video track that is not synched to the
// other tracks and replace audio track 2 with audio track 3.
AttachSenderToMediaDescriptionOptions("video", MEDIA_TYPE_VIDEO, kVideoTrack2,
{kMediaStream2}, 1, &opts);
DetachSenderFromMediaSection("audio", kAudioTrack2, &opts);
AttachSenderToMediaDescriptionOptions("audio", MEDIA_TYPE_AUDIO, kAudioTrack3,
{kMediaStream1}, 1, &opts);
std::unique_ptr<SessionDescription> updated_offer(
f1_.CreateOffer(opts, offer.get()));
ASSERT_TRUE(updated_offer.get() != NULL);
ac = updated_offer->GetContentByName("audio");
vc = updated_offer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
const AudioContentDescription* updated_acd =
ac->media_description()->as_audio();
const VideoContentDescription* updated_vcd =
vc->media_description()->as_video();
EXPECT_EQ(acd->type(), updated_acd->type());
EXPECT_EQ(acd->codecs(), updated_acd->codecs());
EXPECT_EQ(vcd->type(), updated_vcd->type());
EXPECT_EQ(vcd->codecs(), updated_vcd->codecs());
ASSERT_CRYPTO(updated_acd, 1U, kDefaultSrtpCryptoSuite);
EXPECT_TRUE(CompareCryptoParams(acd->cryptos(), updated_acd->cryptos()));
ASSERT_CRYPTO(updated_vcd, 1U, kDefaultSrtpCryptoSuite);
EXPECT_TRUE(CompareCryptoParams(vcd->cryptos(), updated_vcd->cryptos()));
const StreamParamsVec& updated_audio_streams = updated_acd->streams();
ASSERT_EQ(2U, updated_audio_streams.size());
EXPECT_EQ(audio_streams[0], updated_audio_streams[0]);
EXPECT_EQ(kAudioTrack3, updated_audio_streams[1].id); // New audio track.
ASSERT_EQ(1U, updated_audio_streams[1].ssrcs.size());
EXPECT_NE(0U, updated_audio_streams[1].ssrcs[0]);
EXPECT_EQ(updated_audio_streams[0].cname, updated_audio_streams[1].cname);
const StreamParamsVec& updated_video_streams = updated_vcd->streams();
ASSERT_EQ(2U, updated_video_streams.size());
EXPECT_EQ(video_streams[0], updated_video_streams[0]);
EXPECT_EQ(kVideoTrack2, updated_video_streams[1].id);
// All the media streams in one PeerConnection share one RTCP CNAME.
EXPECT_EQ(updated_video_streams[1].cname, updated_video_streams[0].cname);
}
// Create an offer with simulcast video stream.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateSimulcastVideoOffer) {
MediaSessionOptions opts;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kRecvOnly, kActive,
&opts);
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
const int num_sim_layers = 3;
AttachSenderToMediaDescriptionOptions("video", MEDIA_TYPE_VIDEO, kVideoTrack1,
{kMediaStream1}, num_sim_layers, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, NULL);
ASSERT_TRUE(offer.get() != NULL);
const ContentInfo* vc = offer->GetContentByName("video");
ASSERT_TRUE(vc != NULL);
const VideoContentDescription* vcd = vc->media_description()->as_video();
const StreamParamsVec& video_streams = vcd->streams();
ASSERT_EQ(1U, video_streams.size());
EXPECT_EQ(kVideoTrack1, video_streams[0].id);
const SsrcGroup* sim_ssrc_group =
video_streams[0].get_ssrc_group(cricket::kSimSsrcGroupSemantics);
ASSERT_TRUE(sim_ssrc_group != NULL);
EXPECT_EQ(static_cast<size_t>(num_sim_layers), sim_ssrc_group->ssrcs.size());
}
MATCHER(RidDescriptionEquals, "Verifies that two RidDescriptions are equal.") {
const RidDescription& rid1 = ::testing::get<0>(arg);
const RidDescription& rid2 = ::testing::get<1>(arg);
return rid1.rid == rid2.rid && rid1.direction == rid2.direction;
}
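// Checks that `description` contains a single RID-based send stream for
// `content_name` with the expected simulcast send layers and no receive
// layers.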
static void CheckSimulcastInSessionDescription(
const SessionDescription* description,
const std::string& content_name,
const std::vector<RidDescription>& send_rids,
const SimulcastLayerList& send_layers) {
ASSERT_NE(description, nullptr);
const ContentInfo* content = description->GetContentByName(content_name);
ASSERT_NE(content, nullptr);
const MediaContentDescription* cd = content->media_description();
ASSERT_NE(cd, nullptr);
const StreamParamsVec& streams = cd->streams();
ASSERT_THAT(streams, SizeIs(1));
const StreamParams& stream = streams[0];
ASSERT_THAT(stream.ssrcs, IsEmpty());
EXPECT_TRUE(stream.has_rids());
const std::vector<RidDescription> rids = stream.rids();
EXPECT_THAT(rids, Pointwise(RidDescriptionEquals(), send_rids));
EXPECT_TRUE(cd->HasSimulcast());
const SimulcastDescription& simulcast = cd->simulcast_description();
EXPECT_THAT(simulcast.send_layers(), SizeIs(send_layers.size()));
EXPECT_THAT(simulcast.send_layers(), Pointwise(Eq(), send_layers));
ASSERT_THAT(simulcast.receive_layers().GetAllLayers(), SizeIs(0));
}
// Create an offer with spec-compliant simulcast video stream.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateCompliantSimulcastOffer) {
MediaSessionOptions opts;
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
std::vector<RidDescription> send_rids;
send_rids.push_back(RidDescription("f", RidDirection::kSend));
send_rids.push_back(RidDescription("h", RidDirection::kSend));
send_rids.push_back(RidDescription("q", RidDirection::kSend));
SimulcastLayerList simulcast_layers;
simulcast_layers.AddLayer(SimulcastLayer(send_rids[0].rid, false));
simulcast_layers.AddLayer(SimulcastLayer(send_rids[1].rid, true));
simulcast_layers.AddLayer(SimulcastLayer(send_rids[2].rid, false));
AttachSenderToMediaDescriptionOptions("video", MEDIA_TYPE_VIDEO, kVideoTrack1,
{kMediaStream1}, send_rids,
simulcast_layers, 0, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
CheckSimulcastInSessionDescription(offer.get(), "video", send_rids,
simulcast_layers);
}
// Create an offer that signals RIDs (not SSRCs) without Simulcast.
// In this scenario, RIDs do not need to be negotiated (there is only one).
TEST_F(MediaSessionDescriptionFactoryTest, TestOfferWithRidsNoSimulcast) {
MediaSessionOptions opts;
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kSendRecv, kActive,
&opts);
RidDescription rid("f", RidDirection::kSend);
AttachSenderToMediaDescriptionOptions("video", MEDIA_TYPE_VIDEO, kVideoTrack1,
{kMediaStream1}, {rid},
SimulcastLayerList(), 0, &opts);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(opts, nullptr);
ASSERT_NE(offer.get(), nullptr);
const ContentInfo* content = offer->GetContentByName("video");
ASSERT_NE(content, nullptr);
const MediaContentDescription* cd = content->media_description();
ASSERT_NE(cd, nullptr);
const StreamParamsVec& streams = cd->streams();
ASSERT_THAT(streams, SizeIs(1));
const StreamParams& stream = streams[0];
ASSERT_THAT(stream.ssrcs, IsEmpty());
EXPECT_FALSE(stream.has_rids());
EXPECT_FALSE(cd->HasSimulcast());
}
// Create an answer with spec-compliant simulcast video stream.
// In this scenario, the SFU is the caller requesting that we send Simulcast.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateCompliantSimulcastAnswer) {
MediaSessionOptions offer_opts;
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kSendRecv, kActive,
&offer_opts);
AttachSenderToMediaDescriptionOptions("video", MEDIA_TYPE_VIDEO, kVideoTrack1,
{kMediaStream1}, 1, &offer_opts);
std::unique_ptr<SessionDescription> offer =
f1_.CreateOffer(offer_opts, nullptr);
MediaSessionOptions answer_opts;
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kSendRecv, kActive,
&answer_opts);
std::vector<RidDescription> rid_descriptions{
RidDescription("f", RidDirection::kSend),
RidDescription("h", RidDirection::kSend),
RidDescription("q", RidDirection::kSend),
};
SimulcastLayerList simulcast_layers;
simulcast_layers.AddLayer(SimulcastLayer(rid_descriptions[0].rid, false));
simulcast_layers.AddLayer(SimulcastLayer(rid_descriptions[1].rid, true));
simulcast_layers.AddLayer(SimulcastLayer(rid_descriptions[2].rid, false));
AttachSenderToMediaDescriptionOptions("video", MEDIA_TYPE_VIDEO, kVideoTrack1,
{kMediaStream1}, rid_descriptions,
simulcast_layers, 0, &answer_opts);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), answer_opts, nullptr);
CheckSimulcastInSessionDescription(answer.get(), "video", rid_descriptions,
simulcast_layers);
}
// Create an answer that signals RIDs (not SSRCs) without Simulcast.
// In this scenario, RIDs do not need to be negotiated (there is only one).
// Note that RID Direction is not the same as the transceiver direction.
TEST_F(MediaSessionDescriptionFactoryTest, TestAnswerWithRidsNoSimulcast) {
MediaSessionOptions offer_opts;
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kSendRecv, kActive,
&offer_opts);
RidDescription rid_offer("f", RidDirection::kSend);
AttachSenderToMediaDescriptionOptions("video", MEDIA_TYPE_VIDEO, kVideoTrack1,
{kMediaStream1}, {rid_offer},
SimulcastLayerList(), 0, &offer_opts);
std::unique_ptr<SessionDescription> offer =
f1_.CreateOffer(offer_opts, nullptr);
MediaSessionOptions answer_opts;
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kSendRecv, kActive,
&answer_opts);
RidDescription rid_answer("f", RidDirection::kReceive);
AttachSenderToMediaDescriptionOptions("video", MEDIA_TYPE_VIDEO, kVideoTrack1,
{kMediaStream1}, {rid_answer},
SimulcastLayerList(), 0, &answer_opts);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), answer_opts, nullptr);
ASSERT_NE(answer.get(), nullptr);
const ContentInfo* content = offer->GetContentByName("video");
ASSERT_NE(content, nullptr);
const MediaContentDescription* cd = content->media_description();
ASSERT_NE(cd, nullptr);
const StreamParamsVec& streams = cd->streams();
ASSERT_THAT(streams, SizeIs(1));
const StreamParams& stream = streams[0];
ASSERT_THAT(stream.ssrcs, IsEmpty());
EXPECT_FALSE(stream.has_rids());
EXPECT_FALSE(cd->HasSimulcast());
}
// Create an audio and video answer to a standard video offer with:
// - one video track
// - two audio tracks
// and ensure it matches what we expect. Also updates the initial answer by
// adding a new video track and removing one of the audio tracks.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoAnswer) {
MediaSessionOptions offer_opts;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kRecvOnly, kActive,
&offer_opts);
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kRecvOnly, kActive,
&offer_opts);
f1_.set_secure(SEC_ENABLED);
f2_.set_secure(SEC_ENABLED);
std::unique_ptr<SessionDescription> offer = f1_.CreateOffer(offer_opts, NULL);
MediaSessionOptions answer_opts;
AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio",
RtpTransceiverDirection::kSendRecv, kActive,
&answer_opts);
AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video",
RtpTransceiverDirection::kSendRecv, kActive,
&answer_opts);
AttachSenderToMediaDescriptionOptions("video", MEDIA_TYPE_VIDEO, kVideoTrack1,
{kMediaStream1}, 1, &answer_opts);
AttachSenderToMediaDescriptionOptions("audio", MEDIA_TYPE_AUDIO, kAudioTrack1,
{kMediaStream1}, 1, &answer_opts);
AttachSenderToMediaDescriptionOptions("audio", MEDIA_TYPE_AUDIO, kAudioTrack2,
{kMediaStream1}, 1, &answer_opts);
std::unique_ptr<SessionDescription> answer =
f2_.CreateAnswer(offer.get(), answer_opts, NULL);
ASSERT_TRUE(answer.get() != NULL);
const ContentInfo* ac = answer->GetContentByName("audio");
const ContentInfo* vc = answer->GetContentByName("video");
ASSERT_TRUE(ac != NULL);
ASSERT_TRUE(vc != NULL);
const AudioContentDescription* acd = ac->media_description()->as_audio();
const VideoContentDescription* vcd = vc->media_description()->as_video();
ASSERT_CRYPTO(acd, 1U, kDefaultSrtpCryptoSuite);
ASSERT_CRYPTO(vcd, 1U, kDefaultSrtpCryptoSuite);
EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());