Revert "Move allocation and rtp conversion logic out of payload router."
This reverts commit 1da4d79ba3275b3fa48cad3b2c0949e0d3b7afe7.
Reason for revert: Need to revert https://webrtc-review.googlesource.com/c/src/+/88220
This causes a merge conflict, so this commit needs to be reverted first.
Original change's description:
> Move allocation and rtp conversion logic out of payload router.
>
> Makes it easier to write tests, and allows for moving rtp module
> ownership into the payload router in the future.
>
> The RtpPayloadParams class is split into declaration and definition and
> moved into separate files.
>
> Bug: webrtc:9517
> Change-Id: I8700628edff19abcacfe8d3a20e4ba7476f712ad
> Reviewed-on: https://webrtc-review.googlesource.com/88564
> Commit-Queue: Stefan Holmer <stefan@webrtc.org>
> Reviewed-by: Sebastian Jansson <srte@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#23983}
TBR=sprang@webrtc.org,stefan@webrtc.org,srte@webrtc.org
Change-Id: I342c4bf483d975c87c706fe7f76f44e2dc60fe4c
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: webrtc:9517
Reviewed-on: https://webrtc-review.googlesource.com/88821
Reviewed-by: JT Teh <jtteh@webrtc.org>
Commit-Queue: JT Teh <jtteh@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23991}
diff --git a/BUILD.gn b/BUILD.gn
index 4420843..81266eb 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -422,7 +422,6 @@
"api:rtc_api_unittests",
"api/audio/test:audio_api_unittests",
"api/audio_codecs/test:audio_codecs_api_unittests",
- "api/video/test:rtc_api_video_unittests",
"api/video_codecs/test:video_codecs_api_unittests",
"p2p:libstunprober_unittests",
"p2p:rtc_p2p_unittests",
diff --git a/api/video/test/BUILD.gn b/api/video/test/BUILD.gn
deleted file mode 100644
index 60609b3..0000000
--- a/api/video/test/BUILD.gn
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS. All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-import("../../../webrtc.gni")
-
-rtc_source_set("rtc_api_video_unittests") {
- testonly = true
-
- sources = [
- "video_bitrate_allocation_unittest.cc",
- ]
-
- if (!build_with_chromium && is_clang) {
- # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
- suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
- }
-
- deps = [
- "..:video_bitrate_allocation",
- "../../../test:test_support",
- ]
-}
diff --git a/api/video/test/video_bitrate_allocation_unittest.cc b/api/video/test/video_bitrate_allocation_unittest.cc
deleted file mode 100644
index f22957b..0000000
--- a/api/video/test/video_bitrate_allocation_unittest.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <memory>
-#include <string>
-
-#include "api/video/video_bitrate_allocation.h"
-#include "test/gtest.h"
-
-namespace webrtc {
-TEST(VideoBitrateAllocation, SimulcastTargetBitrate) {
- VideoBitrateAllocation bitrate;
- bitrate.SetBitrate(0, 0, 10000);
- bitrate.SetBitrate(0, 1, 20000);
- bitrate.SetBitrate(1, 0, 40000);
- bitrate.SetBitrate(1, 1, 80000);
-
- VideoBitrateAllocation layer0_bitrate;
- layer0_bitrate.SetBitrate(0, 0, 10000);
- layer0_bitrate.SetBitrate(0, 1, 20000);
-
- VideoBitrateAllocation layer1_bitrate;
- layer1_bitrate.SetBitrate(0, 0, 40000);
- layer1_bitrate.SetBitrate(0, 1, 80000);
-
- std::vector<absl::optional<VideoBitrateAllocation>> layer_allocations =
- bitrate.GetSimulcastAllocations();
-
- EXPECT_EQ(layer0_bitrate, layer_allocations[0]);
- EXPECT_EQ(layer1_bitrate, layer_allocations[1]);
-}
-
-TEST(VideoBitrateAllocation, SimulcastTargetBitrateWithInactiveStream) {
- // Create bitrate allocation with bitrate only for the first and third stream.
- VideoBitrateAllocation bitrate;
- bitrate.SetBitrate(0, 0, 10000);
- bitrate.SetBitrate(0, 1, 20000);
- bitrate.SetBitrate(2, 0, 40000);
- bitrate.SetBitrate(2, 1, 80000);
-
- VideoBitrateAllocation layer0_bitrate;
- layer0_bitrate.SetBitrate(0, 0, 10000);
- layer0_bitrate.SetBitrate(0, 1, 20000);
-
- VideoBitrateAllocation layer2_bitrate;
- layer2_bitrate.SetBitrate(0, 0, 40000);
- layer2_bitrate.SetBitrate(0, 1, 80000);
-
- std::vector<absl::optional<VideoBitrateAllocation>> layer_allocations =
- bitrate.GetSimulcastAllocations();
-
- EXPECT_EQ(layer0_bitrate, layer_allocations[0]);
- EXPECT_FALSE(layer_allocations[1]);
- EXPECT_EQ(layer2_bitrate, layer_allocations[2]);
-}
-} // namespace webrtc
diff --git a/api/video/video_bitrate_allocation.cc b/api/video/video_bitrate_allocation.cc
index 6c5ad1e..d5a1db5 100644
--- a/api/video/video_bitrate_allocation.cc
+++ b/api/video/video_bitrate_allocation.cc
@@ -107,23 +107,6 @@
return temporal_rates;
}
-std::vector<absl::optional<VideoBitrateAllocation>>
-VideoBitrateAllocation::GetSimulcastAllocations() const {
- std::vector<absl::optional<VideoBitrateAllocation>> bitrates;
- for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
- absl::optional<VideoBitrateAllocation> layer_bitrate;
- if (IsSpatialLayerUsed(si)) {
- layer_bitrate = VideoBitrateAllocation();
- for (int tl = 0; tl < kMaxTemporalStreams; ++tl) {
- if (HasBitrate(si, tl))
- layer_bitrate->SetBitrate(0, tl, GetBitrate(si, tl));
- }
- }
- bitrates.push_back(layer_bitrate);
- }
- return bitrates;
-}
-
bool VideoBitrateAllocation::operator==(
const VideoBitrateAllocation& other) const {
for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
diff --git a/api/video/video_bitrate_allocation.h b/api/video/video_bitrate_allocation.h
index ce61734..ab5bfae 100644
--- a/api/video/video_bitrate_allocation.h
+++ b/api/video/video_bitrate_allocation.h
@@ -62,12 +62,6 @@
// layer with a defined bitrate.
std::vector<uint32_t> GetTemporalLayerAllocation(size_t spatial_index) const;
- // Returns one VideoBitrateAllocation for each spatial layer. This is used to
- // configure simulcast streams. Note that the length of the returned vector is
- // always kMaxSpatialLayers, the optional is unset for unused layers.
- std::vector<absl::optional<VideoBitrateAllocation>> GetSimulcastAllocations()
- const;
-
uint32_t get_sum_bps() const { return sum_; } // Sum of all bitrates.
uint32_t get_sum_kbps() const {
// Round down to not exceed the allocated bitrate.
diff --git a/call/BUILD.gn b/call/BUILD.gn
index 821a164..007c400 100644
--- a/call/BUILD.gn
+++ b/call/BUILD.gn
@@ -100,8 +100,6 @@
sources = [
"payload_router.cc",
"payload_router.h",
- "rtp_payload_params.cc",
- "rtp_payload_params.h",
"rtp_transport_controller_send.cc",
"rtp_transport_controller_send.h",
]
@@ -283,7 +281,6 @@
"rtcp_demuxer_unittest.cc",
"rtp_bitrate_configurator_unittest.cc",
"rtp_demuxer_unittest.cc",
- "rtp_payload_params_unittest.cc",
"rtp_rtcp_demuxer_helper_unittest.cc",
"rtx_receive_stream_unittest.cc",
]
diff --git a/call/payload_router.cc b/call/payload_router.cc
index cca4bd3..cf24b42 100644
--- a/call/payload_router.cc
+++ b/call/payload_router.cc
@@ -14,27 +14,157 @@
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/checks.h"
+#include "rtc_base/random.h"
+#include "rtc_base/timeutils.h"
namespace webrtc {
namespace {
-absl::optional<size_t> GetSimulcastIdx(const CodecSpecificInfo* info) {
- if (!info)
- return absl::nullopt;
+// Map information from info into rtp.
+void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
+ RTC_DCHECK(info);
+ rtp->codec = info->codecType;
switch (info->codecType) {
- case kVideoCodecVP8:
- return absl::optional<size_t>(info->codecSpecific.VP8.simulcastIdx);
- case kVideoCodecH264:
- return absl::optional<size_t>(info->codecSpecific.H264.simulcast_idx);
+ case kVideoCodecVP8: {
+ rtp->vp8().InitRTPVideoHeaderVP8();
+ rtp->vp8().nonReference = info->codecSpecific.VP8.nonReference;
+ rtp->vp8().temporalIdx = info->codecSpecific.VP8.temporalIdx;
+ rtp->vp8().layerSync = info->codecSpecific.VP8.layerSync;
+ rtp->vp8().keyIdx = info->codecSpecific.VP8.keyIdx;
+ rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
+ return;
+ }
+ case kVideoCodecVP9: {
+ rtp->vp9().InitRTPVideoHeaderVP9();
+ rtp->vp9().inter_pic_predicted =
+ info->codecSpecific.VP9.inter_pic_predicted;
+ rtp->vp9().flexible_mode = info->codecSpecific.VP9.flexible_mode;
+ rtp->vp9().ss_data_available = info->codecSpecific.VP9.ss_data_available;
+ rtp->vp9().non_ref_for_inter_layer_pred =
+ info->codecSpecific.VP9.non_ref_for_inter_layer_pred;
+ rtp->vp9().temporal_idx = info->codecSpecific.VP9.temporal_idx;
+ rtp->vp9().spatial_idx = info->codecSpecific.VP9.spatial_idx;
+ rtp->vp9().temporal_up_switch =
+ info->codecSpecific.VP9.temporal_up_switch;
+ rtp->vp9().inter_layer_predicted =
+ info->codecSpecific.VP9.inter_layer_predicted;
+ rtp->vp9().gof_idx = info->codecSpecific.VP9.gof_idx;
+ rtp->vp9().num_spatial_layers =
+ info->codecSpecific.VP9.num_spatial_layers;
+
+ if (info->codecSpecific.VP9.ss_data_available) {
+ rtp->vp9().spatial_layer_resolution_present =
+ info->codecSpecific.VP9.spatial_layer_resolution_present;
+ if (info->codecSpecific.VP9.spatial_layer_resolution_present) {
+ for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers;
+ ++i) {
+ rtp->vp9().width[i] = info->codecSpecific.VP9.width[i];
+ rtp->vp9().height[i] = info->codecSpecific.VP9.height[i];
+ }
+ }
+ rtp->vp9().gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
+ }
+
+ rtp->vp9().num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
+ for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i) {
+ rtp->vp9().pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
+ }
+ rtp->vp9().end_of_picture = info->codecSpecific.VP9.end_of_picture;
+ return;
+ }
+ case kVideoCodecH264: {
+ auto& h264_header = rtp->video_type_header.emplace<RTPVideoHeaderH264>();
+ h264_header.packetization_mode =
+ info->codecSpecific.H264.packetization_mode;
+ rtp->simulcastIdx = info->codecSpecific.H264.simulcast_idx;
+ return;
+ }
case kVideoCodecMultiplex:
case kVideoCodecGeneric:
- return absl::optional<size_t>(info->codecSpecific.generic.simulcast_idx);
+ rtp->codec = kVideoCodecGeneric;
+ rtp->simulcastIdx = info->codecSpecific.generic.simulcast_idx;
+ return;
default:
- return absl::nullopt;
+ return;
}
}
+
+void SetVideoTiming(VideoSendTiming* timing, const EncodedImage& image) {
+ if (image.timing_.flags == VideoSendTiming::TimingFrameFlags::kInvalid ||
+ image.timing_.flags == VideoSendTiming::TimingFrameFlags::kNotTriggered) {
+ timing->flags = VideoSendTiming::TimingFrameFlags::kInvalid;
+ return;
+ }
+
+ timing->encode_start_delta_ms = VideoSendTiming::GetDeltaCappedMs(
+ image.capture_time_ms_, image.timing_.encode_start_ms);
+ timing->encode_finish_delta_ms = VideoSendTiming::GetDeltaCappedMs(
+ image.capture_time_ms_, image.timing_.encode_finish_ms);
+ timing->packetization_finish_delta_ms = 0;
+ timing->pacer_exit_delta_ms = 0;
+ timing->network_timestamp_delta_ms = 0;
+ timing->network2_timestamp_delta_ms = 0;
+ timing->flags = image.timing_.flags;
+}
+
} // namespace
+// State for setting picture id and tl0 pic idx, for VP8 and VP9
+// TODO(nisse): Make these properties not codec specific.
+class PayloadRouter::RtpPayloadParams final {
+ public:
+ RtpPayloadParams(const uint32_t ssrc, const RtpPayloadState* state)
+ : ssrc_(ssrc) {
+ Random random(rtc::TimeMicros());
+ state_.picture_id =
+ state ? state->picture_id : (random.Rand<int16_t>() & 0x7FFF);
+ state_.tl0_pic_idx = state ? state->tl0_pic_idx : (random.Rand<uint8_t>());
+ }
+ ~RtpPayloadParams() {}
+
+ void Set(RTPVideoHeader* rtp_video_header, bool first_frame_in_picture) {
+ // Always set picture id. Set tl0_pic_idx iff temporal index is set.
+ if (first_frame_in_picture) {
+ state_.picture_id =
+ (static_cast<uint16_t>(state_.picture_id) + 1) & 0x7FFF;
+ }
+ if (rtp_video_header->codec == kVideoCodecVP8) {
+ rtp_video_header->vp8().pictureId = state_.picture_id;
+
+ if (rtp_video_header->vp8().temporalIdx != kNoTemporalIdx) {
+ if (rtp_video_header->vp8().temporalIdx == 0) {
+ ++state_.tl0_pic_idx;
+ }
+ rtp_video_header->vp8().tl0PicIdx = state_.tl0_pic_idx;
+ }
+ }
+ if (rtp_video_header->codec == kVideoCodecVP9) {
+ rtp_video_header->vp9().picture_id = state_.picture_id;
+
+ // Note that in the case that we have no temporal layers but we do have
+ // spatial layers, packets will carry layering info with a temporal_idx of
+ // zero, and we then have to set and increment tl0_pic_idx.
+ if (rtp_video_header->vp9().temporal_idx != kNoTemporalIdx ||
+ rtp_video_header->vp9().spatial_idx != kNoSpatialIdx) {
+ if (first_frame_in_picture &&
+ (rtp_video_header->vp9().temporal_idx == 0 ||
+ rtp_video_header->vp9().temporal_idx == kNoTemporalIdx)) {
+ ++state_.tl0_pic_idx;
+ }
+ rtp_video_header->vp9().tl0_pic_idx = state_.tl0_pic_idx;
+ }
+ }
+ }
+
+ uint32_t ssrc() const { return ssrc_; }
+
+ RtpPayloadState state() const { return state_; }
+
+ private:
+ const uint32_t ssrc_;
+ RtpPayloadState state_;
+};
+
PayloadRouter::PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules,
const std::vector<uint32_t>& ssrcs,
int payload_type,
@@ -101,10 +231,25 @@
if (!active_)
return Result(Result::ERROR_SEND_FAILED);
- size_t stream_index = GetSimulcastIdx(codec_specific_info).value_or(0);
+ RTPVideoHeader rtp_video_header;
+ if (codec_specific_info)
+ CopyCodecSpecific(codec_specific_info, &rtp_video_header);
+
+ rtp_video_header.rotation = encoded_image.rotation_;
+ rtp_video_header.content_type = encoded_image.content_type_;
+ rtp_video_header.playout_delay = encoded_image.playout_delay_;
+
+ SetVideoTiming(&rtp_video_header.video_timing, encoded_image);
+
+ int stream_index = rtp_video_header.simulcastIdx;
RTC_DCHECK_LT(stream_index, rtp_modules_.size());
- RTPVideoHeader rtp_video_header = params_[stream_index].GetRtpVideoHeader(
- encoded_image, codec_specific_info);
+
+ // Sets picture id and tl0 pic idx.
+ const bool first_frame_in_picture =
+ (codec_specific_info && codec_specific_info->codecType == kVideoCodecVP9)
+ ? codec_specific_info->codecSpecific.VP9.first_frame_in_picture
+ : true;
+ params_[stream_index].Set(&rtp_video_header, first_frame_in_picture);
uint32_t frame_id;
if (!rtp_modules_[stream_index]->Sending()) {
@@ -129,16 +274,22 @@
// If spatial scalability is enabled, it is covered by a single stream.
rtp_modules_[0]->SetVideoBitrateAllocation(bitrate);
} else {
- std::vector<absl::optional<VideoBitrateAllocation>> layer_bitrates =
- bitrate.GetSimulcastAllocations();
// Simulcast is in use, split the VideoBitrateAllocation into one struct
// per rtp stream, moving over the temporal layer allocation.
- for (size_t i = 0; i < rtp_modules_.size(); ++i) {
- // The next spatial layer could be used if the current one is
- // inactive.
- if (layer_bitrates[i]) {
- rtp_modules_[i]->SetVideoBitrateAllocation(*layer_bitrates[i]);
+ for (size_t si = 0; si < rtp_modules_.size(); ++si) {
+ // Don't send empty TargetBitrate messages on streams not being relayed.
+ if (!bitrate.IsSpatialLayerUsed(si)) {
+ // The next spatial layer could be used if the current one is
+ // inactive.
+ continue;
}
+
+ VideoBitrateAllocation layer_bitrate;
+ for (int tl = 0; tl < kMaxTemporalStreams; ++tl) {
+ if (bitrate.HasBitrate(si, tl))
+ layer_bitrate.SetBitrate(0, tl, bitrate.GetBitrate(si, tl));
+ }
+ rtp_modules_[si]->SetVideoBitrateAllocation(layer_bitrate);
}
}
}
diff --git a/call/payload_router.h b/call/payload_router.h
index c62bc75..8885458 100644
--- a/call/payload_router.h
+++ b/call/payload_router.h
@@ -15,7 +15,6 @@
#include <vector>
#include "api/video_codecs/video_encoder.h"
-#include "call/rtp_payload_params.h"
#include "common_types.h" // NOLINT(build/include)
#include "modules/rtp_rtcp/source/rtp_video_header.h"
#include "rtc_base/constructormagic.h"
@@ -27,6 +26,12 @@
class RTPFragmentationHeader;
class RtpRtcp;
+// Currently only VP8/VP9 specific.
+struct RtpPayloadState {
+ int16_t picture_id = -1;
+ uint8_t tl0_pic_idx = 0;
+};
+
// PayloadRouter routes outgoing data to the correct sending RTP module, based
// on the simulcast layer in RTPVideoHeader.
class PayloadRouter : public EncodedImageCallback {
@@ -58,6 +63,8 @@
void OnBitrateAllocationUpdated(const VideoBitrateAllocation& bitrate);
private:
+ class RtpPayloadParams;
+
void UpdateModuleSendingState() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
rtc::CriticalSection crit_;
diff --git a/call/payload_router_unittest.cc b/call/payload_router_unittest.cc
index 9c3e1de..d6dfedb 100644
--- a/call/payload_router_unittest.cc
+++ b/call/payload_router_unittest.cc
@@ -31,6 +31,10 @@
const int8_t kPayloadType = 96;
const uint32_t kSsrc1 = 12345;
const uint32_t kSsrc2 = 23456;
+const uint32_t kSsrc3 = 34567;
+const int16_t kPictureId = 123;
+const int16_t kTl0PicIdx = 20;
+const uint8_t kTemporalIdx = 1;
const int16_t kInitialPictureId1 = 222;
const int16_t kInitialPictureId2 = 44;
const int16_t kInitialTl0PicIdx1 = 99;
@@ -216,6 +220,243 @@
.error);
}
+TEST(PayloadRouterTest, SimulcastTargetBitrate) {
+ NiceMock<MockRtpRtcp> rtp_1;
+ NiceMock<MockRtpRtcp> rtp_2;
+ std::vector<RtpRtcp*> modules = {&rtp_1, &rtp_2};
+
+ PayloadRouter payload_router(modules, {kSsrc1, kSsrc2}, kPayloadType, {});
+ payload_router.SetActive(true);
+
+ VideoBitrateAllocation bitrate;
+ bitrate.SetBitrate(0, 0, 10000);
+ bitrate.SetBitrate(0, 1, 20000);
+ bitrate.SetBitrate(1, 0, 40000);
+ bitrate.SetBitrate(1, 1, 80000);
+
+ VideoBitrateAllocation layer0_bitrate;
+ layer0_bitrate.SetBitrate(0, 0, 10000);
+ layer0_bitrate.SetBitrate(0, 1, 20000);
+
+ VideoBitrateAllocation layer1_bitrate;
+ layer1_bitrate.SetBitrate(0, 0, 40000);
+ layer1_bitrate.SetBitrate(0, 1, 80000);
+
+ EXPECT_CALL(rtp_1, SetVideoBitrateAllocation(layer0_bitrate)).Times(1);
+ EXPECT_CALL(rtp_2, SetVideoBitrateAllocation(layer1_bitrate)).Times(1);
+
+ payload_router.OnBitrateAllocationUpdated(bitrate);
+}
+
+// If the middle of three streams is inactive the first and last streams should
+// be asked to send the TargetBitrate message.
+TEST(PayloadRouterTest, SimulcastTargetBitrateWithInactiveStream) {
+ // Set up three active rtp modules.
+ NiceMock<MockRtpRtcp> rtp_1;
+ NiceMock<MockRtpRtcp> rtp_2;
+ NiceMock<MockRtpRtcp> rtp_3;
+ std::vector<RtpRtcp*> modules = {&rtp_1, &rtp_2, &rtp_3};
+ PayloadRouter payload_router(modules, {kSsrc1, kSsrc2, kSsrc3}, kPayloadType,
+ {});
+ payload_router.SetActive(true);
+
+ // Create bitrate allocation with bitrate only for the first and third stream.
+ VideoBitrateAllocation bitrate;
+ bitrate.SetBitrate(0, 0, 10000);
+ bitrate.SetBitrate(0, 1, 20000);
+ bitrate.SetBitrate(2, 0, 40000);
+ bitrate.SetBitrate(2, 1, 80000);
+
+ VideoBitrateAllocation layer0_bitrate;
+ layer0_bitrate.SetBitrate(0, 0, 10000);
+ layer0_bitrate.SetBitrate(0, 1, 20000);
+
+ VideoBitrateAllocation layer2_bitrate;
+ layer2_bitrate.SetBitrate(0, 0, 40000);
+ layer2_bitrate.SetBitrate(0, 1, 80000);
+
+ // Expect the first and third rtp module to be asked to send a TargetBitrate
+ // message. (No target bitrate with 0bps sent from the second one.)
+ EXPECT_CALL(rtp_1, SetVideoBitrateAllocation(layer0_bitrate)).Times(1);
+ EXPECT_CALL(rtp_2, SetVideoBitrateAllocation(_)).Times(0);
+ EXPECT_CALL(rtp_3, SetVideoBitrateAllocation(layer2_bitrate)).Times(1);
+
+ payload_router.OnBitrateAllocationUpdated(bitrate);
+}
+
+TEST(PayloadRouterTest, SvcTargetBitrate) {
+ NiceMock<MockRtpRtcp> rtp_1;
+ std::vector<RtpRtcp*> modules = {&rtp_1};
+ PayloadRouter payload_router(modules, {kSsrc1}, kPayloadType, {});
+ payload_router.SetActive(true);
+
+ VideoBitrateAllocation bitrate;
+ bitrate.SetBitrate(0, 0, 10000);
+ bitrate.SetBitrate(0, 1, 20000);
+ bitrate.SetBitrate(1, 0, 40000);
+ bitrate.SetBitrate(1, 1, 80000);
+
+ EXPECT_CALL(rtp_1, SetVideoBitrateAllocation(bitrate)).Times(1);
+
+ payload_router.OnBitrateAllocationUpdated(bitrate);
+}
+
+TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_Vp8) {
+ NiceMock<MockRtpRtcp> rtp1;
+ NiceMock<MockRtpRtcp> rtp2;
+ std::vector<RtpRtcp*> modules = {&rtp1, &rtp2};
+ RtpPayloadState state2;
+ state2.picture_id = kPictureId;
+ state2.tl0_pic_idx = kTl0PicIdx;
+ std::map<uint32_t, RtpPayloadState> states = {{kSsrc2, state2}};
+
+ PayloadRouter payload_router(modules, {kSsrc1, kSsrc2}, kPayloadType, states);
+ payload_router.SetActive(true);
+
+ EncodedImage encoded_image;
+ encoded_image.rotation_ = kVideoRotation_90;
+ encoded_image.content_type_ = VideoContentType::SCREENSHARE;
+
+ CodecSpecificInfo codec_info;
+ memset(&codec_info, 0, sizeof(CodecSpecificInfo));
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.simulcastIdx = 1;
+ codec_info.codecSpecific.VP8.temporalIdx = kTemporalIdx;
+ codec_info.codecSpecific.VP8.keyIdx = kNoKeyIdx;
+ codec_info.codecSpecific.VP8.layerSync = true;
+ codec_info.codecSpecific.VP8.nonReference = true;
+
+ EXPECT_CALL(rtp2, Sending()).WillOnce(Return(true));
+ EXPECT_CALL(rtp2, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kVideoRotation_90, header->rotation);
+ EXPECT_EQ(VideoContentType::SCREENSHARE, header->content_type);
+ EXPECT_EQ(1, header->simulcastIdx);
+ EXPECT_EQ(kVideoCodecVP8, header->codec);
+ EXPECT_EQ(kPictureId + 1, header->vp8().pictureId);
+ EXPECT_EQ(kTemporalIdx, header->vp8().temporalIdx);
+ EXPECT_EQ(kTl0PicIdx, header->vp8().tl0PicIdx);
+ EXPECT_EQ(kNoKeyIdx, header->vp8().keyIdx);
+ EXPECT_TRUE(header->vp8().layerSync);
+ EXPECT_TRUE(header->vp8().nonReference);
+ return true;
+ }));
+
+ EXPECT_EQ(
+ EncodedImageCallback::Result::OK,
+ payload_router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
+}
+
+TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_Vp9) {
+ RtpPayloadState state;
+ state.picture_id = kPictureId;
+ state.tl0_pic_idx = kTl0PicIdx;
+ std::map<uint32_t, RtpPayloadState> states = {{kSsrc1, state}};
+
+ NiceMock<MockRtpRtcp> rtp;
+ std::vector<RtpRtcp*> modules = {&rtp};
+ PayloadRouter router(modules, {kSsrc1}, kPayloadType, states);
+ router.SetActive(true);
+
+ EncodedImage encoded_image;
+ encoded_image.rotation_ = kVideoRotation_90;
+ encoded_image.content_type_ = VideoContentType::SCREENSHARE;
+
+ CodecSpecificInfo codec_info;
+ memset(&codec_info, 0, sizeof(CodecSpecificInfo));
+ codec_info.codecType = kVideoCodecVP9;
+ codec_info.codecSpecific.VP9.num_spatial_layers = 3;
+ codec_info.codecSpecific.VP9.first_frame_in_picture = true;
+ codec_info.codecSpecific.VP9.spatial_idx = 0;
+ codec_info.codecSpecific.VP9.temporal_idx = 2;
+ codec_info.codecSpecific.VP9.end_of_picture = false;
+
+ EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(
+ Invoke([&codec_info](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kVideoRotation_90, header->rotation);
+ EXPECT_EQ(VideoContentType::SCREENSHARE, header->content_type);
+ EXPECT_EQ(kVideoCodecVP9, header->codec);
+ EXPECT_EQ(kPictureId + 1, header->vp9().picture_id);
+ EXPECT_EQ(kTl0PicIdx, header->vp9().tl0_pic_idx);
+ EXPECT_EQ(header->vp9().temporal_idx,
+ codec_info.codecSpecific.VP9.temporal_idx);
+ EXPECT_EQ(header->vp9().spatial_idx,
+ codec_info.codecSpecific.VP9.spatial_idx);
+ EXPECT_EQ(header->vp9().num_spatial_layers,
+ codec_info.codecSpecific.VP9.num_spatial_layers);
+ EXPECT_EQ(header->vp9().end_of_picture,
+ codec_info.codecSpecific.VP9.end_of_picture);
+ return true;
+ }));
+ EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
+
+ // Next spatial layer.
+ codec_info.codecSpecific.VP9.first_frame_in_picture = false;
+ codec_info.codecSpecific.VP9.spatial_idx += 1;
+ codec_info.codecSpecific.VP9.end_of_picture = true;
+
+ EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(
+ Invoke([&codec_info](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kVideoRotation_90, header->rotation);
+ EXPECT_EQ(VideoContentType::SCREENSHARE, header->content_type);
+ EXPECT_EQ(kVideoCodecVP9, header->codec);
+ EXPECT_EQ(kPictureId + 1, header->vp9().picture_id);
+ EXPECT_EQ(kTl0PicIdx, header->vp9().tl0_pic_idx);
+ EXPECT_EQ(header->vp9().temporal_idx,
+ codec_info.codecSpecific.VP9.temporal_idx);
+ EXPECT_EQ(header->vp9().spatial_idx,
+ codec_info.codecSpecific.VP9.spatial_idx);
+ EXPECT_EQ(header->vp9().num_spatial_layers,
+ codec_info.codecSpecific.VP9.num_spatial_layers);
+ EXPECT_EQ(header->vp9().end_of_picture,
+ codec_info.codecSpecific.VP9.end_of_picture);
+ return true;
+ }));
+ EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
+}
+
+TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_H264) {
+ NiceMock<MockRtpRtcp> rtp1;
+ std::vector<RtpRtcp*> modules = {&rtp1};
+ PayloadRouter payload_router(modules, {kSsrc1}, kPayloadType, {});
+ payload_router.SetActive(true);
+
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ memset(&codec_info, 0, sizeof(CodecSpecificInfo));
+ codec_info.codecType = kVideoCodecH264;
+ codec_info.codecSpecific.H264.packetization_mode =
+ H264PacketizationMode::SingleNalUnit;
+
+ EXPECT_CALL(rtp1, Sending()).WillOnce(Return(true));
+ EXPECT_CALL(rtp1, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(0, header->simulcastIdx);
+ EXPECT_EQ(kVideoCodecH264, header->codec);
+ const auto& h264 =
+ absl::get<RTPVideoHeaderH264>(header->video_type_header);
+ EXPECT_EQ(H264PacketizationMode::SingleNalUnit,
+ h264.packetization_mode);
+ return true;
+ }));
+
+ EXPECT_EQ(
+ EncodedImageCallback::Result::OK,
+ payload_router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
+}
+
TEST(PayloadRouterTest, CreateWithNoPreviousStates) {
NiceMock<MockRtpRtcp> rtp1;
NiceMock<MockRtpRtcp> rtp2;
@@ -254,4 +495,227 @@
EXPECT_EQ(kInitialPictureId2, initial_states[kSsrc2].picture_id);
EXPECT_EQ(kInitialTl0PicIdx2, initial_states[kSsrc2].tl0_pic_idx);
}
+
+TEST(PayloadRouterTest, PictureIdIsSetForVp8) {
+ RtpPayloadState state1;
+ state1.picture_id = kInitialPictureId1;
+ state1.tl0_pic_idx = kInitialTl0PicIdx1;
+ RtpPayloadState state2;
+ state2.picture_id = kInitialPictureId2;
+ state2.tl0_pic_idx = kInitialTl0PicIdx2;
+ std::map<uint32_t, RtpPayloadState> states = {{kSsrc1, state1},
+ {kSsrc2, state2}};
+
+ NiceMock<MockRtpRtcp> rtp1;
+ NiceMock<MockRtpRtcp> rtp2;
+ std::vector<RtpRtcp*> modules = {&rtp1, &rtp2};
+ PayloadRouter router(modules, {kSsrc1, kSsrc2}, kPayloadType, states);
+ router.SetActive(true);
+
+ EncodedImage encoded_image;
+ // Modules are sending for this test.
+ // OnEncodedImage, simulcastIdx: 0.
+ CodecSpecificInfo codec_info;
+ memset(&codec_info, 0, sizeof(CodecSpecificInfo));
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.simulcastIdx = 0;
+
+ EXPECT_CALL(rtp1, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kVideoCodecVP8, header->codec);
+ EXPECT_EQ(kInitialPictureId1 + 1, header->vp8().pictureId);
+ return true;
+ }));
+ EXPECT_CALL(rtp1, Sending()).WillOnce(Return(true));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
+
+ // OnEncodedImage, simulcastIdx: 1.
+ codec_info.codecSpecific.VP8.simulcastIdx = 1;
+
+ EXPECT_CALL(rtp2, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kVideoCodecVP8, header->codec);
+ EXPECT_EQ(kInitialPictureId2 + 1, header->vp8().pictureId);
+ return true;
+ }));
+ EXPECT_CALL(rtp2, Sending()).WillOnce(Return(true));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
+
+ // State should hold latest used picture id and tl0_pic_idx.
+ states = router.GetRtpPayloadStates();
+ EXPECT_EQ(2u, states.size());
+ EXPECT_EQ(kInitialPictureId1 + 1, states[kSsrc1].picture_id);
+ EXPECT_EQ(kInitialTl0PicIdx1 + 1, states[kSsrc1].tl0_pic_idx);
+ EXPECT_EQ(kInitialPictureId2 + 1, states[kSsrc2].picture_id);
+ EXPECT_EQ(kInitialTl0PicIdx2 + 1, states[kSsrc2].tl0_pic_idx);
+}
+
+TEST(PayloadRouterTest, PictureIdWraps) {
+ RtpPayloadState state1;
+ state1.picture_id = kMaxTwoBytePictureId;
+ state1.tl0_pic_idx = kInitialTl0PicIdx1;
+
+ NiceMock<MockRtpRtcp> rtp;
+ std::vector<RtpRtcp*> modules = {&rtp};
+ PayloadRouter router(modules, {kSsrc1}, kPayloadType, {{kSsrc1, state1}});
+ router.SetActive(true);
+
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_info;
+ memset(&codec_info, 0, sizeof(CodecSpecificInfo));
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.temporalIdx = kNoTemporalIdx;
+
+ EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kVideoCodecVP8, header->codec);
+ EXPECT_EQ(0, header->vp8().pictureId);
+ return true;
+ }));
+ EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
+
+ // State should hold latest used picture id and tl0_pic_idx.
+ std::map<uint32_t, RtpPayloadState> states = router.GetRtpPayloadStates();
+ EXPECT_EQ(1u, states.size());
+ EXPECT_EQ(0, states[kSsrc1].picture_id); // Wrapped.
+ EXPECT_EQ(kInitialTl0PicIdx1, states[kSsrc1].tl0_pic_idx);
+}
+
+TEST(PayloadRouterTest, Tl0PicIdxUpdatedForVp8) {
+ RtpPayloadState state;
+ state.picture_id = kInitialPictureId1;
+ state.tl0_pic_idx = kInitialTl0PicIdx1;
+ std::map<uint32_t, RtpPayloadState> states = {{kSsrc1, state}};
+
+ NiceMock<MockRtpRtcp> rtp;
+ std::vector<RtpRtcp*> modules = {&rtp};
+ PayloadRouter router(modules, {kSsrc1}, kPayloadType, states);
+ router.SetActive(true);
+
+ EncodedImage encoded_image;
+ // Modules are sending for this test.
+ // OnEncodedImage, temporalIdx: 1.
+ CodecSpecificInfo codec_info;
+ memset(&codec_info, 0, sizeof(CodecSpecificInfo));
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.temporalIdx = 1;
+
+ EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kVideoCodecVP8, header->codec);
+ EXPECT_EQ(kInitialPictureId1 + 1, header->vp8().pictureId);
+ EXPECT_EQ(kInitialTl0PicIdx1, header->vp8().tl0PicIdx);
+ return true;
+ }));
+ EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
+
+ // OnEncodedImage, temporalIdx: 0.
+ codec_info.codecSpecific.VP8.temporalIdx = 0;
+
+ EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kVideoCodecVP8, header->codec);
+ EXPECT_EQ(kInitialPictureId1 + 2, header->vp8().pictureId);
+ EXPECT_EQ(kInitialTl0PicIdx1 + 1, header->vp8().tl0PicIdx);
+ return true;
+ }));
+ EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
+
+ // State should hold latest used picture id and tl0_pic_idx.
+ states = router.GetRtpPayloadStates();
+ EXPECT_EQ(1u, states.size());
+ EXPECT_EQ(kInitialPictureId1 + 2, states[kSsrc1].picture_id);
+ EXPECT_EQ(kInitialTl0PicIdx1 + 1, states[kSsrc1].tl0_pic_idx);
+}
+
+TEST(PayloadRouterTest, Tl0PicIdxUpdatedForVp9) {
+ RtpPayloadState state;
+ state.picture_id = kInitialPictureId1;
+ state.tl0_pic_idx = kInitialTl0PicIdx1;
+ std::map<uint32_t, RtpPayloadState> states = {{kSsrc1, state}};
+
+ NiceMock<MockRtpRtcp> rtp;
+ std::vector<RtpRtcp*> modules = {&rtp};
+ PayloadRouter router(modules, {kSsrc1}, kPayloadType, states);
+ router.SetActive(true);
+
+ EncodedImage encoded_image;
+ // Modules are sending for this test.
+ // OnEncodedImage, temporalIdx: 1.
+ CodecSpecificInfo codec_info;
+ memset(&codec_info, 0, sizeof(CodecSpecificInfo));
+ codec_info.codecType = kVideoCodecVP9;
+ codec_info.codecSpecific.VP9.temporal_idx = 1;
+ codec_info.codecSpecific.VP9.first_frame_in_picture = true;
+
+ EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kVideoCodecVP9, header->codec);
+ EXPECT_EQ(kInitialPictureId1 + 1, header->vp9().picture_id);
+ EXPECT_EQ(kInitialTl0PicIdx1, header->vp9().tl0_pic_idx);
+ return true;
+ }));
+ EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
+
+ // OnEncodedImage, temporalIdx: 0.
+ codec_info.codecSpecific.VP9.temporal_idx = 0;
+
+ EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kVideoCodecVP9, header->codec);
+ EXPECT_EQ(kInitialPictureId1 + 2, header->vp9().picture_id);
+ EXPECT_EQ(kInitialTl0PicIdx1 + 1, header->vp9().tl0_pic_idx);
+ return true;
+ }));
+ EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
+
+ // OnEncodedImage, first_frame_in_picture = false
+ codec_info.codecSpecific.VP9.first_frame_in_picture = false;
+
+ EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
+ .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
+ Unused, const RTPVideoHeader* header, Unused) {
+ EXPECT_EQ(kVideoCodecVP9, header->codec);
+ EXPECT_EQ(kInitialPictureId1 + 2, header->vp9().picture_id);
+ EXPECT_EQ(kInitialTl0PicIdx1 + 1, header->vp9().tl0_pic_idx);
+ return true;
+ }));
+ EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));
+
+ EXPECT_EQ(EncodedImageCallback::Result::OK,
+ router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
+
+ // State should hold latest used picture id and tl0_pic_idx.
+ states = router.GetRtpPayloadStates();
+ EXPECT_EQ(1u, states.size());
+ EXPECT_EQ(kInitialPictureId1 + 2, states[kSsrc1].picture_id);
+ EXPECT_EQ(kInitialTl0PicIdx1 + 1, states[kSsrc1].tl0_pic_idx);
+}
+
} // namespace webrtc
diff --git a/call/rtp_payload_params.cc b/call/rtp_payload_params.cc
deleted file mode 100644
index d0915cc..0000000
--- a/call/rtp_payload_params.cc
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "call/rtp_payload_params.h"
-
-#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
-#include "modules/video_coding/include/video_codec_interface.h"
-#include "rtc_base/checks.h"
-#include "rtc_base/random.h"
-#include "rtc_base/timeutils.h"
-
-namespace webrtc {
-
-namespace {
-void PopulateRtpWithCodecSpecifics(const CodecSpecificInfo& info,
- RTPVideoHeader* rtp) {
- rtp->codec = info.codecType;
- switch (info.codecType) {
- case kVideoCodecVP8: {
- rtp->vp8().InitRTPVideoHeaderVP8();
- rtp->vp8().nonReference = info.codecSpecific.VP8.nonReference;
- rtp->vp8().temporalIdx = info.codecSpecific.VP8.temporalIdx;
- rtp->vp8().layerSync = info.codecSpecific.VP8.layerSync;
- rtp->vp8().keyIdx = info.codecSpecific.VP8.keyIdx;
- rtp->simulcastIdx = info.codecSpecific.VP8.simulcastIdx;
- return;
- }
- case kVideoCodecVP9: {
- rtp->vp9().InitRTPVideoHeaderVP9();
- rtp->vp9().inter_pic_predicted =
- info.codecSpecific.VP9.inter_pic_predicted;
- rtp->vp9().flexible_mode = info.codecSpecific.VP9.flexible_mode;
- rtp->vp9().ss_data_available = info.codecSpecific.VP9.ss_data_available;
- rtp->vp9().non_ref_for_inter_layer_pred =
- info.codecSpecific.VP9.non_ref_for_inter_layer_pred;
- rtp->vp9().temporal_idx = info.codecSpecific.VP9.temporal_idx;
- rtp->vp9().spatial_idx = info.codecSpecific.VP9.spatial_idx;
- rtp->vp9().temporal_up_switch = info.codecSpecific.VP9.temporal_up_switch;
- rtp->vp9().inter_layer_predicted =
- info.codecSpecific.VP9.inter_layer_predicted;
- rtp->vp9().gof_idx = info.codecSpecific.VP9.gof_idx;
- rtp->vp9().num_spatial_layers = info.codecSpecific.VP9.num_spatial_layers;
-
- if (info.codecSpecific.VP9.ss_data_available) {
- rtp->vp9().spatial_layer_resolution_present =
- info.codecSpecific.VP9.spatial_layer_resolution_present;
- if (info.codecSpecific.VP9.spatial_layer_resolution_present) {
- for (size_t i = 0; i < info.codecSpecific.VP9.num_spatial_layers;
- ++i) {
- rtp->vp9().width[i] = info.codecSpecific.VP9.width[i];
- rtp->vp9().height[i] = info.codecSpecific.VP9.height[i];
- }
- }
- rtp->vp9().gof.CopyGofInfoVP9(info.codecSpecific.VP9.gof);
- }
-
- rtp->vp9().num_ref_pics = info.codecSpecific.VP9.num_ref_pics;
- for (int i = 0; i < info.codecSpecific.VP9.num_ref_pics; ++i) {
- rtp->vp9().pid_diff[i] = info.codecSpecific.VP9.p_diff[i];
- }
- rtp->vp9().end_of_picture = info.codecSpecific.VP9.end_of_picture;
- return;
- }
- case kVideoCodecH264: {
- auto& h264_header = rtp->video_type_header.emplace<RTPVideoHeaderH264>();
- h264_header.packetization_mode =
- info.codecSpecific.H264.packetization_mode;
- rtp->simulcastIdx = info.codecSpecific.H264.simulcast_idx;
- return;
- }
- case kVideoCodecMultiplex:
- case kVideoCodecGeneric:
- rtp->codec = kVideoCodecGeneric;
- rtp->simulcastIdx = info.codecSpecific.generic.simulcast_idx;
- return;
- default:
- return;
- }
-}
-
-void SetVideoTiming(const EncodedImage& image, VideoSendTiming* timing) {
- if (image.timing_.flags == VideoSendTiming::TimingFrameFlags::kInvalid ||
- image.timing_.flags == VideoSendTiming::TimingFrameFlags::kNotTriggered) {
- timing->flags = VideoSendTiming::TimingFrameFlags::kInvalid;
- return;
- }
-
- timing->encode_start_delta_ms = VideoSendTiming::GetDeltaCappedMs(
- image.capture_time_ms_, image.timing_.encode_start_ms);
- timing->encode_finish_delta_ms = VideoSendTiming::GetDeltaCappedMs(
- image.capture_time_ms_, image.timing_.encode_finish_ms);
- timing->packetization_finish_delta_ms = 0;
- timing->pacer_exit_delta_ms = 0;
- timing->network_timestamp_delta_ms = 0;
- timing->network2_timestamp_delta_ms = 0;
- timing->flags = image.timing_.flags;
-}
-} // namespace
-
-RtpPayloadParams::RtpPayloadParams(const uint32_t ssrc,
- const RtpPayloadState* state)
- : ssrc_(ssrc) {
- Random random(rtc::TimeMicros());
- state_.picture_id =
- state ? state->picture_id : (random.Rand<int16_t>() & 0x7FFF);
- state_.tl0_pic_idx = state ? state->tl0_pic_idx : (random.Rand<uint8_t>());
-}
-RtpPayloadParams::~RtpPayloadParams() {}
-
-RTPVideoHeader RtpPayloadParams::GetRtpVideoHeader(
- const EncodedImage& image,
- const CodecSpecificInfo* codec_specific_info) {
- RTPVideoHeader rtp_video_header;
- if (codec_specific_info) {
- PopulateRtpWithCodecSpecifics(*codec_specific_info, &rtp_video_header);
- }
- rtp_video_header.rotation = image.rotation_;
- rtp_video_header.content_type = image.content_type_;
- rtp_video_header.playout_delay = image.playout_delay_;
-
- SetVideoTiming(image, &rtp_video_header.video_timing);
-
- // Sets picture id and tl0 pic idx.
- const bool first_frame_in_picture =
- (codec_specific_info && codec_specific_info->codecType == kVideoCodecVP9)
- ? codec_specific_info->codecSpecific.VP9.first_frame_in_picture
- : true;
- Set(&rtp_video_header, first_frame_in_picture);
- return rtp_video_header;
-}
-
-uint32_t RtpPayloadParams::ssrc() const {
- return ssrc_;
-}
-
-RtpPayloadState RtpPayloadParams::state() const {
- return state_;
-}
-
-void RtpPayloadParams::Set(RTPVideoHeader* rtp_video_header,
- bool first_frame_in_picture) {
- // Always set picture id. Set tl0_pic_idx iff temporal index is set.
- if (first_frame_in_picture) {
- state_.picture_id = (static_cast<uint16_t>(state_.picture_id) + 1) & 0x7FFF;
- }
- if (rtp_video_header->codec == kVideoCodecVP8) {
- rtp_video_header->vp8().pictureId = state_.picture_id;
-
- if (rtp_video_header->vp8().temporalIdx != kNoTemporalIdx) {
- if (rtp_video_header->vp8().temporalIdx == 0) {
- ++state_.tl0_pic_idx;
- }
- rtp_video_header->vp8().tl0PicIdx = state_.tl0_pic_idx;
- }
- }
- if (rtp_video_header->codec == kVideoCodecVP9) {
- rtp_video_header->vp9().picture_id = state_.picture_id;
-
- // Note that in the case that we have no temporal layers but we do have
- // spatial layers, packets will carry layering info with a temporal_idx of
- // zero, and we then have to set and increment tl0_pic_idx.
- if (rtp_video_header->vp9().temporal_idx != kNoTemporalIdx ||
- rtp_video_header->vp9().spatial_idx != kNoSpatialIdx) {
- if (first_frame_in_picture &&
- (rtp_video_header->vp9().temporal_idx == 0 ||
- rtp_video_header->vp9().temporal_idx == kNoTemporalIdx)) {
- ++state_.tl0_pic_idx;
- }
- rtp_video_header->vp9().tl0_pic_idx = state_.tl0_pic_idx;
- }
- }
-}
-} // namespace webrtc
diff --git a/call/rtp_payload_params.h b/call/rtp_payload_params.h
deleted file mode 100644
index b85fb42..0000000
--- a/call/rtp_payload_params.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef CALL_RTP_PAYLOAD_PARAMS_H_
-#define CALL_RTP_PAYLOAD_PARAMS_H_
-
-#include <map>
-#include <vector>
-
-#include "api/video_codecs/video_encoder.h"
-#include "common_types.h" // NOLINT(build/include)
-#include "modules/rtp_rtcp/source/rtp_video_header.h"
-
-namespace webrtc {
-
-class RTPFragmentationHeader;
-class RtpRtcp;
-
-// Currently only VP8/VP9 specific.
-struct RtpPayloadState {
- int16_t picture_id = -1;
- uint8_t tl0_pic_idx = 0;
-};
-
-// State for setting picture id and tl0 pic idx, for VP8 and VP9
-// TODO(nisse): Make these properties not codec specific.
-class RtpPayloadParams final {
- public:
- RtpPayloadParams(const uint32_t ssrc, const RtpPayloadState* state);
- ~RtpPayloadParams();
-
- RTPVideoHeader GetRtpVideoHeader(
- const EncodedImage& image,
- const CodecSpecificInfo* codec_specific_info);
-
- uint32_t ssrc() const;
-
- RtpPayloadState state() const;
-
- private:
- void Set(RTPVideoHeader* rtp_video_header, bool first_frame_in_picture);
-
- const uint32_t ssrc_;
- RtpPayloadState state_;
-};
-} // namespace webrtc
-#endif // CALL_RTP_PAYLOAD_PARAMS_H_
diff --git a/call/rtp_payload_params_unittest.cc b/call/rtp_payload_params_unittest.cc
deleted file mode 100644
index cdfbc70..0000000
--- a/call/rtp_payload_params_unittest.cc
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <memory>
-
-#include "call/payload_router.h"
-#include "modules/video_coding/include/video_codec_interface.h"
-#include "test/gtest.h"
-
-namespace webrtc {
-namespace {
-const uint32_t kSsrc1 = 12345;
-const uint32_t kSsrc2 = 23456;
-const int16_t kPictureId = 123;
-const int16_t kTl0PicIdx = 20;
-const uint8_t kTemporalIdx = 1;
-const int16_t kInitialPictureId1 = 222;
-const int16_t kInitialTl0PicIdx1 = 99;
-} // namespace
-
-TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_Vp8) {
- RtpPayloadState state2;
- state2.picture_id = kPictureId;
- state2.tl0_pic_idx = kTl0PicIdx;
- std::map<uint32_t, RtpPayloadState> states = {{kSsrc2, state2}};
-
- RtpPayloadParams params(kSsrc2, &state2);
- EncodedImage encoded_image;
- encoded_image.rotation_ = kVideoRotation_90;
- encoded_image.content_type_ = VideoContentType::SCREENSHARE;
-
- CodecSpecificInfo codec_info;
- memset(&codec_info, 0, sizeof(CodecSpecificInfo));
- codec_info.codecType = kVideoCodecVP8;
- codec_info.codecSpecific.VP8.simulcastIdx = 1;
- codec_info.codecSpecific.VP8.temporalIdx = kTemporalIdx;
- codec_info.codecSpecific.VP8.keyIdx = kNoKeyIdx;
- codec_info.codecSpecific.VP8.layerSync = true;
- codec_info.codecSpecific.VP8.nonReference = true;
-
- RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info);
-
- EXPECT_EQ(kVideoRotation_90, header.rotation);
- EXPECT_EQ(VideoContentType::SCREENSHARE, header.content_type);
- EXPECT_EQ(1, header.simulcastIdx);
- EXPECT_EQ(kVideoCodecVP8, header.codec);
- EXPECT_EQ(kPictureId + 1, header.vp8().pictureId);
- EXPECT_EQ(kTemporalIdx, header.vp8().temporalIdx);
- EXPECT_EQ(kTl0PicIdx, header.vp8().tl0PicIdx);
- EXPECT_EQ(kNoKeyIdx, header.vp8().keyIdx);
- EXPECT_TRUE(header.vp8().layerSync);
- EXPECT_TRUE(header.vp8().nonReference);
-}
-
-TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_Vp9) {
- RtpPayloadState state;
- state.picture_id = kPictureId;
- state.tl0_pic_idx = kTl0PicIdx;
- RtpPayloadParams params(kSsrc1, &state);
-
- EncodedImage encoded_image;
- encoded_image.rotation_ = kVideoRotation_90;
- encoded_image.content_type_ = VideoContentType::SCREENSHARE;
-
- CodecSpecificInfo codec_info;
- memset(&codec_info, 0, sizeof(CodecSpecificInfo));
- codec_info.codecType = kVideoCodecVP9;
- codec_info.codecSpecific.VP9.num_spatial_layers = 3;
- codec_info.codecSpecific.VP9.first_frame_in_picture = true;
- codec_info.codecSpecific.VP9.spatial_idx = 0;
- codec_info.codecSpecific.VP9.temporal_idx = 2;
- codec_info.codecSpecific.VP9.end_of_picture = false;
-
- RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info);
-
- EXPECT_EQ(kVideoRotation_90, header.rotation);
- EXPECT_EQ(VideoContentType::SCREENSHARE, header.content_type);
- EXPECT_EQ(kVideoCodecVP9, header.codec);
- EXPECT_EQ(kPictureId + 1, header.vp9().picture_id);
- EXPECT_EQ(kTl0PicIdx, header.vp9().tl0_pic_idx);
- EXPECT_EQ(header.vp9().temporal_idx,
- codec_info.codecSpecific.VP9.temporal_idx);
- EXPECT_EQ(header.vp9().spatial_idx, codec_info.codecSpecific.VP9.spatial_idx);
- EXPECT_EQ(header.vp9().num_spatial_layers,
- codec_info.codecSpecific.VP9.num_spatial_layers);
- EXPECT_EQ(header.vp9().end_of_picture,
- codec_info.codecSpecific.VP9.end_of_picture);
-
- // Next spatial layer.
- codec_info.codecSpecific.VP9.first_frame_in_picture = false;
- codec_info.codecSpecific.VP9.spatial_idx += 1;
- codec_info.codecSpecific.VP9.end_of_picture = true;
-
- header = params.GetRtpVideoHeader(encoded_image, &codec_info);
-
- EXPECT_EQ(kVideoRotation_90, header.rotation);
- EXPECT_EQ(VideoContentType::SCREENSHARE, header.content_type);
- EXPECT_EQ(kVideoCodecVP9, header.codec);
- EXPECT_EQ(kPictureId + 1, header.vp9().picture_id);
- EXPECT_EQ(kTl0PicIdx, header.vp9().tl0_pic_idx);
- EXPECT_EQ(header.vp9().temporal_idx,
- codec_info.codecSpecific.VP9.temporal_idx);
- EXPECT_EQ(header.vp9().spatial_idx, codec_info.codecSpecific.VP9.spatial_idx);
- EXPECT_EQ(header.vp9().num_spatial_layers,
- codec_info.codecSpecific.VP9.num_spatial_layers);
- EXPECT_EQ(header.vp9().end_of_picture,
- codec_info.codecSpecific.VP9.end_of_picture);
-}
-
-TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_H264) {
- RtpPayloadParams params(kSsrc1, {});
-
- EncodedImage encoded_image;
- CodecSpecificInfo codec_info;
- memset(&codec_info, 0, sizeof(CodecSpecificInfo));
- codec_info.codecType = kVideoCodecH264;
- codec_info.codecSpecific.H264.packetization_mode =
- H264PacketizationMode::SingleNalUnit;
-
- RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info);
-
- EXPECT_EQ(0, header.simulcastIdx);
- EXPECT_EQ(kVideoCodecH264, header.codec);
- const auto& h264 = absl::get<RTPVideoHeaderH264>(header.video_type_header);
- EXPECT_EQ(H264PacketizationMode::SingleNalUnit, h264.packetization_mode);
-}
-
-TEST(RtpPayloadParamsTest, PictureIdIsSetForVp8) {
- RtpPayloadState state;
- state.picture_id = kInitialPictureId1;
- state.tl0_pic_idx = kInitialTl0PicIdx1;
-
- EncodedImage encoded_image;
- CodecSpecificInfo codec_info;
- memset(&codec_info, 0, sizeof(CodecSpecificInfo));
- codec_info.codecType = kVideoCodecVP8;
- codec_info.codecSpecific.VP8.simulcastIdx = 0;
-
- RtpPayloadParams params(kSsrc1, &state);
- RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info);
- EXPECT_EQ(kVideoCodecVP8, header.codec);
- EXPECT_EQ(kInitialPictureId1 + 1, header.vp8().pictureId);
-
- // State should hold latest used picture id and tl0_pic_idx.
- state = params.state();
- EXPECT_EQ(kInitialPictureId1 + 1, state.picture_id);
- EXPECT_EQ(kInitialTl0PicIdx1 + 1, state.tl0_pic_idx);
-}
-
-TEST(RtpPayloadParamsTest, PictureIdWraps) {
- RtpPayloadState state;
- state.picture_id = kMaxTwoBytePictureId;
- state.tl0_pic_idx = kInitialTl0PicIdx1;
-
- EncodedImage encoded_image;
- CodecSpecificInfo codec_info;
- memset(&codec_info, 0, sizeof(CodecSpecificInfo));
- codec_info.codecType = kVideoCodecVP8;
- codec_info.codecSpecific.VP8.temporalIdx = kNoTemporalIdx;
-
- RtpPayloadParams params(kSsrc1, &state);
- RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info);
- EXPECT_EQ(kVideoCodecVP8, header.codec);
- EXPECT_EQ(0, header.vp8().pictureId);
-
- // State should hold latest used picture id and tl0_pic_idx.
- EXPECT_EQ(0, params.state().picture_id); // Wrapped.
- EXPECT_EQ(kInitialTl0PicIdx1, params.state().tl0_pic_idx);
-}
-
-TEST(RtpPayloadParamsTest, Tl0PicIdxUpdatedForVp8) {
- RtpPayloadState state;
- state.picture_id = kInitialPictureId1;
- state.tl0_pic_idx = kInitialTl0PicIdx1;
-
- EncodedImage encoded_image;
- // Modules are sending for this test.
- // OnEncodedImage, temporalIdx: 1.
- CodecSpecificInfo codec_info;
- memset(&codec_info, 0, sizeof(CodecSpecificInfo));
- codec_info.codecType = kVideoCodecVP8;
- codec_info.codecSpecific.VP8.temporalIdx = 1;
-
- RtpPayloadParams params(kSsrc1, &state);
- RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info);
-
- EXPECT_EQ(kVideoCodecVP8, header.codec);
- EXPECT_EQ(kInitialPictureId1 + 1, header.vp8().pictureId);
- EXPECT_EQ(kInitialTl0PicIdx1, header.vp8().tl0PicIdx);
-
- // OnEncodedImage, temporalIdx: 0.
- codec_info.codecSpecific.VP8.temporalIdx = 0;
-
- header = params.GetRtpVideoHeader(encoded_image, &codec_info);
- EXPECT_EQ(kVideoCodecVP8, header.codec);
- EXPECT_EQ(kInitialPictureId1 + 2, header.vp8().pictureId);
- EXPECT_EQ(kInitialTl0PicIdx1 + 1, header.vp8().tl0PicIdx);
-
- // State should hold latest used picture id and tl0_pic_idx.
- EXPECT_EQ(kInitialPictureId1 + 2, params.state().picture_id);
- EXPECT_EQ(kInitialTl0PicIdx1 + 1, params.state().tl0_pic_idx);
-}
-
-TEST(RtpPayloadParamsTest, Tl0PicIdxUpdatedForVp9) {
- RtpPayloadState state;
- state.picture_id = kInitialPictureId1;
- state.tl0_pic_idx = kInitialTl0PicIdx1;
-
- EncodedImage encoded_image;
- // Modules are sending for this test.
- // OnEncodedImage, temporalIdx: 1.
- CodecSpecificInfo codec_info;
- memset(&codec_info, 0, sizeof(CodecSpecificInfo));
- codec_info.codecType = kVideoCodecVP9;
- codec_info.codecSpecific.VP9.temporal_idx = 1;
- codec_info.codecSpecific.VP9.first_frame_in_picture = true;
-
- RtpPayloadParams params(kSsrc1, &state);
- RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info);
-
- EXPECT_EQ(kVideoCodecVP9, header.codec);
- EXPECT_EQ(kInitialPictureId1 + 1, header.vp9().picture_id);
- EXPECT_EQ(kInitialTl0PicIdx1, header.vp9().tl0_pic_idx);
-
- // OnEncodedImage, temporalIdx: 0.
- codec_info.codecSpecific.VP9.temporal_idx = 0;
-
- header = params.GetRtpVideoHeader(encoded_image, &codec_info);
-
- EXPECT_EQ(kVideoCodecVP9, header.codec);
- EXPECT_EQ(kInitialPictureId1 + 2, header.vp9().picture_id);
- EXPECT_EQ(kInitialTl0PicIdx1 + 1, header.vp9().tl0_pic_idx);
-
- // OnEncodedImage, first_frame_in_picture = false
- codec_info.codecSpecific.VP9.first_frame_in_picture = false;
-
- header = params.GetRtpVideoHeader(encoded_image, &codec_info);
-
- EXPECT_EQ(kVideoCodecVP9, header.codec);
- EXPECT_EQ(kInitialPictureId1 + 2, header.vp9().picture_id);
- EXPECT_EQ(kInitialTl0PicIdx1 + 1, header.vp9().tl0_pic_idx);
-
- // State should hold latest used picture id and tl0_pic_idx.
- EXPECT_EQ(kInitialPictureId1 + 2, params.state().picture_id);
- EXPECT_EQ(kInitialTl0PicIdx1 + 1, params.state().tl0_pic_idx);
-}
-} // namespace webrtc