Reduce number of RTPVideoSender::SendVideo parameters
use frame_type from the RTPVideoHeader instead of as an extra parameter
merge payload data and payload size into single argument
pass RTPVideoHeader by value (relying on copy elision)
Bug: None
Change-Id: Ie7970af3b198b83b723d84c7a8b047219c4b38c0
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/156400
Commit-Queue: Danil Chapovalov <danilchap@webrtc.org>
Reviewed-by: Niels Moller <nisse@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#29445}
diff --git a/call/rtp_payload_params.cc b/call/rtp_payload_params.cc
index af7bfd2..cc9712c 100644
--- a/call/rtp_payload_params.cc
+++ b/call/rtp_payload_params.cc
@@ -162,6 +162,7 @@
PopulateRtpWithCodecSpecifics(*codec_specific_info, image.SpatialIndex(),
&rtp_video_header);
}
+ rtp_video_header.frame_type = image._frameType;
rtp_video_header.rotation = image.rotation_;
rtp_video_header.content_type = image.content_type_;
rtp_video_header.playout_delay = image.playout_delay_;
diff --git a/call/rtp_video_sender.cc b/call/rtp_video_sender.cc
index ca6132f..73e356d 100644
--- a/call/rtp_video_sender.cc
+++ b/call/rtp_video_sender.cc
@@ -489,8 +489,6 @@
stream_index = encoded_image.SpatialIndex().value_or(0);
}
RTC_DCHECK_LT(stream_index, rtp_streams_.size());
- RTPVideoHeader rtp_video_header = params_[stream_index].GetRtpVideoHeader(
- encoded_image, codec_specific_info, shared_frame_id_);
uint32_t rtp_timestamp =
encoded_image.Timestamp() +
@@ -515,9 +513,10 @@
}
bool send_result = rtp_streams_[stream_index].sender_video->SendVideo(
- encoded_image._frameType, rtp_config_.payload_type, codec_type_,
- rtp_timestamp, encoded_image.capture_time_ms_, encoded_image.data(),
- encoded_image.size(), fragmentation, &rtp_video_header,
+ rtp_config_.payload_type, codec_type_, rtp_timestamp,
+ encoded_image.capture_time_ms_, encoded_image, fragmentation,
+ params_[stream_index].GetRtpVideoHeader(
+ encoded_image, codec_specific_info, shared_frame_id_),
expected_retransmission_time_ms);
if (frame_count_observer_) {
FrameCounts& counts = frame_counts_[stream_index];
diff --git a/modules/rtp_rtcp/source/nack_rtx_unittest.cc b/modules/rtp_rtcp/source/nack_rtx_unittest.cc
index 62d5f98..a75fd6e 100644
--- a/modules/rtp_rtcp/source/nack_rtx_unittest.cc
+++ b/modules/rtp_rtcp/source/nack_rtx_unittest.cc
@@ -121,7 +121,6 @@
: rtp_rtcp_module_(nullptr),
transport_(kTestRtxSsrc),
rtx_stream_(&media_stream_, rtx_associated_payload_types_, kTestSsrc),
- payload_data_length(sizeof(payload_data)),
fake_clock(123456),
retransmission_rate_limiter_(&fake_clock, kMaxRttMs) {}
~RtpRtcpRtxNackTest() override {}
@@ -159,7 +158,7 @@
media_receiver_ = transport_.stream_receiver_controller_.CreateReceiver(
kTestSsrc, &media_stream_);
- for (size_t n = 0; n < payload_data_length; n++) {
+ for (size_t n = 0; n < sizeof(payload_data); n++) {
payload_data[n] = n % 10;
}
}
@@ -209,10 +208,10 @@
RTPVideoHeader video_header;
EXPECT_TRUE(rtp_rtcp_module_->OnSendingRtpFrame(timestamp, timestamp / 90,
kPayloadType, false));
+ video_header.frame_type = VideoFrameType::kVideoFrameDelta;
EXPECT_TRUE(rtp_sender_video_->SendVideo(
- VideoFrameType::kVideoFrameDelta, kPayloadType,
- VideoCodecType::kVideoCodecGeneric, timestamp, timestamp / 90,
- payload_data, payload_data_length, nullptr, &video_header, 0));
+ kPayloadType, VideoCodecType::kVideoCodecGeneric, timestamp,
+ timestamp / 90, payload_data, nullptr, video_header, 0));
// Min required delay until retransmit = 5 + RTT ms (RTT = 0).
fake_clock.AdvanceTimeMilliseconds(5);
int length = BuildNackList(nack_list);
@@ -236,7 +235,6 @@
VerifyingMediaStream media_stream_;
RtxReceiveStream rtx_stream_;
uint8_t payload_data[65000];
- size_t payload_data_length;
SimulatedClock fake_clock;
RateLimiter retransmission_rate_limiter_;
std::unique_ptr<RtpStreamReceiverInterface> media_receiver_;
@@ -261,10 +259,10 @@
RTPVideoHeader video_header;
EXPECT_TRUE(rtp_rtcp_module_->OnSendingRtpFrame(timestamp, timestamp / 90,
kPayloadType, false));
+ video_header.frame_type = VideoFrameType::kVideoFrameDelta;
EXPECT_TRUE(rtp_sender_video_->SendVideo(
- VideoFrameType::kVideoFrameDelta, kPayloadType,
- VideoCodecType::kVideoCodecGeneric, timestamp, timestamp / 90,
- payload_data, payload_data_length, nullptr, &video_header, 0));
+ kPayloadType, VideoCodecType::kVideoCodecGeneric, timestamp,
+ timestamp / 90, payload_data, nullptr, video_header, 0));
// Prepare next frame.
timestamp += 3000;
fake_clock.AdvanceTimeMilliseconds(33);
diff --git a/modules/rtp_rtcp/source/rtp_format.cc b/modules/rtp_rtcp/source/rtp_format.cc
index df9cb02..47838cb 100644
--- a/modules/rtp_rtcp/source/rtp_format.cc
+++ b/modules/rtp_rtcp/source/rtp_format.cc
@@ -30,7 +30,6 @@
PayloadSizeLimits limits,
// Codec-specific details.
const RTPVideoHeader& rtp_video_header,
- VideoFrameType frame_type,
const RTPFragmentationHeader* fragmentation) {
if (!type) {
// Use raw packetizer.
@@ -56,8 +55,8 @@
return std::make_unique<RtpPacketizerVp9>(payload, limits, vp9);
}
default: {
- return std::make_unique<RtpPacketizerGeneric>(
- payload, limits, rtp_video_header, frame_type);
+ return std::make_unique<RtpPacketizerGeneric>(payload, limits,
+ rtp_video_header);
}
}
}
diff --git a/modules/rtp_rtcp/source/rtp_format.h b/modules/rtp_rtcp/source/rtp_format.h
index 2acf319..1c49811 100644
--- a/modules/rtp_rtcp/source/rtp_format.h
+++ b/modules/rtp_rtcp/source/rtp_format.h
@@ -42,7 +42,6 @@
PayloadSizeLimits limits,
// Codec-specific details.
const RTPVideoHeader& rtp_video_header,
- VideoFrameType frame_type,
const RTPFragmentationHeader* fragmentation);
virtual ~RtpPacketizer() = default;
diff --git a/modules/rtp_rtcp/source/rtp_format_video_generic.cc b/modules/rtp_rtcp/source/rtp_format_video_generic.cc
index 3c0fe03..3c9b162 100644
--- a/modules/rtp_rtcp/source/rtp_format_video_generic.cc
+++ b/modules/rtp_rtcp/source/rtp_format_video_generic.cc
@@ -26,10 +26,9 @@
RtpPacketizerGeneric::RtpPacketizerGeneric(
rtc::ArrayView<const uint8_t> payload,
PayloadSizeLimits limits,
- const RTPVideoHeader& rtp_video_header,
- VideoFrameType frame_type)
+ const RTPVideoHeader& rtp_video_header)
: remaining_payload_(payload) {
- BuildHeader(rtp_video_header, frame_type);
+ BuildHeader(rtp_video_header);
limits.max_payload_len -= header_size_;
payload_sizes_ = SplitAboutEqually(payload.size(), limits);
@@ -82,11 +81,10 @@
return true;
}
-void RtpPacketizerGeneric::BuildHeader(const RTPVideoHeader& rtp_video_header,
- VideoFrameType frame_type) {
+void RtpPacketizerGeneric::BuildHeader(const RTPVideoHeader& rtp_video_header) {
header_size_ = kGenericHeaderLength;
header_[0] = RtpFormatVideoGeneric::kFirstPacketBit;
- if (frame_type == VideoFrameType::kVideoFrameKey) {
+ if (rtp_video_header.frame_type == VideoFrameType::kVideoFrameKey) {
header_[0] |= RtpFormatVideoGeneric::kKeyFrameBit;
}
if (rtp_video_header.generic.has_value()) {
diff --git a/modules/rtp_rtcp/source/rtp_format_video_generic.h b/modules/rtp_rtcp/source/rtp_format_video_generic.h
index 43d4bcf..e091c89 100644
--- a/modules/rtp_rtcp/source/rtp_format_video_generic.h
+++ b/modules/rtp_rtcp/source/rtp_format_video_generic.h
@@ -38,8 +38,7 @@
// Packets returned by |NextPacket| will contain the generic payload header.
RtpPacketizerGeneric(rtc::ArrayView<const uint8_t> payload,
PayloadSizeLimits limits,
- const RTPVideoHeader& rtp_video_header,
- VideoFrameType frametype);
+ const RTPVideoHeader& rtp_video_header);
// Initialize with payload from encoder.
// The payload_data must be exactly one encoded generic frame.
// Packets returned by |NextPacket| will contain raw payload without the
@@ -58,8 +57,7 @@
private:
// Fills header_ and header_size_ members.
- void BuildHeader(const RTPVideoHeader& rtp_video_header,
- VideoFrameType frame_type);
+ void BuildHeader(const RTPVideoHeader& rtp_video_header);
uint8_t header_[3];
size_t header_size_;
diff --git a/modules/rtp_rtcp/source/rtp_format_video_generic_unittest.cc b/modules/rtp_rtcp/source/rtp_format_video_generic_unittest.cc
index 67b7cba..a1a2d47 100644
--- a/modules/rtp_rtcp/source/rtp_format_video_generic_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_format_video_generic_unittest.cc
@@ -48,8 +48,7 @@
RtpPacketizer::PayloadSizeLimits limits;
limits.max_payload_len = 6;
- RtpPacketizerGeneric packetizer(kPayload, limits, RTPVideoHeader(),
- VideoFrameType::kVideoFrameKey);
+ RtpPacketizerGeneric packetizer(kPayload, limits, RTPVideoHeader());
std::vector<int> payload_sizes = NextPacketFillPayloadSizes(&packetizer);
@@ -62,8 +61,7 @@
RtpPacketizer::PayloadSizeLimits limits;
limits.max_payload_len = 6;
- RtpPacketizerGeneric packetizer(kPayload, limits, RTPVideoHeader(),
- VideoFrameType::kVideoFrameKey);
+ RtpPacketizerGeneric packetizer(kPayload, limits, RTPVideoHeader());
std::vector<int> payload_sizes = NextPacketFillPayloadSizes(&packetizer);
@@ -78,8 +76,8 @@
RTPVideoHeader rtp_video_header;
rtp_video_header.generic.emplace().frame_id = 37;
- RtpPacketizerGeneric packetizer(kPayload, kNoSizeLimits, rtp_video_header,
- VideoFrameType::kVideoFrameKey);
+ rtp_video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ RtpPacketizerGeneric packetizer(kPayload, kNoSizeLimits, rtp_video_header);
RtpPacketToSend packet(nullptr);
ASSERT_TRUE(packetizer.NextPacket(&packet));
@@ -100,8 +98,7 @@
limits.max_payload_len = 6;
RTPVideoHeader rtp_video_header;
rtp_video_header.generic.emplace().frame_id = 37;
- RtpPacketizerGeneric packetizer(kPayload, limits, rtp_video_header,
- VideoFrameType::kVideoFrameKey);
+ RtpPacketizerGeneric packetizer(kPayload, limits, rtp_video_header);
std::vector<int> payload_sizes = NextPacketFillPayloadSizes(&packetizer);
@@ -116,8 +113,7 @@
limits.max_payload_len = 6;
RTPVideoHeader rtp_video_header;
rtp_video_header.generic.emplace().frame_id = 37;
- RtpPacketizerGeneric packetizer(kPayload, limits, rtp_video_header,
- VideoFrameType::kVideoFrameKey);
+ RtpPacketizerGeneric packetizer(kPayload, limits, rtp_video_header);
std::vector<int> payload_sizes = NextPacketFillPayloadSizes(&packetizer);
// With kPayloadSize > max_payload_len^2, there should be packets that use
@@ -131,8 +127,8 @@
RTPVideoHeader rtp_video_header;
rtp_video_header.generic.emplace().frame_id = 0x8137;
- RtpPacketizerGeneric packetizer(kPayload, kNoSizeLimits, rtp_video_header,
- VideoFrameType::kVideoFrameKey);
+ rtp_video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ RtpPacketizerGeneric packetizer(kPayload, kNoSizeLimits, rtp_video_header);
RtpPacketToSend packet(nullptr);
ASSERT_TRUE(packetizer.NextPacket(&packet));
@@ -148,8 +144,7 @@
const int kPayloadSize = 13;
const uint8_t kPayload[kPayloadSize] = {};
- RtpPacketizerGeneric packetizer(kPayload, kNoSizeLimits, RTPVideoHeader(),
- VideoFrameType::kVideoFrameKey);
+ RtpPacketizerGeneric packetizer(kPayload, kNoSizeLimits, RTPVideoHeader());
RtpPacketToSend packet(nullptr);
ASSERT_TRUE(packetizer.NextPacket(&packet));
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
index 34944bc..0b681cf 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
@@ -212,6 +212,7 @@
RTPVideoHeaderVP8 vp8_header = {};
vp8_header.temporalIdx = tid;
RTPVideoHeader rtp_video_header;
+ rtp_video_header.frame_type = VideoFrameType::kVideoFrameKey;
rtp_video_header.width = codec_.width;
rtp_video_header.height = codec_.height;
rtp_video_header.rotation = kVideoRotation_0;
@@ -225,10 +226,8 @@
const uint8_t payload[100] = {0};
EXPECT_TRUE(module->impl_->OnSendingRtpFrame(0, 0, codec_.plType, true));
- EXPECT_TRUE(sender->SendVideo(VideoFrameType::kVideoFrameKey, codec_.plType,
- VideoCodecType::kVideoCodecVP8, 0, 0, payload,
- sizeof(payload), nullptr, &rtp_video_header,
- 0));
+ EXPECT_TRUE(sender->SendVideo(codec_.plType, VideoCodecType::kVideoCodecVP8,
+ 0, 0, payload, nullptr, rtp_video_header, 0));
}
void IncomingRtcpNack(const RtpRtcpModule* module, uint16_t sequence_number) {
diff --git a/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
index 90b92b3..da7ba4f 100644
--- a/modules/rtp_rtcp/source/rtp_sender_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
@@ -610,10 +610,10 @@
.Times(1);
int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
fake_clock_.AdvanceTimeMilliseconds(10);
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video.SendVideo(
- VideoFrameType::kVideoFrameKey, kPayloadType, kCodecType,
- capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
- kPayloadData, sizeof(kPayloadData), nullptr, &video_header,
+ kPayloadType, kCodecType, capture_time_ms * kCaptureTimeMsToRtpTimestamp,
+ capture_time_ms, kPayloadData, nullptr, video_header,
kDefaultExpectedRetransmissionTimeMs));
// Send another packet with 20 ms delay. The average, max and total should be
@@ -622,10 +622,10 @@
SendSideDelayUpdated(15, 20, 30, kSsrc))
.Times(1);
fake_clock_.AdvanceTimeMilliseconds(10);
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video.SendVideo(
- VideoFrameType::kVideoFrameKey, kPayloadType, kCodecType,
- capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
- kPayloadData, sizeof(kPayloadData), nullptr, &video_header,
+ kPayloadType, kCodecType, capture_time_ms * kCaptureTimeMsToRtpTimestamp,
+ capture_time_ms, kPayloadData, nullptr, video_header,
kDefaultExpectedRetransmissionTimeMs));
// Send another packet at the same time, which replaces the last packet.
@@ -635,10 +635,10 @@
EXPECT_CALL(send_side_delay_observer_, SendSideDelayUpdated(5, 10, 30, kSsrc))
.Times(1);
capture_time_ms = fake_clock_.TimeInMilliseconds();
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video.SendVideo(
- VideoFrameType::kVideoFrameKey, kPayloadType, kCodecType,
- capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
- kPayloadData, sizeof(kPayloadData), nullptr, &video_header,
+ kPayloadType, kCodecType, capture_time_ms * kCaptureTimeMsToRtpTimestamp,
+ capture_time_ms, kPayloadData, nullptr, video_header,
kDefaultExpectedRetransmissionTimeMs));
// Send a packet 1 second later. The earlier packets should have timed
@@ -649,10 +649,10 @@
fake_clock_.AdvanceTimeMilliseconds(1);
EXPECT_CALL(send_side_delay_observer_, SendSideDelayUpdated(1, 1, 31, kSsrc))
.Times(1);
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video.SendVideo(
- VideoFrameType::kVideoFrameKey, kPayloadType, kCodecType,
- capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms,
- kPayloadData, sizeof(kPayloadData), nullptr, &video_header,
+ kPayloadType, kCodecType, capture_time_ms * kCaptureTimeMsToRtpTimestamp,
+ capture_time_ms, kPayloadData, nullptr, video_header,
kDefaultExpectedRetransmissionTimeMs));
}
@@ -1093,10 +1093,10 @@
// Send keyframe
RTPVideoHeader video_header;
- ASSERT_TRUE(rtp_sender_video.SendVideo(
- VideoFrameType::kVideoFrameKey, kPayloadType, kCodecType, 1234, 4321,
- payload, sizeof(payload), nullptr, &video_header,
- kDefaultExpectedRetransmissionTimeMs));
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, kCodecType, 1234, 4321,
+ payload, nullptr, video_header,
+ kDefaultExpectedRetransmissionTimeMs));
auto sent_payload = transport_.last_sent_packet().payload();
uint8_t generic_header = sent_payload[0];
@@ -1109,10 +1109,10 @@
payload[1] = 42;
payload[4] = 13;
- ASSERT_TRUE(rtp_sender_video.SendVideo(
- VideoFrameType::kVideoFrameDelta, kPayloadType, kCodecType, 1234, 4321,
- payload, sizeof(payload), nullptr, &video_header,
- kDefaultExpectedRetransmissionTimeMs));
+ video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, kCodecType, 1234, 4321,
+ payload, nullptr, video_header,
+ kDefaultExpectedRetransmissionTimeMs));
sent_payload = transport_.last_sent_packet().payload();
generic_header = sent_payload[0];
@@ -1136,10 +1136,10 @@
// Send a frame.
RTPVideoHeader video_header;
- ASSERT_TRUE(rtp_sender_video.SendVideo(
- VideoFrameType::kVideoFrameKey, kPayloadType, absl::nullopt, 1234, 4321,
- payload, sizeof(payload), nullptr, &video_header,
- kDefaultExpectedRetransmissionTimeMs));
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, absl::nullopt, 1234,
+ 4321, payload, nullptr, video_header,
+ kDefaultExpectedRetransmissionTimeMs));
auto sent_payload = transport_.last_sent_packet().payload();
EXPECT_THAT(sent_payload, ElementsAreArray(payload));
@@ -1212,10 +1212,10 @@
}
});
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video.SendVideo(
- VideoFrameType::kVideoFrameKey, kMediaPayloadType, kCodecType,
- kTimestamp, fake_clock_.TimeInMilliseconds(), kPayloadData,
- sizeof(kPayloadData), nullptr, &video_header,
+ kMediaPayloadType, kCodecType, kTimestamp,
+ fake_clock_.TimeInMilliseconds(), kPayloadData, nullptr, video_header,
kDefaultExpectedRetransmissionTimeMs));
ASSERT_TRUE(media_packet != nullptr);
ASSERT_TRUE(fec_packet != nullptr);
@@ -1308,10 +1308,10 @@
Pointee(Property(&RtpPacketToSend::Ssrc, kFlexFecSsrc)))))
.Times(0); // Not called because packet should not be protected.
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video.SendVideo(
- VideoFrameType::kVideoFrameKey, kMediaPayloadType, kCodecType,
- kTimestamp, kCaptureTimeMs, kPayloadData, sizeof(kPayloadData), nullptr,
- &video_header, kDefaultExpectedRetransmissionTimeMs));
+ kMediaPayloadType, kCodecType, kTimestamp, kCaptureTimeMs, kPayloadData,
+ nullptr, video_header, kDefaultExpectedRetransmissionTimeMs));
EXPECT_TRUE(
rtp_sender_->TrySendPacket(rtp_packet.get(), PacedPacketInfo()));
@@ -1350,10 +1350,11 @@
});
video_header.video_timing.flags = VideoSendTiming::kInvalid;
- EXPECT_TRUE(rtp_sender_video.SendVideo(
- VideoFrameType::kVideoFrameKey, kMediaPayloadType, kCodecType,
- kTimestamp + 1, kCaptureTimeMs + 1, kPayloadData, sizeof(kPayloadData),
- nullptr, &video_header, kDefaultExpectedRetransmissionTimeMs));
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ EXPECT_TRUE(rtp_sender_video.SendVideo(kMediaPayloadType, kCodecType,
+ kTimestamp + 1, kCaptureTimeMs + 1,
+ kPayloadData, nullptr, video_header,
+ kDefaultExpectedRetransmissionTimeMs));
ASSERT_TRUE(media_packet2 != nullptr);
ASSERT_TRUE(fec_packet != nullptr);
@@ -1418,10 +1419,11 @@
LogProxy(SameRtcEventTypeAs(RtcEvent::Type::RtpPacketOutgoing)))
.Times(2);
RTPVideoHeader video_header;
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video.SendVideo(
- VideoFrameType::kVideoFrameKey, kMediaPayloadType, kCodecType, kTimestamp,
- fake_clock_.TimeInMilliseconds(), kPayloadData, sizeof(kPayloadData),
- nullptr, &video_header, kDefaultExpectedRetransmissionTimeMs));
+ kMediaPayloadType, kCodecType, kTimestamp,
+ fake_clock_.TimeInMilliseconds(), kPayloadData, nullptr, video_header,
+ kDefaultExpectedRetransmissionTimeMs));
ASSERT_EQ(2, transport_.packets_sent());
const RtpPacketReceived& media_packet = transport_.sent_packets_[0];
@@ -1693,10 +1695,10 @@
for (size_t i = 0; i < kNumMediaPackets; ++i) {
RTPVideoHeader video_header;
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video.SendVideo(
- VideoFrameType::kVideoFrameKey, kMediaPayloadType, kCodecType,
- kTimestamp, fake_clock_.TimeInMilliseconds(), kPayloadData,
- sizeof(kPayloadData), nullptr, &video_header,
+ kMediaPayloadType, kCodecType, kTimestamp,
+ fake_clock_.TimeInMilliseconds(), kPayloadData, nullptr, video_header,
kDefaultExpectedRetransmissionTimeMs));
fake_clock_.AdvanceTimeMilliseconds(kTimeBetweenPacketsMs);
@@ -1777,9 +1779,9 @@
// Send a few frames.
RTPVideoHeader video_header;
for (uint32_t i = 0; i < kNumPackets; ++i) {
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
ASSERT_TRUE(rtp_sender_video.SendVideo(
- VideoFrameType::kVideoFrameKey, kPayloadType, kCodecType, 1234, 4321,
- payload, sizeof(payload), nullptr, &video_header,
+ kPayloadType, kCodecType, 1234, 4321, payload, nullptr, video_header,
kDefaultExpectedRetransmissionTimeMs));
fake_clock_.AdvanceTimeMilliseconds(kPacketInterval);
}
@@ -1854,10 +1856,10 @@
// Send a frame.
RTPVideoHeader video_header;
- ASSERT_TRUE(rtp_sender_video.SendVideo(
- VideoFrameType::kVideoFrameKey, kPayloadType, kCodecType, 1234, 4321,
- payload, sizeof(payload), nullptr, &video_header,
- kDefaultExpectedRetransmissionTimeMs));
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, kCodecType, 1234, 4321,
+ payload, nullptr, video_header,
+ kDefaultExpectedRetransmissionTimeMs));
StreamDataCounters expected;
expected.transmitted.payload_bytes = 6;
expected.transmitted.header_bytes = 12;
@@ -1925,10 +1927,10 @@
fec_params.fec_rate = 1;
fec_params.max_fec_frames = 1;
rtp_sender_video.SetFecParameters(fec_params, fec_params);
- ASSERT_TRUE(rtp_sender_video.SendVideo(
- VideoFrameType::kVideoFrameDelta, kPayloadType, kCodecType, 1234, 4321,
- payload, sizeof(payload), nullptr, &video_header,
- kDefaultExpectedRetransmissionTimeMs));
+ video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, kCodecType, 1234, 4321,
+ payload, nullptr, video_header,
+ kDefaultExpectedRetransmissionTimeMs));
expected.transmitted.payload_bytes = 28;
expected.transmitted.header_bytes = 24;
expected.transmitted.packets = 2;
diff --git a/modules/rtp_rtcp/source/rtp_sender_video.cc b/modules/rtp_rtcp/source/rtp_sender_video.cc
index fc40a97..b6799c9 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -68,7 +68,6 @@
void AddRtpHeaderExtensions(const RTPVideoHeader& video_header,
const absl::optional<PlayoutDelay>& playout_delay,
- VideoFrameType frame_type,
bool set_video_rotation,
bool set_color_space,
bool set_frame_marking,
@@ -86,7 +85,8 @@
packet->SetExtension<VideoOrientation>(video_header.rotation);
// Report content type only for key frames.
- if (last_packet && frame_type == VideoFrameType::kVideoFrameKey &&
+ if (last_packet &&
+ video_header.frame_type == VideoFrameType::kVideoFrameKey &&
video_header.content_type != VideoContentType::UNSPECIFIED)
packet->SetExtension<VideoContentTypeExtension>(video_header.content_type);
@@ -130,7 +130,7 @@
generic_descriptor.SetTemporalLayer(video_header.generic->temporal_index);
- if (frame_type == VideoFrameType::kVideoFrameKey) {
+ if (video_header.frame_type == VideoFrameType::kVideoFrameKey) {
generic_descriptor.SetResolution(video_header.width,
video_header.height);
}
@@ -144,14 +144,14 @@
}
}
-bool MinimizeDescriptor(const RTPVideoHeader& full, RTPVideoHeader* minimized) {
- if (full.codec == VideoCodecType::kVideoCodecVP8) {
- minimized->codec = VideoCodecType::kVideoCodecVP8;
- const auto& vp8 = absl::get<RTPVideoHeaderVP8>(full.video_type_header);
+bool MinimizeDescriptor(RTPVideoHeader* video_header) {
+ if (auto* vp8 =
+ absl::get_if<RTPVideoHeaderVP8>(&video_header->video_type_header)) {
// Set minimum fields the RtpPacketizer is using to create vp8 packets.
- auto& min_vp8 = minimized->video_type_header.emplace<RTPVideoHeaderVP8>();
- min_vp8.InitRTPVideoHeaderVP8();
- min_vp8.nonReference = vp8.nonReference;
+ // nonReference is the only field that doesn't require extra space.
+ bool non_reference = vp8->nonReference;
+ vp8->InitRTPVideoHeaderVP8();
+ vp8->nonReference = non_reference;
return true;
}
// TODO(danilchap): Reduce vp9 codec specific descriptor too.
@@ -488,9 +488,12 @@
}
codec_type = it->second;
}
- return SendVideo(frame_type, payload_type, codec_type, rtp_timestamp,
- capture_time_ms, payload_data, payload_size, fragmentation,
- video_header, expected_retransmission_time_ms);
+ RTPVideoHeader rtp_video_header = *video_header;
+ rtp_video_header.frame_type = frame_type;
+ return SendVideo(payload_type, codec_type, rtp_timestamp, capture_time_ms,
+ rtc::MakeArrayView(payload_data, payload_size),
+ fragmentation, rtp_video_header,
+ expected_retransmission_time_ms);
}
bool RTPSenderVideo::SendVideo(
@@ -504,16 +507,32 @@
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* video_header,
absl::optional<int64_t> expected_retransmission_time_ms) {
+ RTPVideoHeader rtp_video_header = *video_header;
+ rtp_video_header.frame_type = frame_type;
+ return SendVideo(payload_type, codec_type, rtp_timestamp, capture_time_ms,
+ rtc::MakeArrayView(payload_data, payload_size),
+ fragmentation, rtp_video_header,
+ expected_retransmission_time_ms);
+}
+
+bool RTPSenderVideo::SendVideo(
+ int payload_type,
+ absl::optional<VideoCodecType> codec_type,
+ uint32_t rtp_timestamp,
+ int64_t capture_time_ms,
+ rtc::ArrayView<const uint8_t> payload,
+ const RTPFragmentationHeader* fragmentation,
+ RTPVideoHeader video_header,
+ absl::optional<int64_t> expected_retransmission_time_ms) {
TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", capture_time_ms, "Send", "type",
- FrameTypeToString(frame_type));
+ FrameTypeToString(video_header.frame_type));
RTC_CHECK_RUNS_SERIALIZED(&send_checker_);
- if (frame_type == VideoFrameType::kEmptyFrame)
+ if (video_header.frame_type == VideoFrameType::kEmptyFrame)
return true;
- if (payload_size == 0)
+ if (payload.empty())
return false;
- RTC_CHECK(video_header);
int32_t retransmission_settings = retransmission_settings_;
if (codec_type == VideoCodecType::kVideoCodecH264) {
@@ -522,11 +541,11 @@
}
bool set_frame_marking =
- video_header->codec == kVideoCodecH264 &&
- video_header->frame_marking.temporal_id != kNoTemporalIdx;
+ video_header.codec == kVideoCodecH264 &&
+ video_header.frame_marking.temporal_id != kNoTemporalIdx;
const absl::optional<PlayoutDelay> playout_delay =
- playout_delay_oracle_->PlayoutDelayToSend(video_header->playout_delay);
+ playout_delay_oracle_->PlayoutDelayToSend(video_header.playout_delay);
// According to
// http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
@@ -540,24 +559,26 @@
// value sent.
// Set rotation when key frame or when changed (to follow standard).
// Or when different from 0 (to follow current receiver implementation).
- bool set_video_rotation = frame_type == VideoFrameType::kVideoFrameKey ||
- video_header->rotation != last_rotation_ ||
- video_header->rotation != kVideoRotation_0;
- last_rotation_ = video_header->rotation;
+ bool set_video_rotation =
+ video_header.frame_type == VideoFrameType::kVideoFrameKey ||
+ video_header.rotation != last_rotation_ ||
+ video_header.rotation != kVideoRotation_0;
+ last_rotation_ = video_header.rotation;
// Send color space when changed or if the frame is a key frame. Keep
// sending color space information until the first base layer frame to
// guarantee that the information is retrieved by the receiver.
bool set_color_space;
- if (video_header->color_space != last_color_space_) {
- last_color_space_ = video_header->color_space;
+ if (video_header.color_space != last_color_space_) {
+ last_color_space_ = video_header.color_space;
set_color_space = true;
- transmit_color_space_next_frame_ = !IsBaseLayer(*video_header);
+ transmit_color_space_next_frame_ = !IsBaseLayer(video_header);
} else {
- set_color_space = frame_type == VideoFrameType::kVideoFrameKey ||
- transmit_color_space_next_frame_;
+ set_color_space =
+ video_header.frame_type == VideoFrameType::kVideoFrameKey ||
+ transmit_color_space_next_frame_;
transmit_color_space_next_frame_ =
- transmit_color_space_next_frame_ ? !IsBaseLayer(*video_header) : false;
+ transmit_color_space_next_frame_ ? !IsBaseLayer(video_header) : false;
}
size_t fec_packet_overhead;
@@ -566,8 +587,9 @@
rtc::CritScope cs(&crit_);
// FEC settings.
const FecProtectionParams& fec_params =
- frame_type == VideoFrameType::kVideoFrameKey ? key_fec_params_
- : delta_fec_params_;
+ video_header.frame_type == VideoFrameType::kVideoFrameKey
+ ? key_fec_params_
+ : delta_fec_params_;
if (flexfec_enabled())
flexfec_sender_->SetFecParameters(fec_params);
if (ulpfec_enabled())
@@ -593,17 +615,17 @@
auto middle_packet = std::make_unique<RtpPacketToSend>(*single_packet);
auto last_packet = std::make_unique<RtpPacketToSend>(*single_packet);
// Simplest way to estimate how much extensions would occupy is to set them.
- AddRtpHeaderExtensions(*video_header, playout_delay, frame_type,
- set_video_rotation, set_color_space, set_frame_marking,
+ AddRtpHeaderExtensions(video_header, playout_delay, set_video_rotation,
+ set_color_space, set_frame_marking,
/*first=*/true, /*last=*/true, single_packet.get());
- AddRtpHeaderExtensions(*video_header, playout_delay, frame_type,
- set_video_rotation, set_color_space, set_frame_marking,
+ AddRtpHeaderExtensions(video_header, playout_delay, set_video_rotation,
+ set_color_space, set_frame_marking,
/*first=*/true, /*last=*/false, first_packet.get());
- AddRtpHeaderExtensions(*video_header, playout_delay, frame_type,
- set_video_rotation, set_color_space, set_frame_marking,
+ AddRtpHeaderExtensions(video_header, playout_delay, set_video_rotation,
+ set_color_space, set_frame_marking,
/*first=*/false, /*last=*/false, middle_packet.get());
- AddRtpHeaderExtensions(*video_header, playout_delay, frame_type,
- set_video_rotation, set_color_space, set_frame_marking,
+ AddRtpHeaderExtensions(video_header, playout_delay, set_video_rotation,
+ set_color_space, set_frame_marking,
/*first=*/false, /*last=*/true, last_packet.get());
RTC_DCHECK_GT(packet_capacity, single_packet->headers_size());
@@ -625,9 +647,6 @@
limits.last_packet_reduction_len =
last_packet->headers_size() - middle_packet->headers_size();
- RTPVideoHeader minimized_video_header;
- const RTPVideoHeader* packetize_video_header = video_header;
-
rtc::ArrayView<const uint8_t> generic_descriptor_raw_00 =
first_packet->GetRawExtension<RtpGenericFrameDescriptorExtension00>();
rtc::ArrayView<const uint8_t> generic_descriptor_raw_01 =
@@ -639,13 +658,13 @@
return false;
}
+ // Minimization of the vp8 descriptor may erase temporal_id, so save it.
+ const uint8_t temporal_id = GetTemporalId(video_header);
rtc::ArrayView<const uint8_t> generic_descriptor_raw =
!generic_descriptor_raw_01.empty() ? generic_descriptor_raw_01
: generic_descriptor_raw_00;
if (!generic_descriptor_raw.empty()) {
- if (MinimizeDescriptor(*video_header, &minimized_video_header)) {
- packetize_video_header = &minimized_video_header;
- }
+ MinimizeDescriptor(&video_header);
}
// TODO(benwright@webrtc.org) - Allocate enough to always encrypt inline.
@@ -657,7 +676,7 @@
const size_t max_ciphertext_size =
frame_encryptor_->GetMaxCiphertextByteSize(cricket::MEDIA_TYPE_VIDEO,
- payload_size);
+ payload.size());
encrypted_video_payload.SetSize(max_ciphertext_size);
size_t bytes_written = 0;
@@ -670,14 +689,12 @@
if (frame_encryptor_->Encrypt(
cricket::MEDIA_TYPE_VIDEO, first_packet->Ssrc(), additional_data,
- rtc::MakeArrayView(payload_data, payload_size),
- encrypted_video_payload, &bytes_written) != 0) {
+ payload, encrypted_video_payload, &bytes_written) != 0) {
return false;
}
encrypted_video_payload.SetSize(bytes_written);
- payload_data = encrypted_video_payload.data();
- payload_size = encrypted_video_payload.size();
+ payload = encrypted_video_payload;
} else if (require_frame_encryption_) {
RTC_LOG(LS_WARNING)
<< "No FrameEncryptor is attached to this video sending stream but "
@@ -685,10 +702,8 @@
}
std::unique_ptr<RtpPacketizer> packetizer = RtpPacketizer::Create(
- codec_type, rtc::MakeArrayView(payload_data, payload_size), limits,
- *packetize_video_header, frame_type, fragmentation);
+ codec_type, payload, limits, video_header, fragmentation);
- const uint8_t temporal_id = GetTemporalId(*video_header);
// TODO(bugs.webrtc.org/10714): retransmission_settings_ should generally be
// replaced by expected_retransmission_time_ms.has_value(). For now, though,
// only VP8 with an injected frame buffer controller actually controls it.
@@ -706,7 +721,7 @@
unpacketized_payload_size += fragmentation->fragmentationLength[i];
}
} else {
- unpacketized_payload_size = payload_size;
+ unpacketized_payload_size = payload.size();
}
if (num_packets == 0)
diff --git a/modules/rtp_rtcp/source/rtp_sender_video.h b/modules/rtp_rtcp/source/rtp_sender_video.h
index d332333..8906e31 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video.h
+++ b/modules/rtp_rtcp/source/rtp_sender_video.h
@@ -94,10 +94,20 @@
// expected_retransmission_time_ms.has_value() -> retransmission allowed.
// Calls to this method is assumed to be externally serialized.
+ bool SendVideo(int payload_type,
+ absl::optional<VideoCodecType> codec_type,
+ uint32_t rtp_timestamp,
+ int64_t capture_time_ms,
+ rtc::ArrayView<const uint8_t> payload,
+ const RTPFragmentationHeader* fragmentation,
+ RTPVideoHeader video_header,
+ absl::optional<int64_t> expected_retransmission_time_ms);
+
+ RTC_DEPRECATED
bool SendVideo(VideoFrameType frame_type,
int8_t payload_type,
absl::optional<VideoCodecType> codec_type,
- uint32_t capture_timestamp,
+ uint32_t rtp_timestamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_size,
@@ -106,6 +116,7 @@
absl::optional<int64_t> expected_retransmission_time_ms);
// TODO(bugs.webrtc.org/10809): Remove when downstream usage is gone.
+ RTC_DEPRECATED
bool SendVideo(VideoFrameType frame_type,
int8_t payload_type,
uint32_t capture_timestamp,
diff --git a/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
index 856d239..17aafed 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
@@ -178,9 +178,9 @@
RTPVideoHeader hdr;
hdr.rotation = kVideoRotation_0;
- rtp_sender_video_.SendVideo(VideoFrameType::kVideoFrameKey, kPayload, kType,
- kTimestamp, 0, kFrame, sizeof(kFrame), nullptr,
- &hdr, kDefaultExpectedRetransmissionTimeMs);
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
+ hdr, kDefaultExpectedRetransmissionTimeMs);
VideoRotation rotation;
EXPECT_TRUE(
@@ -204,9 +204,9 @@
hdr.video_timing.encode_finish_delta_ms = kEncodeFinishDeltaMs;
fake_clock_.AdvanceTimeMilliseconds(kPacketizationTimeMs);
- rtp_sender_video_.SendVideo(VideoFrameType::kVideoFrameKey, kPayload, kType,
- kTimestamp, kCaptureTimestamp, kFrame,
- sizeof(kFrame), nullptr, &hdr,
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, kCaptureTimestamp,
+ kFrame, nullptr, hdr,
kDefaultExpectedRetransmissionTimeMs);
VideoSendTiming timing;
EXPECT_TRUE(transport_.last_sent_packet().GetExtension<VideoTimingExtension>(
@@ -223,14 +223,15 @@
RTPVideoHeader hdr;
hdr.rotation = kVideoRotation_90;
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video_.SendVideo(
- VideoFrameType::kVideoFrameKey, kPayload, kType, kTimestamp, 0, kFrame,
- sizeof(kFrame), nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs));
+ kPayload, kType, kTimestamp, 0, kFrame, nullptr, hdr,
+ kDefaultExpectedRetransmissionTimeMs));
hdr.rotation = kVideoRotation_0;
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
EXPECT_TRUE(rtp_sender_video_.SendVideo(
- VideoFrameType::kVideoFrameDelta, kPayload, kType, kTimestamp + 1, 0,
- kFrame, sizeof(kFrame), nullptr, &hdr,
+ kPayload, kType, kTimestamp + 1, 0, kFrame, nullptr, hdr,
kDefaultExpectedRetransmissionTimeMs));
VideoRotation rotation;
@@ -246,13 +247,14 @@
RTPVideoHeader hdr;
hdr.rotation = kVideoRotation_90;
+ hdr.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video_.SendVideo(
- VideoFrameType::kVideoFrameKey, kPayload, kType, kTimestamp, 0, kFrame,
- sizeof(kFrame), nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs));
+ kPayload, kType, kTimestamp, 0, kFrame, nullptr, hdr,
+ kDefaultExpectedRetransmissionTimeMs));
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
EXPECT_TRUE(rtp_sender_video_.SendVideo(
- VideoFrameType::kVideoFrameDelta, kPayload, kType, kTimestamp + 1, 0,
- kFrame, sizeof(kFrame), nullptr, &hdr,
+ kPayload, kType, kTimestamp + 1, 0, kFrame, nullptr, hdr,
kDefaultExpectedRetransmissionTimeMs));
VideoRotation rotation;
@@ -278,18 +280,18 @@
hdr.frame_marking.temporal_id = kNoTemporalIdx;
hdr.frame_marking.tl0_pic_idx = 99;
hdr.frame_marking.base_layer_sync = true;
- rtp_sender_video_.SendVideo(VideoFrameType::kVideoFrameDelta, kPayload, kType,
- kTimestamp, 0, kFrame, sizeof(kFrame), &frag,
- &hdr, kDefaultExpectedRetransmissionTimeMs);
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, &frag,
+ hdr, kDefaultExpectedRetransmissionTimeMs);
FrameMarking fm;
EXPECT_FALSE(
transport_.last_sent_packet().GetExtension<FrameMarkingExtension>(&fm));
hdr.frame_marking.temporal_id = 0;
- rtp_sender_video_.SendVideo(VideoFrameType::kVideoFrameDelta, kPayload, kType,
- kTimestamp + 1, 0, kFrame, sizeof(kFrame), &frag,
- &hdr, kDefaultExpectedRetransmissionTimeMs);
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp + 1, 0, kFrame, &frag,
+ hdr, kDefaultExpectedRetransmissionTimeMs);
EXPECT_TRUE(
transport_.last_sent_packet().GetExtension<FrameMarkingExtension>(&fm));
@@ -529,9 +531,9 @@
generic.higher_spatial_layers.push_back(4);
generic.dependencies.push_back(kFrameId - 1);
generic.dependencies.push_back(kFrameId - 500);
- rtp_sender_video_.SendVideo(VideoFrameType::kVideoFrameDelta, kPayload, kType,
- kTimestamp, 0, kFrame, sizeof(kFrame), nullptr,
- &hdr, kDefaultExpectedRetransmissionTimeMs);
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
+ hdr, kDefaultExpectedRetransmissionTimeMs);
RtpGenericFrameDescriptor descriptor_wire;
EXPECT_EQ(1, transport_.packets_sent());
@@ -583,9 +585,9 @@
vp8.keyIdx = 2;
RTPVideoHeader::GenericDescriptorInfo& generic = hdr.generic.emplace();
generic.frame_id = kFrameId;
- rtp_sender_video_.SendVideo(VideoFrameType::kVideoFrameDelta, kPayload,
- VideoCodecType::kVideoCodecVP8, kTimestamp, 0,
- kFrame, sizeof(kFrame), nullptr, &hdr,
+ hdr.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_sender_video_.SendVideo(kPayload, VideoCodecType::kVideoCodecVP8,
+ kTimestamp, 0, kFrame, nullptr, hdr,
kDefaultExpectedRetransmissionTimeMs);
ASSERT_EQ(transport_.packets_sent(), 1);
diff --git a/video/rtp_video_stream_receiver_unittest.cc b/video/rtp_video_stream_receiver_unittest.cc
index 885641c..19a398e 100644
--- a/video/rtp_video_stream_receiver_unittest.cc
+++ b/video/rtp_video_stream_receiver_unittest.cc
@@ -17,6 +17,7 @@
#include "common_video/h264/h264_common.h"
#include "media/base/media_constants.h"
#include "modules/rtp_rtcp/source/rtp_format.h"
+#include "modules/rtp_rtcp/source/rtp_format_vp9.h"
#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
@@ -233,15 +234,12 @@
// Reduce max payload length to make sure the key frame generates two
// packets.
pay_load_size_limits.max_payload_len = 8;
- RTPVideoHeader rtp_video_header;
RTPVideoHeaderVP9 rtp_video_header_vp9;
rtp_video_header_vp9.InitRTPVideoHeaderVP9();
rtp_video_header_vp9.inter_pic_predicted =
(video_frame_type == VideoFrameType::kVideoFrameDelta);
- rtp_video_header.video_type_header = rtp_video_header_vp9;
- rtp_packetizer_ = RtpPacketizer::Create(
- kVideoCodecVP9, rtc::MakeArrayView(payload.data(), payload.size()),
- pay_load_size_limits, rtp_video_header, video_frame_type, nullptr);
+ rtp_packetizer_ = std::make_unique<RtpPacketizerVp9>(
+ payload, pay_load_size_limits, rtp_video_header_vp9);
}
size_t NumPackets() { return rtp_packetizer_->NumPackets(); }