Pass full RtpPacket to RtpVideoStreamReceiver::OnReceivedPayload
that brings RtpPacketReceived closer to the packet buffer
to allow storing original packets rather than VCMPacket in it.
Bug: webrtc:10979
Change-Id: Ia0fc0abf3551a843b19b0ee66ca0f20cae014479
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/157164
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Commit-Queue: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#29516}
diff --git a/video/rtp_video_stream_receiver.cc b/video/rtp_video_stream_receiver.cc
index dcc015d..a598250 100644
--- a/video/rtp_video_stream_receiver.cc
+++ b/video/rtp_video_stream_receiver.cc
@@ -325,29 +325,98 @@
return info;
}
-int32_t RtpVideoStreamReceiver::OnReceivedPayloadData(
- const uint8_t* payload_data,
- size_t payload_size,
- const RTPHeader& rtp_header,
- const RTPVideoHeader& video_header,
- const absl::optional<RtpGenericFrameDescriptor>& generic_descriptor,
- bool is_recovered) {
- VCMPacket packet(payload_data, payload_size, rtp_header, video_header,
- ntp_estimator_.Estimate(rtp_header.timestamp),
+void RtpVideoStreamReceiver::OnReceivedPayloadData(
+ rtc::ArrayView<const uint8_t> codec_payload,
+ const RtpPacketReceived& rtp_packet,
+ const RTPVideoHeader& video) {
+ RTPHeader rtp_header;
+ rtp_packet.GetHeader(&rtp_header);
+ VCMPacket packet(codec_payload.data(), codec_payload.size(), rtp_header,
+ video, ntp_estimator_.Estimate(rtp_packet.Timestamp()),
clock_->TimeInMilliseconds());
- packet.generic_descriptor = generic_descriptor;
+
+ RTPVideoHeader& video_header = packet.video_header;
+ video_header.rotation = kVideoRotation_0;
+ video_header.content_type = VideoContentType::UNSPECIFIED;
+ video_header.video_timing.flags = VideoSendTiming::kInvalid;
+ video_header.is_last_packet_in_frame |= rtp_packet.Marker();
+ video_header.frame_marking.temporal_id = kNoTemporalIdx;
+
+ if (const auto* vp9_header =
+ absl::get_if<RTPVideoHeaderVP9>(&video_header.video_type_header)) {
+ video_header.is_last_packet_in_frame |= vp9_header->end_of_frame;
+ video_header.is_first_packet_in_frame |= vp9_header->beginning_of_frame;
+ }
+
+ rtp_packet.GetExtension<VideoOrientation>(&video_header.rotation);
+ rtp_packet.GetExtension<VideoContentTypeExtension>(
+ &video_header.content_type);
+ rtp_packet.GetExtension<VideoTimingExtension>(&video_header.video_timing);
+ rtp_packet.GetExtension<PlayoutDelayLimits>(&video_header.playout_delay);
+ rtp_packet.GetExtension<FrameMarkingExtension>(&video_header.frame_marking);
+
+ RtpGenericFrameDescriptor& generic_descriptor =
+ packet.generic_descriptor.emplace();
+ if (rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension01>(
+ &generic_descriptor)) {
+ if (rtp_packet.HasExtension<RtpGenericFrameDescriptorExtension00>()) {
+ RTC_LOG(LS_WARNING) << "RTP packet had two different GFD versions.";
+ return;
+ }
+ generic_descriptor.SetByteRepresentation(
+ rtp_packet.GetRawExtension<RtpGenericFrameDescriptorExtension01>());
+ } else if ((rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension00>(
+ &generic_descriptor))) {
+ generic_descriptor.SetByteRepresentation(
+ rtp_packet.GetRawExtension<RtpGenericFrameDescriptorExtension00>());
+ } else {
+ packet.generic_descriptor = absl::nullopt;
+ }
+ if (packet.generic_descriptor != absl::nullopt) {
+ video_header.is_first_packet_in_frame =
+ packet.generic_descriptor->FirstPacketInSubFrame();
+ video_header.is_last_packet_in_frame =
+ rtp_packet.Marker() ||
+ packet.generic_descriptor->LastPacketInSubFrame();
+
+ if (packet.generic_descriptor->FirstPacketInSubFrame()) {
+ video_header.frame_type =
+ packet.generic_descriptor->FrameDependenciesDiffs().empty()
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ }
+
+ video_header.width = packet.generic_descriptor->Width();
+ video_header.height = packet.generic_descriptor->Height();
+ }
+
+ // Color space should only be transmitted in the last packet of a frame,
+ // therefore, neglect it otherwise so that last_color_space_ is not reset by
+ // mistake.
+ if (video_header.is_last_packet_in_frame) {
+ video_header.color_space = rtp_packet.GetExtension<ColorSpaceExtension>();
+ if (video_header.color_space ||
+ video_header.frame_type == VideoFrameType::kVideoFrameKey) {
+ // Store color space since it's only transmitted when changed or for key
+ // frames. Color space will be cleared if a key frame is transmitted
+ // without color space information.
+ last_color_space_ = video_header.color_space;
+ } else if (last_color_space_) {
+ video_header.color_space = last_color_space_;
+ }
+ }
if (loss_notification_controller_) {
- if (is_recovered) {
+ if (rtp_packet.recovered()) {
// TODO(bugs.webrtc.org/10336): Implement support for reordering.
RTC_LOG(LS_INFO)
<< "LossNotificationController does not support reordering.";
- } else if (!generic_descriptor) {
+ } else if (!packet.generic_descriptor) {
RTC_LOG(LS_WARNING) << "LossNotificationController requires generic "
"frame descriptor, but it is missing.";
} else {
- loss_notification_controller_->OnReceivedPacket(rtp_header.sequenceNumber,
- *generic_descriptor);
+ loss_notification_controller_->OnReceivedPacket(
+ rtp_packet.SequenceNumber(), *packet.generic_descriptor);
}
}
@@ -357,7 +426,7 @@
video_header.frame_type == VideoFrameType::kVideoFrameKey;
packet.timesNacked = nack_module_->OnReceivedPacket(
- rtp_header.sequenceNumber, is_keyframe, is_recovered);
+ rtp_packet.SequenceNumber(), is_keyframe, rtp_packet.recovered());
} else {
packet.timesNacked = -1;
}
@@ -365,7 +434,7 @@
if (packet.sizeBytes == 0) {
NotifyReceiverOfEmptyPacket(packet.seqNum);
rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
- return 0;
+ return;
}
if (packet.codec() == kVideoCodecH264) {
@@ -383,7 +452,7 @@
rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
RTC_FALLTHROUGH();
case video_coding::H264SpsPpsTracker::kDrop:
- return 0;
+ return;
case video_coding::H264SpsPpsTracker::kInsert:
break;
}
@@ -398,7 +467,6 @@
if (!packet_buffer_.InsertPacket(&packet)) {
RequestKeyFrame();
}
- return 0;
}
void RtpVideoStreamReceiver::OnRecoveredPacket(const uint8_t* rtp_packet,
@@ -681,87 +749,9 @@
return;
}
- RTPHeader rtp_header;
- packet.GetHeader(&rtp_header);
- RTPVideoHeader video_header = parsed_payload.video_header();
- video_header.rotation = kVideoRotation_0;
- video_header.content_type = VideoContentType::UNSPECIFIED;
- video_header.video_timing.flags = VideoSendTiming::kInvalid;
- video_header.is_last_packet_in_frame = rtp_header.markerBit;
- video_header.frame_marking.temporal_id = kNoTemporalIdx;
-
- if (parsed_payload.video_header().codec == kVideoCodecVP9) {
- const RTPVideoHeaderVP9& codec_header = absl::get<RTPVideoHeaderVP9>(
- parsed_payload.video_header().video_type_header);
- video_header.is_last_packet_in_frame |= codec_header.end_of_frame;
- video_header.is_first_packet_in_frame |= codec_header.beginning_of_frame;
- }
-
- packet.GetExtension<VideoOrientation>(&video_header.rotation);
- packet.GetExtension<VideoContentTypeExtension>(&video_header.content_type);
- packet.GetExtension<VideoTimingExtension>(&video_header.video_timing);
- packet.GetExtension<PlayoutDelayLimits>(&video_header.playout_delay);
- packet.GetExtension<FrameMarkingExtension>(&video_header.frame_marking);
-
- // Color space should only be transmitted in the last packet of a frame,
- // therefore, neglect it otherwise so that last_color_space_ is not reset by
- // mistake.
- if (video_header.is_last_packet_in_frame) {
- video_header.color_space = packet.GetExtension<ColorSpaceExtension>();
- if (video_header.color_space ||
- video_header.frame_type == VideoFrameType::kVideoFrameKey) {
- // Store color space since it's only transmitted when changed or for key
- // frames. Color space will be cleared if a key frame is transmitted
- // without color space information.
- last_color_space_ = video_header.color_space;
- } else if (last_color_space_) {
- video_header.color_space = last_color_space_;
- }
- }
-
- absl::optional<RtpGenericFrameDescriptor> generic_descriptor_wire;
- generic_descriptor_wire.emplace();
- const bool generic_descriptor_v00 =
- packet.GetExtension<RtpGenericFrameDescriptorExtension00>(
- &generic_descriptor_wire.value());
- const bool generic_descriptor_v01 =
- packet.GetExtension<RtpGenericFrameDescriptorExtension01>(
- &generic_descriptor_wire.value());
- if (generic_descriptor_v00 && generic_descriptor_v01) {
- RTC_LOG(LS_WARNING) << "RTP packet had two different GFD versions.";
- return;
- }
-
- if (generic_descriptor_v00 || generic_descriptor_v01) {
- if (generic_descriptor_v00) {
- generic_descriptor_wire->SetByteRepresentation(
- packet.GetRawExtension<RtpGenericFrameDescriptorExtension00>());
- } else {
- generic_descriptor_wire->SetByteRepresentation(
- packet.GetRawExtension<RtpGenericFrameDescriptorExtension01>());
- }
-
- video_header.is_first_packet_in_frame =
- generic_descriptor_wire->FirstPacketInSubFrame();
- video_header.is_last_packet_in_frame =
- rtp_header.markerBit || generic_descriptor_wire->LastPacketInSubFrame();
-
- if (generic_descriptor_wire->FirstPacketInSubFrame()) {
- video_header.frame_type =
- generic_descriptor_wire->FrameDependenciesDiffs().empty()
- ? VideoFrameType::kVideoFrameKey
- : VideoFrameType::kVideoFrameDelta;
- }
-
- video_header.width = generic_descriptor_wire->Width();
- video_header.height = generic_descriptor_wire->Height();
- } else {
- generic_descriptor_wire.reset();
- }
-
- OnReceivedPayloadData(parsed_payload.payload, parsed_payload.payload_length,
- rtp_header, video_header, generic_descriptor_wire,
- packet.recovered());
+ OnReceivedPayloadData(
+ rtc::MakeArrayView(parsed_payload.payload, parsed_payload.payload_length),
+ packet, parsed_payload.video);
}
void RtpVideoStreamReceiver::ParseAndHandleEncapsulatingHeader(
diff --git a/video/rtp_video_stream_receiver.h b/video/rtp_video_stream_receiver.h
index 1779fa6..5f14613 100644
--- a/video/rtp_video_stream_receiver.h
+++ b/video/rtp_video_stream_receiver.h
@@ -19,6 +19,7 @@
#include <vector>
#include "absl/types/optional.h"
+#include "api/array_view.h"
#include "api/crypto/frame_decryptor_interface.h"
#include "api/video/color_space.h"
#include "api/video_codecs/video_codec.h"
@@ -30,6 +31,8 @@
#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
#include "modules/rtp_rtcp/include/rtp_rtcp.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
#include "modules/video_coding/h264_sps_pps_tracker.h"
#include "modules/video_coding/loss_notification_controller.h"
#include "modules/video_coding/packet_buffer.h"
@@ -109,13 +112,9 @@
// TODO(philipel): Stop using VCMPacket in the new jitter buffer and then
// remove this function. Public only for tests.
- int32_t OnReceivedPayloadData(
- const uint8_t* payload_data,
- size_t payload_size,
- const RTPHeader& rtp_header,
- const RTPVideoHeader& video_header,
- const absl::optional<RtpGenericFrameDescriptor>& generic_descriptor,
- bool is_recovered);
+ void OnReceivedPayloadData(rtc::ArrayView<const uint8_t> codec_payload,
+ const RtpPacketReceived& rtp_packet,
+ const RTPVideoHeader& video);
// Implements RecoveredPacketReceiver.
void OnRecoveredPacket(const uint8_t* packet, size_t packet_length) override;
diff --git a/video/rtp_video_stream_receiver_unittest.cc b/video/rtp_video_stream_receiver_unittest.cc
index 19a398e..d83e81e 100644
--- a/video/rtp_video_stream_receiver_unittest.cc
+++ b/video/rtp_video_stream_receiver_unittest.cc
@@ -330,10 +330,10 @@
}
TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrame) {
- RTPHeader rtp_header;
+ RtpPacketReceived rtp_packet;
RTPVideoHeader video_header;
const std::vector<uint8_t> data({1, 2, 3, 4});
- rtp_header.sequenceNumber = 1;
+ rtp_packet.SetSequenceNumber(1);
video_header.is_first_packet_in_frame = true;
video_header.is_last_packet_in_frame = true;
video_header.codec = kVideoCodecGeneric;
@@ -341,8 +341,8 @@
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
data.size());
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
- rtp_video_stream_receiver_->OnReceivedPayloadData(
- data.data(), data.size(), rtp_header, video_header, absl::nullopt, false);
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
}
TEST_F(RtpVideoStreamReceiverTest, NoInfiniteRecursionOnEncapsulatedRedPacket) {
@@ -388,10 +388,10 @@
}
TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrameBitstreamError) {
- RTPHeader rtp_header;
+ RtpPacketReceived rtp_packet;
RTPVideoHeader video_header;
const std::vector<uint8_t> data({1, 2, 3, 4});
- rtp_header.sequenceNumber = 1;
+ rtp_packet.SetSequenceNumber(1);
video_header.is_first_packet_in_frame = true;
video_header.is_last_packet_in_frame = true;
video_header.codec = kVideoCodecGeneric;
@@ -401,8 +401,8 @@
expected_bitsteam, sizeof(expected_bitsteam));
EXPECT_CALL(mock_on_complete_frame_callback_,
DoOnCompleteFrameFailBitstream(_));
- rtp_video_stream_receiver_->OnReceivedPayloadData(
- data.data(), data.size(), rtp_header, video_header, absl::nullopt, false);
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
}
class RtpVideoStreamReceiverTestH264
@@ -418,38 +418,36 @@
TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
std::vector<uint8_t> sps_data;
- RTPHeader rtp_header;
+ RtpPacketReceived rtp_packet;
RTPVideoHeader sps_video_header = GetDefaultH264VideoHeader();
AddSps(&sps_video_header, 0, &sps_data);
- rtp_header.sequenceNumber = 0;
+ rtp_packet.SetSequenceNumber(0);
sps_video_header.is_first_packet_in_frame = true;
sps_video_header.frame_type = VideoFrameType::kEmptyFrame;
mock_on_complete_frame_callback_.AppendExpectedBitstream(
kH264StartCode, sizeof(kH264StartCode));
mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
sps_data.size());
- rtp_video_stream_receiver_->OnReceivedPayloadData(
- sps_data.data(), sps_data.size(), rtp_header, sps_video_header,
- absl::nullopt, false);
+ rtp_video_stream_receiver_->OnReceivedPayloadData(sps_data, rtp_packet,
+ sps_video_header);
std::vector<uint8_t> pps_data;
RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader();
AddPps(&pps_video_header, 0, 1, &pps_data);
- rtp_header.sequenceNumber = 1;
+ rtp_packet.SetSequenceNumber(1);
pps_video_header.is_first_packet_in_frame = true;
pps_video_header.frame_type = VideoFrameType::kEmptyFrame;
mock_on_complete_frame_callback_.AppendExpectedBitstream(
kH264StartCode, sizeof(kH264StartCode));
mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
pps_data.size());
- rtp_video_stream_receiver_->OnReceivedPayloadData(
- pps_data.data(), pps_data.size(), rtp_header, pps_video_header,
- absl::nullopt, false);
+ rtp_video_stream_receiver_->OnReceivedPayloadData(pps_data, rtp_packet,
+ pps_video_header);
std::vector<uint8_t> idr_data;
RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader();
AddIdr(&idr_video_header, 1);
- rtp_header.sequenceNumber = 2;
+ rtp_packet.SetSequenceNumber(2);
idr_video_header.is_first_packet_in_frame = true;
idr_video_header.is_last_packet_in_frame = true;
idr_video_header.frame_type = VideoFrameType::kVideoFrameKey;
@@ -459,9 +457,8 @@
mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(),
idr_data.size());
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
- rtp_video_stream_receiver_->OnReceivedPayloadData(
- idr_data.data(), idr_data.size(), rtp_header, idr_video_header,
- absl::nullopt, false);
+ rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
+ idr_video_header);
}
TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) {
@@ -488,11 +485,11 @@
sizeof(binary_pps));
std::vector<uint8_t> data;
- RTPHeader rtp_header;
+ RtpPacketReceived rtp_packet;
RTPVideoHeader video_header = GetDefaultH264VideoHeader();
AddIdr(&video_header, 0);
- rtp_header.payloadType = kPayloadType;
- rtp_header.sequenceNumber = 2;
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetSequenceNumber(2);
video_header.is_first_packet_in_frame = true;
video_header.is_last_packet_in_frame = true;
video_header.codec = kVideoCodecH264;
@@ -503,17 +500,17 @@
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
data.size());
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
- rtp_video_stream_receiver_->OnReceivedPayloadData(
- data.data(), data.size(), rtp_header, video_header, absl::nullopt, false);
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
}
TEST_F(RtpVideoStreamReceiverTest, PaddingInMediaStream) {
- RTPHeader rtp_header;
+ RtpPacketReceived rtp_packet;
RTPVideoHeader video_header = GetDefaultH264VideoHeader();
std::vector<uint8_t> data;
data.insert(data.end(), {1, 2, 3});
- rtp_header.payloadType = 99;
- rtp_header.sequenceNumber = 2;
+ rtp_packet.SetPayloadType(99);
+ rtp_packet.SetSequenceNumber(2);
video_header.is_first_packet_in_frame = true;
video_header.is_last_packet_in_frame = true;
video_header.codec = kVideoCodecGeneric;
@@ -522,47 +519,47 @@
data.size());
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
- rtp_video_stream_receiver_->OnReceivedPayloadData(
- data.data(), data.size(), rtp_header, video_header, absl::nullopt, false);
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
- rtp_header.sequenceNumber = 3;
- rtp_video_stream_receiver_->OnReceivedPayloadData(
- nullptr, 0, rtp_header, video_header, absl::nullopt, false);
+ rtp_packet.SetSequenceNumber(3);
+ rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
+ video_header);
- rtp_header.sequenceNumber = 4;
+ rtp_packet.SetSequenceNumber(4);
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
video_header.frame_type = VideoFrameType::kVideoFrameDelta;
- rtp_video_stream_receiver_->OnReceivedPayloadData(
- data.data(), data.size(), rtp_header, video_header, absl::nullopt, false);
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
- rtp_header.sequenceNumber = 6;
- rtp_video_stream_receiver_->OnReceivedPayloadData(
- data.data(), data.size(), rtp_header, video_header, absl::nullopt, false);
+ rtp_packet.SetSequenceNumber(6);
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
- rtp_header.sequenceNumber = 5;
- rtp_video_stream_receiver_->OnReceivedPayloadData(
- nullptr, 0, rtp_header, video_header, absl::nullopt, false);
+ rtp_packet.SetSequenceNumber(5);
+ rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
+ video_header);
}
TEST_F(RtpVideoStreamReceiverTest, RequestKeyframeIfFirstFrameIsDelta) {
- RTPHeader rtp_header;
+ RtpPacketReceived rtp_packet;
RTPVideoHeader video_header;
const std::vector<uint8_t> data({1, 2, 3, 4});
- rtp_header.sequenceNumber = 1;
+ rtp_packet.SetSequenceNumber(1);
video_header.is_first_packet_in_frame = true;
video_header.is_last_packet_in_frame = true;
video_header.codec = kVideoCodecGeneric;
video_header.frame_type = VideoFrameType::kVideoFrameDelta;
EXPECT_CALL(mock_key_frame_request_sender_, RequestKeyFrame());
- rtp_video_stream_receiver_->OnReceivedPayloadData(
- data.data(), data.size(), rtp_header, video_header, absl::nullopt, false);
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
}
TEST_F(RtpVideoStreamReceiverTest, RequestKeyframeWhenPacketBufferGetsFull) {
constexpr int kPacketBufferMaxSize = 2048;
- RTPHeader rtp_header;
+ RtpPacketReceived rtp_packet;
RTPVideoHeader video_header;
const std::vector<uint8_t> data({1, 2, 3, 4});
video_header.is_first_packet_in_frame = true;
@@ -571,18 +568,17 @@
video_header.codec = kVideoCodecGeneric;
video_header.frame_type = VideoFrameType::kVideoFrameDelta;
uint16_t start_sequence_number = 1234;
- rtp_header.sequenceNumber = start_sequence_number;
- while (rtp_header.sequenceNumber - start_sequence_number <
+ rtp_packet.SetSequenceNumber(start_sequence_number);
+ while (rtp_packet.SequenceNumber() - start_sequence_number <
kPacketBufferMaxSize) {
- rtp_video_stream_receiver_->OnReceivedPayloadData(data.data(), data.size(),
- rtp_header, video_header,
- absl::nullopt, false);
- rtp_header.sequenceNumber += 2;
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+ rtp_packet.SetSequenceNumber(rtp_packet.SequenceNumber() + 2);
}
EXPECT_CALL(mock_key_frame_request_sender_, RequestKeyFrame());
- rtp_video_stream_receiver_->OnReceivedPayloadData(
- data.data(), data.size(), rtp_header, video_header, absl::nullopt, false);
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
}
TEST_F(RtpVideoStreamReceiverTest, SecondarySinksGetRtpNotifications) {