Add support for setting CSRCs on audio and video senders
This is a modified version of
https://webrtc-review.googlesource.com/c/src/+/392940 to avoid breaking
downstream dependencies.
With this change, CSRCs can be added to video packets sent via
RTPSenderVideo::SendEncodedImage. This is implemented by keeping a list
of CSRCs in the calling class, RtpVideoSender, and passing it in every
call to SendEncodedImage. Similarly, a list of CSRCs for audio packets
is kept in ChannelSend and passed to SendRtpAudio. CSRCs are also
propagated to the frame transformers for both audio and video.
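
For context, a minimal usage sketch (illustrative only, not part of the
diff below): assuming a caller already holds the ChannelSendInterface
and RtpVideoSenderInterface objects touched by this change (the pointer
names here are made up), configuring CSRCs would look roughly like
this:

  #include <cstdint>
  #include <vector>
  #include "audio/channel_send.h"
  #include "call/rtp_video_sender_interface.h"

  void ConfigureCsrcs(webrtc::voe::ChannelSendInterface* channel_send,
                      webrtc::RtpVideoSenderInterface* rtp_video_sender) {
    std::vector<uint32_t> csrcs = {0x11111111, 0x22222222};
    // Audio: the list is copied onto the encoder queue and attached to
    // every SendRtpAudio call (and to transformed audio frames).
    channel_send->SetCsrcs(csrcs);
    // Video: the list is stored under RtpVideoSender's mutex and passed
    // with every SendEncodedImage call (and to transformed video frames).
    rtp_video_sender->SetCsrcs(csrcs);
    // In both cases only the first kRtpCsrcSize entries are kept.
  }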
Bug: b/410811496
Change-Id: I728934f8c190120211672e2d6dc5940bc8f83838
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/395301
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Reviewed-by: Jonas Oreland <jonaso@webrtc.org>
Commit-Queue: Helmer Nylén <helmern@google.com>
Cr-Commit-Position: refs/heads/main@{#44907}
diff --git a/audio/channel_send.cc b/audio/channel_send.cc
index 2077d97..06ebdd2 100644
--- a/audio/channel_send.cc
+++ b/audio/channel_send.cc
@@ -163,6 +163,9 @@
// Muting, Volume and Level.
void SetInputMute(bool enable) override;
+ // CSRCs.
+ void SetCsrcs(ArrayView<const uint32_t> csrcs) override;
+
// Stats.
ANAStats GetANAStatistics() const override;
@@ -320,6 +323,8 @@
mutable Mutex bitrate_accountant_mutex_;
AudioBitrateAccountant bitrate_accountant_
RTC_GUARDED_BY(bitrate_accountant_mutex_);
+
+ std::vector<uint32_t> csrcs_ RTC_GUARDED_BY(encoder_queue_checker_);
};
const int kTelephoneEventAttenuationdB = 10;
@@ -393,12 +398,11 @@
frame_transformer_delegate_->Transform(
frameType, payloadType, rtp_timestamp + rtp_rtcp_->StartTimestamp(),
payloadData, payloadSize, absolute_capture_timestamp_ms,
- rtp_rtcp_->SSRC(), mime_type.str(), audio_level_dbov);
+ rtp_rtcp_->SSRC(), mime_type.str(), audio_level_dbov, csrcs_);
return 0;
}
return SendRtpAudio(frameType, payloadType, rtp_timestamp, payload,
- absolute_capture_timestamp_ms, /*csrcs=*/{},
- audio_level_dbov);
+ absolute_capture_timestamp_ms, csrcs_, audio_level_dbov);
}
int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType,
@@ -694,6 +698,17 @@
return input_mute_;
}
+void ChannelSend::SetCsrcs(ArrayView<const uint32_t> csrcs) {
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+ std::vector<uint32_t> csrcs_copy(
+ csrcs.begin(),
+ csrcs.begin() + std::min<size_t>(csrcs.size(), kRtpCsrcSize));
+ encoder_queue_->PostTask([this, csrcs = std::move(csrcs_copy)]() mutable {
+ RTC_DCHECK_RUN_ON(&encoder_queue_checker_);
+ csrcs_ = csrcs;
+ });
+}
+
bool ChannelSend::SendTelephoneEventOutband(int event, int duration_ms) {
RTC_DCHECK_RUN_ON(&worker_thread_checker_);
RTC_DCHECK_LE(0, event);
diff --git a/audio/channel_send.h b/audio/channel_send.h
index 8991fc0..0ed544a 100644
--- a/audio/channel_send.h
+++ b/audio/channel_send.h
@@ -18,6 +18,7 @@
#include <vector>
#include "absl/strings/string_view.h"
+#include "api/array_view.h"
#include "api/audio/audio_frame.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
@@ -90,6 +91,10 @@
virtual void OnBitrateAllocation(BitrateAllocationUpdate update) = 0;
virtual int GetTargetBitrate() const = 0;
virtual void SetInputMute(bool muted) = 0;
+ // Sets the list of CSRCs to be included in the RTP header. If more than
+ // kRtpCsrcSize CSRCs are provided, only the first kRtpCsrcSize elements are
+ // kept.
+ virtual void SetCsrcs(ArrayView<const uint32_t> csrcs) = 0;
virtual void ProcessAndEncodeAudio(
std::unique_ptr<AudioFrame> audio_frame) = 0;
diff --git a/audio/channel_send_frame_transformer_delegate.cc b/audio/channel_send_frame_transformer_delegate.cc
index f1a8b4e..d82b0f5 100644
--- a/audio/channel_send_frame_transformer_delegate.cc
+++ b/audio/channel_send_frame_transformer_delegate.cc
@@ -10,11 +10,25 @@
#include "audio/channel_send_frame_transformer_delegate.h"
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <optional>
+#include <string>
#include <utility>
#include <vector>
+#include "api/array_view.h"
+#include "api/frame_transformer_interface.h"
+#include "api/scoped_refptr.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_base.h"
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/synchronization/mutex.h"
namespace webrtc {
namespace {
@@ -183,14 +197,15 @@
size_t payload_size,
int64_t absolute_capture_timestamp_ms,
uint32_t ssrc,
- const std::string& codec_mimetype,
- std::optional<uint8_t> audio_level_dbov) {
+ const std::string& codec_mime_type,
+ std::optional<uint8_t> audio_level_dbov,
+ const std::vector<uint32_t>& csrcs) {
{
MutexLock lock(&send_lock_);
if (short_circuit_) {
send_frame_callback_(frame_type, payload_type, rtp_timestamp,
ArrayView<const uint8_t>(payload_data, payload_size),
- absolute_capture_timestamp_ms, /*csrcs=*/{},
+ absolute_capture_timestamp_ms, csrcs,
audio_level_dbov);
return;
}
@@ -198,8 +213,7 @@
frame_transformer_->Transform(
std::make_unique<TransformableOutgoingAudioFrame>(
frame_type, payload_type, rtp_timestamp, payload_data, payload_size,
- absolute_capture_timestamp_ms, ssrc,
- /*csrcs=*/std::vector<uint32_t>(), codec_mimetype,
+ absolute_capture_timestamp_ms, ssrc, csrcs, codec_mime_type,
/*sequence_number=*/std::nullopt, audio_level_dbov));
}
diff --git a/audio/channel_send_frame_transformer_delegate.h b/audio/channel_send_frame_transformer_delegate.h
index eae9cac..f1a6d20 100644
--- a/audio/channel_send_frame_transformer_delegate.h
+++ b/audio/channel_send_frame_transformer_delegate.h
@@ -11,15 +11,21 @@
#ifndef AUDIO_CHANNEL_SEND_FRAME_TRANSFORMER_DELEGATE_H_
#define AUDIO_CHANNEL_SEND_FRAME_TRANSFORMER_DELEGATE_H_
+#include <cstddef>
+#include <cstdint>
+#include <functional>
#include <memory>
+#include <optional>
#include <string>
+#include <vector>
+#include "api/array_view.h"
#include "api/frame_transformer_interface.h"
-#include "api/sequence_checker.h"
+#include "api/scoped_refptr.h"
#include "api/task_queue/task_queue_base.h"
#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
-#include "rtc_base/buffer.h"
#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/thread_annotations.h"
namespace webrtc {
@@ -62,7 +68,8 @@
int64_t absolute_capture_timestamp_ms,
uint32_t ssrc,
const std::string& codec_mime_type,
- std::optional<uint8_t> audio_level_dbov);
+ std::optional<uint8_t> audio_level_dbov,
+ const std::vector<uint32_t>& csrcs = {});
// Implements TransformedFrameCallback. Can be called on any thread.
void OnTransformedFrame(
diff --git a/audio/channel_send_frame_transformer_delegate_unittest.cc b/audio/channel_send_frame_transformer_delegate_unittest.cc
index 2eac45c..f0d498b 100644
--- a/audio/channel_send_frame_transformer_delegate_unittest.cc
+++ b/audio/channel_send_frame_transformer_delegate_unittest.cc
@@ -23,6 +23,7 @@
#include "api/scoped_refptr.h"
#include "api/test/mock_frame_transformer.h"
#include "api/test/mock_transformable_audio_frame.h"
+#include "api/units/timestamp.h"
#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
#include "rtc_base/task_queue_for_test.h"
#include "test/gmock.h"
@@ -200,6 +201,59 @@
channel_queue.WaitForPreviouslyPostedTasks();
}
+// Test that CSRCs are propagated correctly from the Transform call to the frame
+// transformer.
+TEST(ChannelSendFrameTransformerDelegateTest,
+ TransformForwardsCsrcsViaFrameTransformer) {
+ TaskQueueForTest channel_queue("channel_queue");
+ scoped_refptr<MockFrameTransformer> mock_frame_transformer =
+ make_ref_counted<NiceMock<MockFrameTransformer>>();
+ MockChannelSend mock_channel;
+ scoped_refptr<ChannelSendFrameTransformerDelegate> delegate =
+ make_ref_counted<ChannelSendFrameTransformerDelegate>(
+ mock_channel.callback(), mock_frame_transformer, channel_queue.Get());
+ scoped_refptr<TransformedFrameCallback> callback;
+ EXPECT_CALL(*mock_frame_transformer, RegisterTransformedFrameCallback)
+ .WillOnce(SaveArg<0>(&callback));
+ delegate->Init();
+ ASSERT_TRUE(callback);
+
+ std::vector<uint32_t> csrcs = {123, 234, 345, 456};
+ EXPECT_CALL(mock_channel,
+ SendFrame(_, _, _, _, _, ElementsAreArray(csrcs), _));
+ ON_CALL(*mock_frame_transformer, Transform)
+ .WillByDefault(
+ [&callback](std::unique_ptr<TransformableFrameInterface> frame) {
+ callback->OnTransformedFrame(std::move(frame));
+ });
+ delegate->Transform(
+ AudioFrameType::kEmptyFrame, 0, 0, mock_data, sizeof(mock_data), 0,
+ /*ssrc=*/0, /*mimeType=*/"audio/opus", /*audio_level_dbov=*/31, csrcs);
+ channel_queue.WaitForPreviouslyPostedTasks();
+}
+
+// Test that CSRCs are propagated correctly from the Transform call to the send
+// frame callback when short circuiting is enabled.
+TEST(ChannelSendFrameTransformerDelegateTest,
+ TransformForwardsCsrcsViaShortCircuiting) {
+ TaskQueueForTest channel_queue("channel_queue");
+ scoped_refptr<MockFrameTransformer> mock_frame_transformer =
+ make_ref_counted<testing::NiceMock<MockFrameTransformer>>();
+ MockChannelSend mock_channel;
+ scoped_refptr<ChannelSendFrameTransformerDelegate> delegate =
+ make_ref_counted<ChannelSendFrameTransformerDelegate>(
+ mock_channel.callback(), mock_frame_transformer, channel_queue.Get());
+
+ std::vector<uint32_t> csrcs = {123, 234, 345, 456};
+ delegate->StartShortCircuiting();
+ EXPECT_CALL(mock_channel,
+ SendFrame(_, _, _, _, _, ElementsAreArray(csrcs), _));
+ delegate->Transform(
+ AudioFrameType::kEmptyFrame, 0, 0, mock_data, sizeof(mock_data), 0,
+ /*ssrc=*/0, /*mimeType=*/"audio/opus", /*audio_level_dbov=*/31, csrcs);
+ channel_queue.WaitForPreviouslyPostedTasks();
+}
+
// Test that if the delegate receives a transformed frame after it has been
// reset, it does not run the SendFrameCallback, as the channel is destroyed
// after resetting the delegate.
diff --git a/audio/channel_send_unittest.cc b/audio/channel_send_unittest.cc
index 7636ee3..2e8bfb6 100644
--- a/audio/channel_send_unittest.cc
+++ b/audio/channel_send_unittest.cc
@@ -409,6 +409,117 @@
ProcessNextFrame();
}
+TEST_F(ChannelSendTest, ConfiguredCsrcsAreIncludedInRtpPackets) {
+ channel_->StartSend();
+ std::vector<uint32_t> expected_csrcs = {1, 2, 3};
+ channel_->SetCsrcs(expected_csrcs);
+
+ std::vector<uint32_t> csrcs;
+ auto send_rtp = [&](ArrayView<const uint8_t> data,
+ const PacketOptions& /* options */) {
+ RtpPacketReceived packet;
+ packet.Parse(data);
+ csrcs = packet.Csrcs();
+ return true;
+ };
+
+ EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(send_rtp));
+ ProcessNextFrame();
+ ProcessNextFrame();
+
+ EXPECT_EQ(csrcs, expected_csrcs);
+}
+
+// Creates a frame with the given CSRCs where other values are copied from the
+// template.
+std::unique_ptr<TransformableAudioFrameInterface> CreateMockFrameWithCsrcs(
+ const TransformableAudioFrameInterface* frame_template,
+ const std::vector<uint32_t>& csrcs) {
+ std::unique_ptr<MockTransformableAudioFrame> mock_frame =
+ std::make_unique<MockTransformableAudioFrame>();
+ EXPECT_CALL(*mock_frame, GetContributingSources)
+ .WillRepeatedly(Return(csrcs));
+
+ std::vector<uint8_t> frame_data = std::vector(
+ frame_template->GetData().begin(), frame_template->GetData().end());
+ ON_CALL(*mock_frame, GetData).WillByDefault(Return(frame_data));
+
+ ON_CALL(*mock_frame, GetTimestamp)
+ .WillByDefault(Return(frame_template->GetTimestamp()));
+ ON_CALL(*mock_frame, GetPayloadType)
+ .WillByDefault(Return(frame_template->GetPayloadType()));
+ ON_CALL(*mock_frame, GetSsrc)
+ .WillByDefault(Return(frame_template->GetSsrc()));
+ ON_CALL(*mock_frame, GetMimeType)
+ .WillByDefault(Return(frame_template->GetMimeType()));
+ ON_CALL(*mock_frame, SequenceNumber)
+ .WillByDefault(Return(frame_template->SequenceNumber()));
+ ON_CALL(*mock_frame, GetDirection)
+ .WillByDefault(Return(frame_template->GetDirection()));
+ ON_CALL(*mock_frame, AbsoluteCaptureTimestamp)
+ .WillByDefault(Return(frame_template->AbsoluteCaptureTimestamp()));
+ ON_CALL(*mock_frame, Type).WillByDefault(Return(frame_template->Type()));
+ ON_CALL(*mock_frame, AudioLevel)
+ .WillByDefault(Return(frame_template->AudioLevel()));
+ ON_CALL(*mock_frame, ReceiveTime)
+ .WillByDefault(Return(frame_template->ReceiveTime()));
+ ON_CALL(*mock_frame, CaptureTime)
+ .WillByDefault(Return(frame_template->CaptureTime()));
+ ON_CALL(*mock_frame, SenderCaptureTimeOffset)
+ .WillByDefault(Return(frame_template->SenderCaptureTimeOffset()));
+ return mock_frame;
+}
+
+TEST_F(ChannelSendTest, FrameTransformerTakesPrecedenceOverSetCsrcs) {
+ scoped_refptr<MockFrameTransformer> mock_frame_transformer =
+ make_ref_counted<MockFrameTransformer>();
+ scoped_refptr<TransformedFrameCallback> callback;
+ EXPECT_CALL(*mock_frame_transformer, RegisterTransformedFrameCallback)
+ .WillOnce(SaveArg<0>(&callback));
+ EXPECT_CALL(*mock_frame_transformer, UnregisterTransformedFrameCallback);
+ channel_->SetEncoderToPacketizerFrameTransformer(mock_frame_transformer);
+
+ // Configure the mock frame transformer to return a frame with different
+ // CSRCs than the ones it is given.
+ std::vector<uint32_t> csrcs_provided_to_frame_transformer;
+ std::vector<uint32_t> csrcs_output_by_frame_transformer = {1, 2, 3};
+ EXPECT_CALL(*mock_frame_transformer, Transform)
+ .WillRepeatedly(
+ Invoke([&](std::unique_ptr<TransformableFrameInterface> frame) {
+ auto audio_frame =
+ static_cast<TransformableAudioFrameInterface*>(frame.get());
+ csrcs_provided_to_frame_transformer.assign(
+ audio_frame->GetContributingSources().begin(),
+ audio_frame->GetContributingSources().end());
+ callback->OnTransformedFrame(CreateMockFrameWithCsrcs(
+ audio_frame, csrcs_output_by_frame_transformer));
+ }));
+
+ std::vector<uint32_t> set_csrcs = {4, 5, 6};
+ channel_->SetCsrcs(set_csrcs);
+ channel_->StartSend();
+
+ std::vector<uint32_t> sent_csrcs;
+ auto send_rtp = [&](ArrayView<const uint8_t> data,
+ const PacketOptions& /* options */) {
+ RtpPacketReceived packet;
+ packet.Parse(data);
+ sent_csrcs = packet.Csrcs();
+ return true;
+ };
+
+ EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(send_rtp));
+ ProcessNextFrame();
+ ProcessNextFrame();
+
+ EXPECT_EQ(csrcs_provided_to_frame_transformer, set_csrcs)
+ << "The CSRCs configured in ChannelSend should be passed to the frame "
+ "transformer.";
+ EXPECT_EQ(sent_csrcs, csrcs_output_by_frame_transformer)
+ << "CSRCs provided by the frame transformer should propagate to the RTP "
+ "packet.";
+}
+
} // namespace
} // namespace voe
} // namespace webrtc
diff --git a/audio/mock_voe_channel_proxy.h b/audio/mock_voe_channel_proxy.h
index 0038f3a..cd2927d 100644
--- a/audio/mock_voe_channel_proxy.h
+++ b/audio/mock_voe_channel_proxy.h
@@ -20,6 +20,7 @@
#include <vector>
#include "absl/strings/string_view.h"
+#include "api/array_view.h"
#include "api/audio/audio_frame.h"
#include "api/audio/audio_mixer.h"
#include "api/audio_codecs/audio_encoder.h"
@@ -200,6 +201,7 @@
RegisterPacketOverhead,
(int packet_byte_overhead),
(override));
+ MOCK_METHOD(void, SetCsrcs, (ArrayView<const uint32_t> csrcs), (override));
};
} // namespace test
} // namespace webrtc
diff --git a/call/BUILD.gn b/call/BUILD.gn
index aa7c506..0e553ea 100644
--- a/call/BUILD.gn
+++ b/call/BUILD.gn
@@ -401,6 +401,7 @@
]
deps = [
":rtp_interfaces",
+ "../api:array_view",
"../api:frame_transformer_interface",
"../api:rtp_parameters",
"../api:rtp_sender_interface",
diff --git a/call/rtp_video_sender.cc b/call/rtp_video_sender.cc
index 4943443..dfd9566 100644
--- a/call/rtp_video_sender.cc
+++ b/call/rtp_video_sender.cc
@@ -625,7 +625,7 @@
encoded_image,
params_[simulcast_index].GetRtpVideoHeader(
encoded_image, codec_specific_info, frame_id),
- expected_retransmission_time);
+ expected_retransmission_time, csrcs_);
if (frame_count_observer_) {
FrameCounts& counts = frame_counts_[simulcast_index];
if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {
@@ -1031,6 +1031,12 @@
rtp_config_.max_packet_size);
}
+void RtpVideoSender::SetCsrcs(ArrayView<const uint32_t> csrcs) {
+ MutexLock lock(&mutex_);
+ csrcs_.assign(csrcs.begin(),
+ csrcs.begin() + std::min<size_t>(csrcs.size(), kRtpCsrcSize));
+}
+
DataRate RtpVideoSender::CalculateOverheadRate(DataRate data_rate,
DataSize packet_size,
DataSize overhead_per_packet,
diff --git a/call/rtp_video_sender.h b/call/rtp_video_sender.h
index 6fce841..86a5338 100644
--- a/call/rtp_video_sender.h
+++ b/call/rtp_video_sender.h
@@ -152,6 +152,12 @@
void SetEncodingData(size_t width, size_t height, size_t num_temporal_layers)
RTC_LOCKS_EXCLUDED(mutex_) override;
+ // Sets the list of CSRCs to be included in every packet. If more than
+ // kRtpCsrcSize CSRCs are provided, only the first kRtpCsrcSize elements are
+ // kept.
+ void SetCsrcs(ArrayView<const uint32_t> csrcs)
+ RTC_LOCKS_EXCLUDED(mutex_) override;
+
std::vector<RtpSequenceNumberMap::Info> GetSentRtpPacketInfos(
uint32_t ssrc,
ArrayView<const uint16_t> sequence_numbers) const
@@ -201,6 +207,9 @@
const RtpConfig rtp_config_;
RtpTransportControllerSendInterface* const transport_;
+ // The list of CSRCs to be included when sending an encoded image.
+ std::vector<uint32_t> csrcs_ RTC_GUARDED_BY(mutex_);
+
// When using the generic descriptor we want all simulcast streams to share
// one frame id space (so that the SFU can switch stream without having to
// rewrite the frame id), therefore `shared_frame_id` has to live in a place
diff --git a/call/rtp_video_sender_interface.h b/call/rtp_video_sender_interface.h
index 069a2a8..70c0800 100644
--- a/call/rtp_video_sender_interface.h
+++ b/call/rtp_video_sender_interface.h
@@ -55,6 +55,7 @@
virtual void SetEncodingData(size_t width,
size_t height,
size_t num_temporal_layers) = 0;
+ virtual void SetCsrcs(ArrayView<const uint32_t> csrcs) = 0;
virtual std::vector<RtpSequenceNumberMap::Info> GetSentRtpPacketInfos(
uint32_t ssrc,
ArrayView<const uint16_t> sequence_numbers) const = 0;
diff --git a/media/BUILD.gn b/media/BUILD.gn
index f4a761a..7f92e45 100644
--- a/media/BUILD.gn
+++ b/media/BUILD.gn
@@ -806,6 +806,7 @@
":rtp_utils",
":stream_params",
":video_common",
+ "../api:array_view",
"../api:audio_options_api",
"../api:call_api",
"../api:fec_controller_api",
diff --git a/modules/rtp_rtcp/source/rtp_sender_video.cc b/modules/rtp_rtcp/source/rtp_sender_video.cc
index 96662d1..8503b0d 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -804,17 +804,18 @@
uint32_t rtp_timestamp,
const EncodedImage& encoded_image,
RTPVideoHeader video_header,
- TimeDelta expected_retransmission_time) {
+ TimeDelta expected_retransmission_time,
+ const std::vector<uint32_t>& csrcs) {
if (frame_transformer_delegate_) {
// The frame will be sent async once transformed.
return frame_transformer_delegate_->TransformFrame(
payload_type, codec_type, rtp_timestamp, encoded_image, video_header,
- expected_retransmission_time);
+ expected_retransmission_time, csrcs);
}
return SendVideo(payload_type, codec_type, rtp_timestamp,
encoded_image.CaptureTime(), encoded_image,
encoded_image.size(), video_header,
- expected_retransmission_time, /*csrcs=*/{});
+ expected_retransmission_time, csrcs);
}
DataRate RTPSenderVideo::PostEncodeOverhead() const {
diff --git a/modules/rtp_rtcp/source/rtp_sender_video.h b/modules/rtp_rtcp/source/rtp_sender_video.h
index 86943db..8e30021 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video.h
+++ b/modules/rtp_rtcp/source/rtp_sender_video.h
@@ -116,7 +116,8 @@
uint32_t rtp_timestamp,
const EncodedImage& encoded_image,
RTPVideoHeader video_header,
- TimeDelta expected_retransmission_time);
+ TimeDelta expected_retransmission_time,
+ const std::vector<uint32_t>& csrcs = {});
// Configures video structures produced by encoder to send using the
// dependency descriptor rtp header extension. Next call to SendVideo should
diff --git a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc
index 2d08bd6..7f9a80e 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc
@@ -180,22 +180,21 @@
uint32_t rtp_timestamp,
const EncodedImage& encoded_image,
RTPVideoHeader video_header,
- TimeDelta expected_retransmission_time) {
+ TimeDelta expected_retransmission_time,
+ const std::vector<uint32_t>& csrcs) {
{
MutexLock lock(&sender_lock_);
if (short_circuit_) {
sender_->SendVideo(payload_type, codec_type, rtp_timestamp,
encoded_image.CaptureTime(),
*encoded_image.GetEncodedData(), encoded_image.size(),
- video_header, expected_retransmission_time,
- /*csrcs=*/{});
+ video_header, expected_retransmission_time, csrcs);
return true;
}
}
frame_transformer_->Transform(std::make_unique<TransformableVideoSenderFrame>(
encoded_image, video_header, payload_type, codec_type, rtp_timestamp,
- expected_retransmission_time, ssrc_,
- /*csrcs=*/std::vector<uint32_t>()));
+ expected_retransmission_time, ssrc_, csrcs));
return true;
}
diff --git a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h
index 4302fb7..2e84e1f 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h
+++ b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h
@@ -77,7 +77,8 @@
uint32_t rtp_timestamp,
const EncodedImage& encoded_image,
RTPVideoHeader video_header,
- TimeDelta expected_retransmission_time);
+ TimeDelta expected_retransmission_time,
+ const std::vector<uint32_t>& csrcs = {});
// Implements TransformedFrameCallback. Can be called on any thread. Posts
// the transformed frame to be sent on the `encoder_queue_`.
diff --git a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate_unittest.cc
index af3083b..48c6d09 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate_unittest.cc
@@ -42,6 +42,7 @@
namespace {
using ::testing::_;
+using ::testing::ElementsAreArray;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::SaveArg;
@@ -136,13 +137,22 @@
auto delegate = make_ref_counted<RTPSenderVideoFrameTransformerDelegate>(
&test_sender_, frame_transformer_,
/*ssrc=*/1111, time_controller_.CreateTaskQueueFactory().get());
+ VideoFrameMetadata metadata;
+ EXPECT_CALL(*frame_transformer_, Transform)
+ .WillOnce([&](std::unique_ptr<TransformableFrameInterface> frame) {
+ metadata = static_cast<TransformableVideoFrameInterface*>(frame.get())
+ ->Metadata();
+ });
+ std::vector<uint32_t> csrcs = {1, 2, 3};
EncodedImage encoded_image;
- EXPECT_CALL(*frame_transformer_, Transform);
delegate->TransformFrame(
/*payload_type=*/1, VideoCodecType::kVideoCodecVP8, /*rtp_timestamp=*/2,
encoded_image, RTPVideoHeader(),
- /*expected_retransmission_time=*/TimeDelta::Millis(10));
+ /*expected_retransmission_time=*/TimeDelta::Millis(10), csrcs);
+
+ EXPECT_EQ(metadata.GetSsrc(), 1111U);
+ EXPECT_THAT(metadata.GetCsrcs(), ElementsAreArray(csrcs));
}
TEST_F(RtpSenderVideoFrameTransformerDelegateTest,
@@ -319,17 +329,19 @@
delegate->StartShortCircuiting();
+ std::vector<uint32_t> csrcs = {1, 2, 3};
// Will not call the actual transformer.
EXPECT_CALL(*frame_transformer_, Transform).Times(0);
// Will pass the frame straight to the receiver.
- EXPECT_CALL(test_sender_, SendVideo);
+ EXPECT_CALL(test_sender_,
+ SendVideo(_, _, _, _, _, _, _, _, ElementsAreArray(csrcs)));
EncodedImage encoded_image;
encoded_image.SetEncodedData(EncodedImageBuffer::Create(1));
delegate->TransformFrame(
/*payload_type=*/1, VideoCodecType::kVideoCodecVP8, /*rtp_timestamp=*/2,
encoded_image, RTPVideoHeader(),
- /*expected_retransmission_time=*/TimeDelta::Millis(10));
+ /*expected_retransmission_time=*/TimeDelta::Millis(10), csrcs);
}
} // namespace
diff --git a/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
index 479527e..c279ee5 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
@@ -1617,6 +1617,55 @@
kDefaultExpectedRetransmissionTime);
}
+TEST_F(RtpSenderVideoTest, SendEncodedImageIncludesProvidedCsrcs) {
+ std::vector<uint32_t> expected_csrcs = {1, 2, 3};
+ std::unique_ptr<EncodedImage> encoded_image = CreateDefaultEncodedImage();
+ RTPVideoHeader video_header;
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+
+ ASSERT_TRUE(rtp_sender_video_->SendEncodedImage(
+ 0, kType, kTimestamp, *encoded_image, video_header,
+ kDefaultExpectedRetransmissionTime, expected_csrcs));
+
+ ASSERT_GT(transport_.packets_sent(), 0);
+ std::vector<uint32_t> csrcs = transport_.last_sent_packet().Csrcs();
+ EXPECT_EQ(csrcs, expected_csrcs);
+}
+
+TEST_F(RtpSenderVideoWithFrameTransformerTest,
+ SendEncodedImageIncludesProvidedCsrcs) {
+ auto mock_frame_transformer =
+ make_ref_counted<NiceMock<MockFrameTransformer>>();
+ scoped_refptr<TransformedFrameCallback> callback;
+ EXPECT_CALL(*mock_frame_transformer, RegisterTransformedFrameSinkCallback)
+ .WillOnce(SaveArg<0>(&callback));
+ std::unique_ptr<RTPSenderVideo> rtp_sender_video =
+ CreateSenderWithFrameTransformer(mock_frame_transformer);
+ ASSERT_TRUE(callback);
+ ON_CALL(*mock_frame_transformer, Transform)
+ .WillByDefault(
+ [&callback](std::unique_ptr<TransformableFrameInterface> frame) {
+ callback->OnTransformedFrame(std::move(frame));
+ });
+
+ auto encoded_image = CreateDefaultEncodedImage();
+ std::vector<uint32_t> expected_csrcs = {1, 2, 3};
+ RTPVideoHeader video_header;
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ auto encoder_queue = time_controller_.GetTaskQueueFactory()->CreateTaskQueue(
+ "encoder_queue", TaskQueueFactory::Priority::NORMAL);
+ encoder_queue->PostTask([&] {
+ rtp_sender_video->SendEncodedImage(
+ kPayloadType, kType, kTimestamp, *encoded_image, video_header,
+ kDefaultExpectedRetransmissionTime, expected_csrcs);
+ });
+ time_controller_.AdvanceTime(TimeDelta::Zero());
+
+ ASSERT_GT(transport_.packets_sent(), 0);
+ std::vector<uint32_t> csrcs = transport_.last_sent_packet().Csrcs();
+ EXPECT_EQ(csrcs, expected_csrcs);
+}
+
#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
TEST_F(RtpSenderVideoWithFrameTransformerTest, ValidPayloadTypes) {
auto mock_frame_transformer =
diff --git a/video/video_send_stream_impl_unittest.cc b/video/video_send_stream_impl_unittest.cc
index f0023f1..00b45f4 100644
--- a/video/video_send_stream_impl_unittest.cc
+++ b/video/video_send_stream_impl_unittest.cc
@@ -124,6 +124,7 @@
MOCK_METHOD(uint32_t, GetPayloadBitrateBps, (), (const, override));
MOCK_METHOD(uint32_t, GetProtectionBitrateBps, (), (const, override));
MOCK_METHOD(void, SetEncodingData, (size_t, size_t, size_t), (override));
+ MOCK_METHOD(void, SetCsrcs, (ArrayView<const uint32_t> csrcs), (override));
MOCK_METHOD(std::vector<RtpSequenceNumberMap::Info>,
GetSentRtpPacketInfos,
(uint32_t ssrc, ArrayView<const uint16_t> sequence_numbers),