Reland of Deprecate VCMPacketizationCallback::SendData and use EncodedImageCallback instead. (patchset #1 id:1 of https://codereview.webrtc.org/1903193002/ )
Reason for revert:
A fix is being prepared downstream, so this can now go in.
Original issue's description:
> Revert of Deprecate VCMPacketizationCallback::SendData and use EncodedImageCallback instead. (patchset #5 id:80001 of https://codereview.webrtc.org/1897233002/ )
>
> Reason for revert:
> API changes broke downstream.
>
> Original issue's description:
> > Deprecate VCMPacketizationCallback::SendData and use EncodedImageCallback instead.
> > EncodedImageCallback is already used by all encoder implementations, so it is the natural interface for the transport to use as well.
> > EncodedImageCallback can of course be cleaned up in the future.
> >
> > This moves creation of RTPVideoHeader from the GenericEncoder to the PayloadRouter.
> >
> > BUG=webrtc:5687
> >
> > Committed: https://crrev.com/f5d55aaecdc39e9cc66eb6e87614f04afe28f6eb
> > Cr-Commit-Position: refs/heads/master@{#12436}
>
> TBR=stefan@webrtc.org,pbos@webrtc.org,perkj@webrtc.org
> # Skipping CQ checks because original CL landed less than 1 day ago.
> NOPRESUBMIT=true
> NOTREECHECKS=true
> NOTRY=true
> BUG=webrtc:5687
>
> Committed: https://crrev.com/a261e6136655af33f283eda8e60a6dd93dd746a4
> Cr-Commit-Position: refs/heads/master@{#12441}
TBR=stefan@webrtc.org,pbos@webrtc.org,perkj@webrtc.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=webrtc:5687
Review URL: https://codereview.webrtc.org/1905583002
Cr-Commit-Position: refs/heads/master@{#12442}
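
Illustration (not part of the patch): a minimal, self-contained C++ sketch of the callback flow this CL moves to. The types below are simplified stand-ins for webrtc::EncodedImage, CodecSpecificInfo, and EncodedImageCallback (the real definitions live in webrtc/video_encoder.h and webrtc/modules/video_coding/include/video_codec_interface.h), and SketchPayloadRouter is a hypothetical class, not the real PayloadRouter. It shows an encoded frame being handed to the router through Encoded() together with CodecSpecificInfo, with the router owning the payload type and deriving the simulcast index itself instead of being given a pre-built RTPVideoHeader.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified stand-ins for the WebRTC types touched by this CL.
struct EncodedImage {
  uint32_t _timeStamp = 0;
  int64_t capture_time_ms_ = 0;
  const uint8_t* _buffer = nullptr;
  std::size_t _length = 0;
};

enum VideoCodecType { kVideoCodecVP8, kVideoCodecGeneric };

struct CodecSpecificInfo {
  VideoCodecType codecType = kVideoCodecGeneric;
  int simulcast_idx = 0;  // Simplified; the real struct keeps this in a per-codec union.
};

struct RTPFragmentationHeader {};

// Shape of the callback interface this CL standardizes on.
class EncodedImageCallback {
 public:
  virtual ~EncodedImageCallback() {}
  virtual int32_t Encoded(const EncodedImage& encoded_image,
                          const CodecSpecificInfo* codec_specific_info,
                          const RTPFragmentationHeader* fragmentation) = 0;
};

// Hypothetical router in the spirit of PayloadRouter after this CL: it is
// constructed with the payload type and reads the simulcast index from
// CodecSpecificInfo instead of receiving an RTPVideoHeader from the encoder.
class SketchPayloadRouter : public EncodedImageCallback {
 public:
  explicit SketchPayloadRouter(int payload_type) : payload_type_(payload_type) {}

  int32_t Encoded(const EncodedImage& encoded_image,
                  const CodecSpecificInfo* codec_specific_info,
                  const RTPFragmentationHeader* /*fragmentation*/) override {
    const int stream_idx =
        codec_specific_info ? codec_specific_info->simulcast_idx : 0;
    std::printf("route %zu bytes, pt=%d, ts=%u, stream=%d\n",
                encoded_image._length, payload_type_,
                static_cast<unsigned>(encoded_image._timeStamp), stream_idx);
    return 0;  // 0 == routed / sent, matching the convention used in the CL.
  }

 private:
  const int payload_type_;
};

int main() {
  SketchPayloadRouter router(/*payload_type=*/96);
  uint8_t payload = 'a';
  EncodedImage image;
  image._buffer = &payload;
  image._length = 1;
  image._timeStamp = 1;
  CodecSpecificInfo info;
  info.codecType = kVideoCodecVP8;
  info.simulcast_idx = 0;
  router.Encoded(image, &info, nullptr);
  return 0;
}
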
diff --git a/webrtc/modules/video_coding/codec_database.cc b/webrtc/modules/video_coding/codec_database.cc
index a5a7c1e..1baa414 100644
--- a/webrtc/modules/video_coding/codec_database.cc
+++ b/webrtc/modules/video_coding/codec_database.cc
@@ -238,7 +238,6 @@
memcpy(&send_codec_, &new_send_codec, sizeof(send_codec_));
if (!reset_required) {
- encoded_frame_callback_->SetPayloadType(send_codec_.plType);
return true;
}
@@ -249,7 +248,6 @@
ptr_encoder_.reset(
new VCMGenericEncoder(external_encoder_, encoder_rate_observer_,
encoded_frame_callback_, internal_source_));
- encoded_frame_callback_->SetPayloadType(send_codec_.plType);
encoded_frame_callback_->SetInternalSource(internal_source_);
if (ptr_encoder_->InitEncode(&send_codec_, number_of_cores_,
max_payload_size_) < 0) {
diff --git a/webrtc/modules/video_coding/generic_encoder.cc b/webrtc/modules/video_coding/generic_encoder.cc
index 9a3d2ff..321deb0 100644
--- a/webrtc/modules/video_coding/generic_encoder.cc
+++ b/webrtc/modules/video_coding/generic_encoder.cc
@@ -21,76 +21,6 @@
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
-namespace {
-// Map information from info into rtp. If no relevant information is found
-// in info, rtp is set to NULL.
-void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
- RTC_DCHECK(info);
- switch (info->codecType) {
- case kVideoCodecVP8: {
- rtp->codec = kRtpVideoVp8;
- rtp->codecHeader.VP8.InitRTPVideoHeaderVP8();
- rtp->codecHeader.VP8.pictureId = info->codecSpecific.VP8.pictureId;
- rtp->codecHeader.VP8.nonReference = info->codecSpecific.VP8.nonReference;
- rtp->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx;
- rtp->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync;
- rtp->codecHeader.VP8.tl0PicIdx = info->codecSpecific.VP8.tl0PicIdx;
- rtp->codecHeader.VP8.keyIdx = info->codecSpecific.VP8.keyIdx;
- rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
- return;
- }
- case kVideoCodecVP9: {
- rtp->codec = kRtpVideoVp9;
- rtp->codecHeader.VP9.InitRTPVideoHeaderVP9();
- rtp->codecHeader.VP9.inter_pic_predicted =
- info->codecSpecific.VP9.inter_pic_predicted;
- rtp->codecHeader.VP9.flexible_mode =
- info->codecSpecific.VP9.flexible_mode;
- rtp->codecHeader.VP9.ss_data_available =
- info->codecSpecific.VP9.ss_data_available;
- rtp->codecHeader.VP9.picture_id = info->codecSpecific.VP9.picture_id;
- rtp->codecHeader.VP9.tl0_pic_idx = info->codecSpecific.VP9.tl0_pic_idx;
- rtp->codecHeader.VP9.temporal_idx = info->codecSpecific.VP9.temporal_idx;
- rtp->codecHeader.VP9.spatial_idx = info->codecSpecific.VP9.spatial_idx;
- rtp->codecHeader.VP9.temporal_up_switch =
- info->codecSpecific.VP9.temporal_up_switch;
- rtp->codecHeader.VP9.inter_layer_predicted =
- info->codecSpecific.VP9.inter_layer_predicted;
- rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
- rtp->codecHeader.VP9.num_spatial_layers =
- info->codecSpecific.VP9.num_spatial_layers;
-
- if (info->codecSpecific.VP9.ss_data_available) {
- rtp->codecHeader.VP9.spatial_layer_resolution_present =
- info->codecSpecific.VP9.spatial_layer_resolution_present;
- if (info->codecSpecific.VP9.spatial_layer_resolution_present) {
- for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers;
- ++i) {
- rtp->codecHeader.VP9.width[i] = info->codecSpecific.VP9.width[i];
- rtp->codecHeader.VP9.height[i] = info->codecSpecific.VP9.height[i];
- }
- }
- rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
- }
-
- rtp->codecHeader.VP9.num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
- for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i)
- rtp->codecHeader.VP9.pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
- return;
- }
- case kVideoCodecH264:
- rtp->codec = kRtpVideoH264;
- return;
- case kVideoCodecGeneric:
- rtp->codec = kRtpVideoGeneric;
- rtp->simulcastIdx = info->codecSpecific.generic.simulcast_idx;
- return;
- default:
- return;
- }
-}
-} // namespace
-
VCMGenericEncoder::VCMGenericEncoder(
VideoEncoder* encoder,
VideoEncoderRateObserver* rate_observer,
@@ -216,7 +146,6 @@
EncodedImageCallback* post_encode_callback)
: send_callback_(),
media_opt_(nullptr),
- payload_type_(0),
internal_source_(false),
post_encode_callback_(post_encode_callback) {}
@@ -234,19 +163,8 @@
const RTPFragmentationHeader* fragmentation_header) {
TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
"timestamp", encoded_image._timeStamp);
- post_encode_callback_->Encoded(encoded_image, nullptr, nullptr);
-
- if (send_callback_ == nullptr)
- return VCM_UNINITIALIZED;
-
- RTPVideoHeader rtp_video_header;
- memset(&rtp_video_header, 0, sizeof(RTPVideoHeader));
- if (codec_specific)
- CopyCodecSpecific(codec_specific, &rtp_video_header);
- rtp_video_header.rotation = encoded_image.rotation_;
-
- int32_t ret_val = send_callback_->SendData(
- payload_type_, encoded_image, fragmentation_header, &rtp_video_header);
+ int ret_val = post_encode_callback_->Encoded(encoded_image, codec_specific,
+ fragmentation_header);
if (ret_val < 0)
return ret_val;
diff --git a/webrtc/modules/video_coding/generic_encoder.h b/webrtc/modules/video_coding/generic_encoder.h
index a39ffd7..89d07e2 100644
--- a/webrtc/modules/video_coding/generic_encoder.h
+++ b/webrtc/modules/video_coding/generic_encoder.h
@@ -44,7 +44,6 @@
const RTPFragmentationHeader* fragmentation_header) override;
int32_t SetTransportCallback(VCMPacketizationCallback* transport);
void SetMediaOpt(media_optimization::MediaOptimization* media_opt);
- void SetPayloadType(uint8_t payload_type) { payload_type_ = payload_type; }
void SetInternalSource(bool internal_source) {
internal_source_ = internal_source;
}
@@ -54,7 +53,6 @@
private:
VCMPacketizationCallback* send_callback_;
media_optimization::MediaOptimization* media_opt_;
- uint8_t payload_type_;
bool internal_source_;
EncodedImageCallback* post_encode_callback_;
diff --git a/webrtc/modules/video_coding/include/video_coding_defines.h b/webrtc/modules/video_coding/include/video_coding_defines.h
index 4fe8c79..231e24f 100644
--- a/webrtc/modules/video_coding/include/video_coding_defines.h
+++ b/webrtc/modules/video_coding/include/video_coding_defines.h
@@ -57,13 +57,11 @@
};
// Callback class used for sending data ready to be packetized
+// Deprecated.
+// TODO(perkj): Remove once OnEncoderImplementationName is not used.
class VCMPacketizationCallback {
public:
- virtual int32_t SendData(uint8_t payloadType,
- const EncodedImage& encoded_image,
- const RTPFragmentationHeader* fragmentationHeader,
- const RTPVideoHeader* rtpVideoHdr) = 0;
-
+ // TODO(perkj): Refactor this. It does not belong in VCMPacketizationCallback.
virtual void OnEncoderImplementationName(const char* implementation_name) {}
protected:
diff --git a/webrtc/modules/video_coding/utility/ivf_file_writer.cc b/webrtc/modules/video_coding/utility/ivf_file_writer.cc
index 8161a8b..a3b680d 100644
--- a/webrtc/modules/video_coding/utility/ivf_file_writer.cc
+++ b/webrtc/modules/video_coding/utility/ivf_file_writer.cc
@@ -18,7 +18,7 @@
IvfFileWriter::IvfFileWriter(const std::string& file_name,
std::unique_ptr<FileWrapper> file,
- RtpVideoCodecTypes codec_type)
+ VideoCodecType codec_type)
: codec_type_(codec_type),
num_frames_(0),
width_(0),
@@ -34,9 +34,8 @@
const size_t kIvfHeaderSize = 32;
-std::unique_ptr<IvfFileWriter> IvfFileWriter::Open(
- const std::string& file_name,
- RtpVideoCodecTypes codec_type) {
+std::unique_ptr<IvfFileWriter> IvfFileWriter::Open(const std::string& file_name,
+ VideoCodecType codec_type) {
std::unique_ptr<IvfFileWriter> file_writer;
std::unique_ptr<FileWrapper> file(FileWrapper::Create());
if (file->OpenFile(file_name.c_str(), false) != 0)
@@ -65,19 +64,19 @@
ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[6], 32); // Header size.
switch (codec_type_) {
- case kRtpVideoVp8:
+ case kVideoCodecVP8:
ivf_header[8] = 'V';
ivf_header[9] = 'P';
ivf_header[10] = '8';
ivf_header[11] = '0';
break;
- case kRtpVideoVp9:
+ case kVideoCodecVP9:
ivf_header[8] = 'V';
ivf_header[9] = 'P';
ivf_header[10] = '9';
ivf_header[11] = '0';
break;
- case kRtpVideoH264:
+ case kVideoCodecH264:
ivf_header[8] = 'H';
ivf_header[9] = '2';
ivf_header[10] = '6';
diff --git a/webrtc/modules/video_coding/utility/ivf_file_writer.h b/webrtc/modules/video_coding/utility/ivf_file_writer.h
index 61d1829..25d68a2 100644
--- a/webrtc/modules/video_coding/utility/ivf_file_writer.h
+++ b/webrtc/modules/video_coding/utility/ivf_file_writer.h
@@ -27,18 +27,18 @@
~IvfFileWriter();
static std::unique_ptr<IvfFileWriter> Open(const std::string& file_name,
- RtpVideoCodecTypes codec_type);
+ VideoCodecType codec_type);
bool WriteFrame(const EncodedImage& encoded_image);
bool Close();
private:
IvfFileWriter(const std::string& path_name,
std::unique_ptr<FileWrapper> file,
- RtpVideoCodecTypes codec_type);
+ VideoCodecType codec_type);
bool WriteHeader();
bool InitFromFirstFrame(const EncodedImage& encoded_image);
- const RtpVideoCodecTypes codec_type_;
+ const VideoCodecType codec_type_;
size_t num_frames_;
uint16_t width_;
uint16_t height_;
diff --git a/webrtc/modules/video_coding/utility/ivf_file_writer_unittest.cc b/webrtc/modules/video_coding/utility/ivf_file_writer_unittest.cc
index bdd74a4..577af41 100644
--- a/webrtc/modules/video_coding/utility/ivf_file_writer_unittest.cc
+++ b/webrtc/modules/video_coding/utility/ivf_file_writer_unittest.cc
@@ -103,7 +103,7 @@
}
}
- void RunBasicFileStructureTest(RtpVideoCodecTypes codec_type,
+ void RunBasicFileStructureTest(VideoCodecType codec_type,
const uint8_t fourcc[4],
bool use_capture_tims_ms) {
file_writer_ = IvfFileWriter::Open(file_name_, codec_type);
@@ -135,7 +135,7 @@
};
TEST_F(IvfFileWriterTest, RemovesUnusedFile) {
- file_writer_ = IvfFileWriter::Open(file_name_, kRtpVideoVp8);
+ file_writer_ = IvfFileWriter::Open(file_name_, kVideoCodecVP8);
ASSERT_TRUE(file_writer_.get() != nullptr);
EXPECT_TRUE(FileExists());
EXPECT_TRUE(file_writer_->Close());
@@ -145,32 +145,32 @@
TEST_F(IvfFileWriterTest, WritesBasicVP8FileNtpTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
- RunBasicFileStructureTest(kRtpVideoVp8, fourcc, false);
+ RunBasicFileStructureTest(kVideoCodecVP8, fourcc, false);
}
TEST_F(IvfFileWriterTest, WritesBasicVP8FileMsTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
- RunBasicFileStructureTest(kRtpVideoVp8, fourcc, true);
+ RunBasicFileStructureTest(kVideoCodecVP8, fourcc, true);
}
TEST_F(IvfFileWriterTest, WritesBasicVP9FileNtpTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '9', '0'};
- RunBasicFileStructureTest(kRtpVideoVp9, fourcc, false);
+ RunBasicFileStructureTest(kVideoCodecVP9, fourcc, false);
}
TEST_F(IvfFileWriterTest, WritesBasicVP9FileMsTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '9', '0'};
- RunBasicFileStructureTest(kRtpVideoVp9, fourcc, true);
+ RunBasicFileStructureTest(kVideoCodecVP9, fourcc, true);
}
TEST_F(IvfFileWriterTest, WritesBasicH264FileNtpTimestamp) {
const uint8_t fourcc[4] = {'H', '2', '6', '4'};
- RunBasicFileStructureTest(kRtpVideoH264, fourcc, false);
+ RunBasicFileStructureTest(kVideoCodecH264, fourcc, false);
}
TEST_F(IvfFileWriterTest, WritesBasicH264FileMsTimestamp) {
const uint8_t fourcc[4] = {'H', '2', '6', '4'};
- RunBasicFileStructureTest(kRtpVideoH264, fourcc, true);
+ RunBasicFileStructureTest(kVideoCodecH264, fourcc, true);
}
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/video_coding_impl.cc b/webrtc/modules/video_coding/video_coding_impl.cc
index ee03753..a9a4724 100644
--- a/webrtc/modules/video_coding/video_coding_impl.cc
+++ b/webrtc/modules/video_coding/video_coding_impl.cc
@@ -54,7 +54,6 @@
callback_ = callback;
}
- // TODO(andresp): Change to void as return value is ignored.
virtual int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
diff --git a/webrtc/modules/video_coding/video_coding_impl.h b/webrtc/modules/video_coding/video_coding_impl.h
index 40352b0..e9d7abc 100644
--- a/webrtc/modules/video_coding/video_coding_impl.h
+++ b/webrtc/modules/video_coding/video_coding_impl.h
@@ -79,6 +79,8 @@
uint8_t lossRate,
int64_t rtt);
+ // Deprecated. Use |post_encode_callback| instead.
+ // TODO(perkj): Remove once |OnEncoderImplementationName| is not used.
int32_t RegisterTransportCallback(VCMPacketizationCallback* transport);
int32_t RegisterSendStatisticsCallback(VCMSendStatisticsCallback* sendStats);
int32_t RegisterProtectionCallback(VCMProtectionCallback* protection);
diff --git a/webrtc/modules/video_coding/video_sender_unittest.cc b/webrtc/modules/video_coding/video_sender_unittest.cc
index e15e872..50283fc 100644
--- a/webrtc/modules/video_coding/video_sender_unittest.cc
+++ b/webrtc/modules/video_coding/video_sender_unittest.cc
@@ -86,19 +86,19 @@
std::unique_ptr<VideoFrame> frame_;
};
-class PacketizationCallback : public VCMPacketizationCallback {
+class EncodedImageCallbackImpl : public EncodedImageCallback {
public:
- explicit PacketizationCallback(Clock* clock)
+ explicit EncodedImageCallbackImpl(Clock* clock)
: clock_(clock), start_time_ms_(clock_->TimeInMilliseconds()) {}
- virtual ~PacketizationCallback() {}
+ virtual ~EncodedImageCallbackImpl() {}
- int32_t SendData(uint8_t payload_type,
- const EncodedImage& encoded_image,
- const RTPFragmentationHeader* fragmentation_header,
- const RTPVideoHeader* rtp_video_header) override {
- assert(rtp_video_header);
- frame_data_.push_back(FrameData(encoded_image._length, *rtp_video_header));
+ int32_t Encoded(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) override {
+ assert(codec_specific_info);
+ frame_data_.push_back(
+ FrameData(encoded_image._length, *codec_specific_info));
return 0;
}
@@ -130,11 +130,12 @@
struct FrameData {
FrameData() {}
- FrameData(size_t payload_size, const RTPVideoHeader& rtp_video_header)
- : payload_size(payload_size), rtp_video_header(rtp_video_header) {}
+ FrameData(size_t payload_size, const CodecSpecificInfo& codec_specific_info)
+ : payload_size(payload_size),
+ codec_specific_info(codec_specific_info) {}
size_t payload_size;
- RTPVideoHeader rtp_video_header;
+ CodecSpecificInfo codec_specific_info;
};
int64_t interval_ms() {
@@ -146,9 +147,9 @@
int CountFramesWithinTemporalLayer(int temporal_layer) {
int frames = 0;
for (size_t i = 0; i < frame_data_.size(); ++i) {
- EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
+ EXPECT_EQ(kVideoCodecVP8, frame_data_[i].codec_specific_info.codecType);
const uint8_t temporal_idx =
- frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx;
+ frame_data_[i].codec_specific_info.codecSpecific.VP8.temporalIdx;
if (temporal_idx <= temporal_layer || temporal_idx == kNoTemporalIdx)
frames++;
}
@@ -158,9 +159,9 @@
size_t SumPayloadBytesWithinTemporalLayer(int temporal_layer) {
size_t payload_size = 0;
for (size_t i = 0; i < frame_data_.size(); ++i) {
- EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
+ EXPECT_EQ(kVideoCodecVP8, frame_data_[i].codec_specific_info.codecType);
const uint8_t temporal_idx =
- frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx;
+ frame_data_[i].codec_specific_info.codecSpecific.VP8.temporalIdx;
if (temporal_idx <= temporal_layer || temporal_idx == kNoTemporalIdx)
payload_size += frame_data_[i].payload_size;
}
@@ -176,12 +177,11 @@
protected:
// Note: simulated clock starts at 1 seconds, since parts of webrtc use 0 as
// a special case (e.g. frame rate in media optimization).
- TestVideoSender() : clock_(1000), packetization_callback_(&clock_) {}
+ TestVideoSender() : clock_(1000), encoded_frame_callback_(&clock_) {}
void SetUp() override {
sender_.reset(
- new VideoSender(&clock_, &post_encode_callback_, nullptr, nullptr));
- EXPECT_EQ(0, sender_->RegisterTransportCallback(&packetization_callback_));
+ new VideoSender(&clock_, &encoded_frame_callback_, nullptr, nullptr));
}
void AddFrame() {
@@ -190,8 +190,7 @@
}
SimulatedClock clock_;
- PacketizationCallback packetization_callback_;
- MockEncodedImageCallback post_encode_callback_;
+ EncodedImageCallbackImpl encoded_frame_callback_;
// Used by subclassing tests, need to outlive sender_.
std::unique_ptr<VideoEncoder> encoder_;
std::unique_ptr<VideoSender> sender_;
@@ -415,8 +414,6 @@
void InsertFrames(float framerate, float seconds) {
for (int i = 0; i < seconds * framerate; ++i) {
clock_.AdvanceTimeMilliseconds(1000.0f / framerate);
- EXPECT_CALL(post_encode_callback_, Encoded(_, NULL, NULL))
- .WillOnce(Return(0));
AddFrame();
// SetChannelParameters needs to be called frequently to propagate
// framerate from the media optimization into the encoder.
@@ -435,10 +432,10 @@
// It appears that this 5 seconds simulation is needed to allow
// bitrate and framerate to stabilize.
InsertFrames(framerate, short_simulation_interval);
- packetization_callback_.Reset();
+ encoded_frame_callback_.Reset();
InsertFrames(framerate, long_simulation_interval);
- return packetization_callback_.CalculateVp8StreamInfo();
+ return encoded_frame_callback_.CalculateVp8StreamInfo();
}
protected:
diff --git a/webrtc/video/encoder_state_feedback_unittest.cc b/webrtc/video/encoder_state_feedback_unittest.cc
index 3341cf0..be81bda 100644
--- a/webrtc/video/encoder_state_feedback_unittest.cc
+++ b/webrtc/video/encoder_state_feedback_unittest.cc
@@ -35,6 +35,7 @@
nullptr,
nullptr,
pacer,
+ nullptr,
nullptr) {}
~MockVieEncoder() {}
diff --git a/webrtc/video/payload_router.cc b/webrtc/video/payload_router.cc
index d466e41..abe476f 100644
--- a/webrtc/video/payload_router.cc
+++ b/webrtc/video/payload_router.cc
@@ -13,11 +13,85 @@
#include "webrtc/base/checks.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
-PayloadRouter::PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules)
- : active_(false), num_sending_modules_(1), rtp_modules_(rtp_modules) {
+namespace {
+// Map information from info into rtp.
+void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
+ RTC_DCHECK(info);
+ switch (info->codecType) {
+ case kVideoCodecVP8: {
+ rtp->codec = kRtpVideoVp8;
+ rtp->codecHeader.VP8.InitRTPVideoHeaderVP8();
+ rtp->codecHeader.VP8.pictureId = info->codecSpecific.VP8.pictureId;
+ rtp->codecHeader.VP8.nonReference = info->codecSpecific.VP8.nonReference;
+ rtp->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx;
+ rtp->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync;
+ rtp->codecHeader.VP8.tl0PicIdx = info->codecSpecific.VP8.tl0PicIdx;
+ rtp->codecHeader.VP8.keyIdx = info->codecSpecific.VP8.keyIdx;
+ rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
+ return;
+ }
+ case kVideoCodecVP9: {
+ rtp->codec = kRtpVideoVp9;
+ rtp->codecHeader.VP9.InitRTPVideoHeaderVP9();
+ rtp->codecHeader.VP9.inter_pic_predicted =
+ info->codecSpecific.VP9.inter_pic_predicted;
+ rtp->codecHeader.VP9.flexible_mode =
+ info->codecSpecific.VP9.flexible_mode;
+ rtp->codecHeader.VP9.ss_data_available =
+ info->codecSpecific.VP9.ss_data_available;
+ rtp->codecHeader.VP9.picture_id = info->codecSpecific.VP9.picture_id;
+ rtp->codecHeader.VP9.tl0_pic_idx = info->codecSpecific.VP9.tl0_pic_idx;
+ rtp->codecHeader.VP9.temporal_idx = info->codecSpecific.VP9.temporal_idx;
+ rtp->codecHeader.VP9.spatial_idx = info->codecSpecific.VP9.spatial_idx;
+ rtp->codecHeader.VP9.temporal_up_switch =
+ info->codecSpecific.VP9.temporal_up_switch;
+ rtp->codecHeader.VP9.inter_layer_predicted =
+ info->codecSpecific.VP9.inter_layer_predicted;
+ rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
+ rtp->codecHeader.VP9.num_spatial_layers =
+ info->codecSpecific.VP9.num_spatial_layers;
+
+ if (info->codecSpecific.VP9.ss_data_available) {
+ rtp->codecHeader.VP9.spatial_layer_resolution_present =
+ info->codecSpecific.VP9.spatial_layer_resolution_present;
+ if (info->codecSpecific.VP9.spatial_layer_resolution_present) {
+ for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers;
+ ++i) {
+ rtp->codecHeader.VP9.width[i] = info->codecSpecific.VP9.width[i];
+ rtp->codecHeader.VP9.height[i] = info->codecSpecific.VP9.height[i];
+ }
+ }
+ rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
+ }
+
+ rtp->codecHeader.VP9.num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
+ for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i)
+ rtp->codecHeader.VP9.pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
+ return;
+ }
+ case kVideoCodecH264:
+ rtp->codec = kRtpVideoH264;
+ return;
+ case kVideoCodecGeneric:
+ rtp->codec = kRtpVideoGeneric;
+ rtp->simulcastIdx = info->codecSpecific.generic.simulcast_idx;
+ return;
+ default:
+ return;
+ }
+}
+} // namespace
+
+PayloadRouter::PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules,
+ int payload_type)
+ : active_(false),
+ num_sending_modules_(1),
+ rtp_modules_(rtp_modules),
+ payload_type_(payload_type) {
UpdateModuleSendingState();
}
@@ -60,31 +134,33 @@
}
}
-bool PayloadRouter::RoutePayload(FrameType frame_type,
- int8_t payload_type,
- uint32_t time_stamp,
- int64_t capture_time_ms,
- const uint8_t* payload_data,
- size_t payload_length,
- const RTPFragmentationHeader* fragmentation,
- const RTPVideoHeader* rtp_video_hdr) {
+int32_t PayloadRouter::Encoded(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) {
rtc::CritScope lock(&crit_);
RTC_DCHECK(!rtp_modules_.empty());
if (!active_ || num_sending_modules_ == 0)
- return false;
+ return -1;
int stream_idx = 0;
- if (rtp_video_hdr) {
- RTC_DCHECK_LT(rtp_video_hdr->simulcastIdx, rtp_modules_.size());
- // The simulcast index might actually be larger than the number of modules
- // in case the encoder was processing a frame during a codec reconfig.
- if (rtp_video_hdr->simulcastIdx >= num_sending_modules_)
- return false;
- stream_idx = rtp_video_hdr->simulcastIdx;
- }
+
+ RTPVideoHeader rtp_video_header;
+ memset(&rtp_video_header, 0, sizeof(RTPVideoHeader));
+ if (codec_specific_info)
+ CopyCodecSpecific(codec_specific_info, &rtp_video_header);
+ rtp_video_header.rotation = encoded_image.rotation_;
+
+ RTC_DCHECK_LT(rtp_video_header.simulcastIdx, rtp_modules_.size());
+ // The simulcast index might actually be larger than the number of modules
+ // in case the encoder was processing a frame during a codec reconfig.
+ if (rtp_video_header.simulcastIdx >= num_sending_modules_)
+ return -1;
+ stream_idx = rtp_video_header.simulcastIdx;
+
return rtp_modules_[stream_idx]->SendOutgoingData(
- frame_type, payload_type, time_stamp, capture_time_ms, payload_data,
- payload_length, fragmentation, rtp_video_hdr) == 0 ? true : false;
+ encoded_image._frameType, payload_type_, encoded_image._timeStamp,
+ encoded_image.capture_time_ms_, encoded_image._buffer,
+ encoded_image._length, fragmentation, &rtp_video_header);
}
void PayloadRouter::SetTargetSendBitrates(
diff --git a/webrtc/video/payload_router.h b/webrtc/video/payload_router.h
index 81ec0dd..c2f4b04 100644
--- a/webrtc/video/payload_router.h
+++ b/webrtc/video/payload_router.h
@@ -17,6 +17,7 @@
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
+#include "webrtc/video_encoder.h"
#include "webrtc/system_wrappers/include/atomic32.h"
namespace webrtc {
@@ -27,10 +28,11 @@
// PayloadRouter routes outgoing data to the correct sending RTP module, based
// on the simulcast layer in RTPVideoHeader.
-class PayloadRouter {
+class PayloadRouter : public EncodedImageCallback {
public:
// Rtp modules are assumed to be sorted in simulcast index order.
- explicit PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules);
+ explicit PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules,
+ int payload_type);
~PayloadRouter();
static size_t DefaultMaxPayloadLength();
@@ -41,16 +43,11 @@
void set_active(bool active);
bool active();
- // Input parameters according to the signature of RtpRtcp::SendOutgoingData.
- // Returns true if the packet was routed / sent, false otherwise.
- bool RoutePayload(FrameType frame_type,
- int8_t payload_type,
- uint32_t time_stamp,
- int64_t capture_time_ms,
- const uint8_t* payload_data,
- size_t payload_size,
- const RTPFragmentationHeader* fragmentation,
- const RTPVideoHeader* rtp_video_hdr);
+ // Implements EncodedImageCallback.
+ // Returns 0 if the packet was routed / sent, -1 otherwise.
+ int32_t Encoded(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) override;
// Configures current target bitrate per module. 'stream_bitrates' is assumed
// to be in the same order as 'SetSendingRtpModules'.
@@ -69,6 +66,7 @@
// Rtp modules are assumed to be sorted in simulcast index order. Not owned.
const std::vector<RtpRtcp*> rtp_modules_;
+ const int payload_type_;
RTC_DISALLOW_COPY_AND_ASSIGN(PayloadRouter);
};
diff --git a/webrtc/video/payload_router_unittest.cc b/webrtc/video/payload_router_unittest.cc
index c5d3f38..41e173b 100644
--- a/webrtc/video/payload_router_unittest.cc
+++ b/webrtc/video/payload_router_unittest.cc
@@ -14,6 +14,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
#include "webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/video/payload_router.h"
using ::testing::_;
@@ -27,46 +28,56 @@
MockRtpRtcp rtp;
std::vector<RtpRtcp*> modules(1, &rtp);
- PayloadRouter payload_router(modules);
+ uint8_t payload = 'a';
+ int8_t payload_type = 96;
+ EncodedImage encoded_image;
+ encoded_image._timeStamp = 1;
+ encoded_image.capture_time_ms_ = 2;
+ encoded_image._frameType = kVideoFrameKey;
+ encoded_image._buffer = &payload;
+ encoded_image._length = 1;
+
+ PayloadRouter payload_router(modules, payload_type);
payload_router.SetSendingRtpModules(modules.size());
- uint8_t payload = 'a';
- FrameType frame_type = kVideoFrameKey;
- int8_t payload_type = 96;
-
- EXPECT_CALL(rtp, SendOutgoingData(frame_type, payload_type, 0, 0, _, 1,
- nullptr, nullptr))
+ EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
+ encoded_image._timeStamp,
+ encoded_image.capture_time_ms_, &payload,
+ encoded_image._length, nullptr, _))
.Times(0);
- EXPECT_FALSE(payload_router.RoutePayload(frame_type, payload_type, 0, 0,
- &payload, 1, nullptr, nullptr));
+ EXPECT_EQ(-1, payload_router.Encoded(encoded_image, nullptr, nullptr));
payload_router.set_active(true);
- EXPECT_CALL(rtp, SendOutgoingData(frame_type, payload_type, 0, 0, _, 1,
- nullptr, nullptr))
+ EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
+ encoded_image._timeStamp,
+ encoded_image.capture_time_ms_, &payload,
+ encoded_image._length, nullptr, _))
.Times(1);
- EXPECT_TRUE(payload_router.RoutePayload(frame_type, payload_type, 0, 0,
- &payload, 1, nullptr, nullptr));
+ EXPECT_EQ(0, payload_router.Encoded(encoded_image, nullptr, nullptr));
payload_router.set_active(false);
- EXPECT_CALL(rtp, SendOutgoingData(frame_type, payload_type, 0, 0, _, 1,
- nullptr, nullptr))
+ EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
+ encoded_image._timeStamp,
+ encoded_image.capture_time_ms_, &payload,
+ encoded_image._length, nullptr, _))
.Times(0);
- EXPECT_FALSE(payload_router.RoutePayload(frame_type, payload_type, 0, 0,
- &payload, 1, nullptr, nullptr));
+ EXPECT_EQ(-1, payload_router.Encoded(encoded_image, nullptr, nullptr));
payload_router.set_active(true);
- EXPECT_CALL(rtp, SendOutgoingData(frame_type, payload_type, 0, 0, _, 1,
- nullptr, nullptr))
+ EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
+ encoded_image._timeStamp,
+ encoded_image.capture_time_ms_, &payload,
+ encoded_image._length, nullptr, _))
.Times(1);
- EXPECT_TRUE(payload_router.RoutePayload(frame_type, payload_type, 0, 0,
- &payload, 1, nullptr, nullptr));
+ EXPECT_EQ(0, payload_router.Encoded(encoded_image, nullptr, nullptr));
payload_router.SetSendingRtpModules(0);
- EXPECT_CALL(rtp, SendOutgoingData(frame_type, payload_type, 0, 0, _, 1,
- nullptr, nullptr))
+ EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
+ encoded_image._timeStamp,
+ encoded_image.capture_time_ms_, &payload,
+ encoded_image._length, nullptr, _))
.Times(0);
- EXPECT_FALSE(payload_router.RoutePayload(frame_type, payload_type, 0, 0,
- &payload, 1, nullptr, nullptr));
+ EXPECT_EQ(-1, payload_router.Encoded(encoded_image, nullptr, nullptr));
}
TEST(PayloadRouterTest, SendSimulcast) {
@@ -76,36 +87,46 @@
modules.push_back(&rtp_1);
modules.push_back(&rtp_2);
- PayloadRouter payload_router(modules);
+ int8_t payload_type = 96;
+ uint8_t payload = 'a';
+ EncodedImage encoded_image;
+ encoded_image._timeStamp = 1;
+ encoded_image.capture_time_ms_ = 2;
+ encoded_image._frameType = kVideoFrameKey;
+ encoded_image._buffer = &payload;
+ encoded_image._length = 1;
+
+ PayloadRouter payload_router(modules, payload_type);
payload_router.SetSendingRtpModules(modules.size());
- uint8_t payload_1 = 'a';
- FrameType frame_type_1 = kVideoFrameKey;
- int8_t payload_type_1 = 96;
- RTPVideoHeader rtp_hdr_1;
- rtp_hdr_1.simulcastIdx = 0;
+ CodecSpecificInfo codec_info_1;
+ memset(&codec_info_1, 0, sizeof(CodecSpecificInfo));
+ codec_info_1.codecType = kVideoCodecVP8;
+ codec_info_1.codecSpecific.VP8.simulcastIdx = 0;
payload_router.set_active(true);
- EXPECT_CALL(rtp_1, SendOutgoingData(frame_type_1, payload_type_1, 0, 0, _, 1,
- nullptr, &rtp_hdr_1))
+ EXPECT_CALL(rtp_1, SendOutgoingData(encoded_image._frameType, payload_type,
+ encoded_image._timeStamp,
+ encoded_image.capture_time_ms_, &payload,
+ encoded_image._length, nullptr, _))
.Times(1);
EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _))
.Times(0);
- EXPECT_TRUE(payload_router.RoutePayload(frame_type_1, payload_type_1, 0, 0,
- &payload_1, 1, nullptr, &rtp_hdr_1));
+ EXPECT_EQ(0, payload_router.Encoded(encoded_image, &codec_info_1, nullptr));
- uint8_t payload_2 = 'b';
- FrameType frame_type_2 = kVideoFrameDelta;
- int8_t payload_type_2 = 97;
- RTPVideoHeader rtp_hdr_2;
- rtp_hdr_2.simulcastIdx = 1;
- EXPECT_CALL(rtp_2, SendOutgoingData(frame_type_2, payload_type_2, 0, 0, _, 1,
- nullptr, &rtp_hdr_2))
+ CodecSpecificInfo codec_info_2;
+ memset(&codec_info_2, 0, sizeof(CodecSpecificInfo));
+ codec_info_2.codecType = kVideoCodecVP8;
+ codec_info_2.codecSpecific.VP8.simulcastIdx = 1;
+
+ EXPECT_CALL(rtp_2, SendOutgoingData(encoded_image._frameType, payload_type,
+ encoded_image._timeStamp,
+ encoded_image.capture_time_ms_, &payload,
+ encoded_image._length, nullptr, _))
.Times(1);
EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _))
.Times(0);
- EXPECT_TRUE(payload_router.RoutePayload(frame_type_2, payload_type_2, 0, 0,
- &payload_2, 1, nullptr, &rtp_hdr_2));
+ EXPECT_EQ(0, payload_router.Encoded(encoded_image, &codec_info_2, nullptr));
// Inactive.
payload_router.set_active(false);
@@ -113,10 +134,8 @@
.Times(0);
EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _))
.Times(0);
- EXPECT_FALSE(payload_router.RoutePayload(frame_type_1, payload_type_1, 0, 0,
- &payload_1, 1, nullptr, &rtp_hdr_1));
- EXPECT_FALSE(payload_router.RoutePayload(frame_type_2, payload_type_2, 0, 0,
- &payload_2, 1, nullptr, &rtp_hdr_2));
+ EXPECT_EQ(-1, payload_router.Encoded(encoded_image, &codec_info_1, nullptr));
+ EXPECT_EQ(-1, payload_router.Encoded(encoded_image, &codec_info_2, nullptr));
// Invalid simulcast index.
payload_router.SetSendingRtpModules(1);
@@ -125,9 +144,8 @@
.Times(0);
EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _))
.Times(0);
- rtp_hdr_1.simulcastIdx = 1;
- EXPECT_FALSE(payload_router.RoutePayload(frame_type_1, payload_type_1, 0, 0,
- &payload_1, 1, nullptr, &rtp_hdr_1));
+ codec_info_2.codecSpecific.VP8.simulcastIdx = 1;
+ EXPECT_EQ(-1, payload_router.Encoded(encoded_image, &codec_info_2, nullptr));
}
TEST(PayloadRouterTest, MaxPayloadLength) {
@@ -139,7 +157,7 @@
std::vector<RtpRtcp*> modules;
modules.push_back(&rtp_1);
modules.push_back(&rtp_2);
- PayloadRouter payload_router(modules);
+ PayloadRouter payload_router(modules, 42);
EXPECT_EQ(kDefaultMaxLength, PayloadRouter::DefaultMaxPayloadLength());
payload_router.SetSendingRtpModules(modules.size());
@@ -170,7 +188,7 @@
std::vector<RtpRtcp*> modules;
modules.push_back(&rtp_1);
modules.push_back(&rtp_2);
- PayloadRouter payload_router(modules);
+ PayloadRouter payload_router(modules, 42);
payload_router.SetSendingRtpModules(modules.size());
const uint32_t bitrate_1 = 10000;
diff --git a/webrtc/video/send_statistics_proxy.cc b/webrtc/video/send_statistics_proxy.cc
index 6951fda..cf3332c 100644
--- a/webrtc/video/send_statistics_proxy.cc
+++ b/webrtc/video/send_statistics_proxy.cc
@@ -17,6 +17,7 @@
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/system_wrappers/include/metrics.h"
namespace webrtc {
@@ -426,8 +427,17 @@
void SendStatisticsProxy::OnSendEncodedImage(
const EncodedImage& encoded_image,
- const RTPVideoHeader* rtp_video_header) {
- size_t simulcast_idx = rtp_video_header ? rtp_video_header->simulcastIdx : 0;
+ const CodecSpecificInfo* codec_info) {
+ size_t simulcast_idx = 0;
+
+ if (codec_info) {
+ if (codec_info->codecType == kVideoCodecVP8) {
+ simulcast_idx = codec_info->codecSpecific.VP8.simulcastIdx;
+ } else if (codec_info->codecType == kVideoCodecGeneric) {
+ simulcast_idx = codec_info->codecSpecific.generic.simulcast_idx;
+ }
+ }
+
if (simulcast_idx >= config_.rtp.ssrcs.size()) {
LOG(LS_ERROR) << "Encoded image outside simulcast range (" << simulcast_idx
<< " >= " << config_.rtp.ssrcs.size() << ").";
@@ -469,17 +479,16 @@
}
}
- if (encoded_image.qp_ != -1 && rtp_video_header) {
- if (rtp_video_header->codec == kRtpVideoVp8) {
+ if (encoded_image.qp_ != -1 && codec_info) {
+ if (codec_info->codecType == kVideoCodecVP8) {
int spatial_idx = (config_.rtp.ssrcs.size() == 1)
? -1
: static_cast<int>(simulcast_idx);
uma_container_->qp_counters_[spatial_idx].vp8.Add(encoded_image.qp_);
- } else if (rtp_video_header->codec == kRtpVideoVp9) {
- int spatial_idx =
- (rtp_video_header->codecHeader.VP9.num_spatial_layers == 1)
- ? -1
- : rtp_video_header->codecHeader.VP9.spatial_idx;
+ } else if (codec_info->codecType == kVideoCodecVP9) {
+ int spatial_idx = (codec_info->codecSpecific.VP9.num_spatial_layers == 1)
+ ? -1
+ : codec_info->codecSpecific.VP9.spatial_idx;
uma_container_->qp_counters_[spatial_idx].vp9.Add(encoded_image.qp_);
}
}
diff --git a/webrtc/video/send_statistics_proxy.h b/webrtc/video/send_statistics_proxy.h
index bb0372a..ce4c05e 100644
--- a/webrtc/video/send_statistics_proxy.h
+++ b/webrtc/video/send_statistics_proxy.h
@@ -49,7 +49,7 @@
VideoSendStream::Stats GetStats();
virtual void OnSendEncodedImage(const EncodedImage& encoded_image,
- const RTPVideoHeader* rtp_video_header);
+ const CodecSpecificInfo* codec_info);
// Used to update incoming frame rate.
void OnIncomingFrame(int width, int height);
diff --git a/webrtc/video/send_statistics_proxy_unittest.cc b/webrtc/video/send_statistics_proxy_unittest.cc
index dae0b46..eb77a27 100644
--- a/webrtc/video/send_statistics_proxy_unittest.cc
+++ b/webrtc/video/send_statistics_proxy_unittest.cc
@@ -334,16 +334,16 @@
TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8) {
test::ClearHistograms();
EncodedImage encoded_image;
- RTPVideoHeader rtp_video_header;
- rtp_video_header.codec = kRtpVideoVp8;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
for (int i = 0; i < kMinRequiredSamples; ++i) {
- rtp_video_header.simulcastIdx = 0;
+ codec_info.codecSpecific.VP8.simulcastIdx = 0;
encoded_image.qp_ = kQpIdx0;
- statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
- rtp_video_header.simulcastIdx = 1;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ codec_info.codecSpecific.VP8.simulcastIdx = 1;
encoded_image.qp_ = kQpIdx1;
- statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
}
statistics_proxy_.reset();
EXPECT_EQ(1, test::NumHistogramSamples("WebRTC.Video.Encoded.Qp.Vp8.S0"));
@@ -362,13 +362,13 @@
test::ClearHistograms();
EncodedImage encoded_image;
- RTPVideoHeader rtp_video_header;
- rtp_video_header.codec = kRtpVideoVp8;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
for (int i = 0; i < kMinRequiredSamples; ++i) {
- rtp_video_header.simulcastIdx = 0;
+ codec_info.codecSpecific.VP8.simulcastIdx = 0;
encoded_image.qp_ = kQpIdx0;
- statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
}
statistics_proxy_.reset();
EXPECT_EQ(1, test::NumHistogramSamples("WebRTC.Video.Encoded.Qp.Vp8"));
@@ -378,18 +378,17 @@
TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp9) {
test::ClearHistograms();
EncodedImage encoded_image;
- RTPVideoHeader rtp_video_header;
- rtp_video_header.simulcastIdx = 0;
- rtp_video_header.codec = kRtpVideoVp9;
- rtp_video_header.codecHeader.VP9.num_spatial_layers = 2;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP9;
+ codec_info.codecSpecific.VP9.num_spatial_layers = 2;
for (int i = 0; i < kMinRequiredSamples; ++i) {
encoded_image.qp_ = kQpIdx0;
- rtp_video_header.codecHeader.VP9.spatial_idx = 0;
- statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
+ codec_info.codecSpecific.VP9.spatial_idx = 0;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
encoded_image.qp_ = kQpIdx1;
- rtp_video_header.codecHeader.VP9.spatial_idx = 1;
- statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
+ codec_info.codecSpecific.VP9.spatial_idx = 1;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
}
statistics_proxy_.reset();
EXPECT_EQ(1, test::NumHistogramSamples("WebRTC.Video.Encoded.Qp.Vp9.S0"));
@@ -408,15 +407,14 @@
test::ClearHistograms();
EncodedImage encoded_image;
- RTPVideoHeader rtp_video_header;
- rtp_video_header.simulcastIdx = 0;
- rtp_video_header.codec = kRtpVideoVp9;
- rtp_video_header.codecHeader.VP9.num_spatial_layers = 1;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP9;
+ codec_info.codecSpecific.VP9.num_spatial_layers = 1;
for (int i = 0; i < kMinRequiredSamples; ++i) {
encoded_image.qp_ = kQpIdx0;
- rtp_video_header.codecHeader.VP9.spatial_idx = 0;
- statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
+ codec_info.codecSpecific.VP9.spatial_idx = 0;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
}
statistics_proxy_.reset();
EXPECT_EQ(1, test::NumHistogramSamples("WebRTC.Video.Encoded.Qp.Vp9"));
@@ -458,12 +456,13 @@
encoded_image._encodedWidth = kEncodedWidth;
encoded_image._encodedHeight = kEncodedHeight;
- RTPVideoHeader rtp_video_header;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.simulcastIdx = 0;
- rtp_video_header.simulcastIdx = 0;
- statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
- rtp_video_header.simulcastIdx = 1;
- statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ codec_info.codecSpecific.VP8.simulcastIdx = 1;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
VideoSendStream::Stats stats = statistics_proxy_->GetStats();
EXPECT_EQ(kEncodedWidth, stats.substreams[config_.rtp.ssrcs[0]].width);
@@ -485,8 +484,8 @@
// Report stats for second SSRC to make sure it's not outdated along with the
// first SSRC.
- rtp_video_header.simulcastIdx = 1;
- statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
+ codec_info.codecSpecific.VP8.simulcastIdx = 1;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
// Forward 1 ms, reach timeout, substream 0 should have no resolution
// reported, but substream 1 should.
@@ -505,12 +504,13 @@
encoded_image._encodedWidth = kEncodedWidth;
encoded_image._encodedHeight = kEncodedHeight;
- RTPVideoHeader rtp_video_header;
+ CodecSpecificInfo codec_info;
+ codec_info.codecType = kVideoCodecVP8;
+ codec_info.codecSpecific.VP8.simulcastIdx = 0;
- rtp_video_header.simulcastIdx = 0;
- statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
- rtp_video_header.simulcastIdx = 1;
- statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+ codec_info.codecSpecific.VP8.simulcastIdx = 1;
+ statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
statistics_proxy_->OnInactiveSsrc(config_.rtp.ssrcs[1]);
VideoSendStream::Stats stats = statistics_proxy_->GetStats();
diff --git a/webrtc/video/video_receive_stream.cc b/webrtc/video/video_receive_stream.cc
index 7ba076a..2af1d89 100644
--- a/webrtc/video/video_receive_stream.cc
+++ b/webrtc/video/video_receive_stream.cc
@@ -405,25 +405,10 @@
if (kEnableFrameRecording) {
if (!ivf_writer_.get()) {
RTC_DCHECK(codec_specific_info);
- RtpVideoCodecTypes rtp_codec_type;
- switch (codec_specific_info->codecType) {
- case kVideoCodecVP8:
- rtp_codec_type = kRtpVideoVp8;
- break;
- case kVideoCodecVP9:
- rtp_codec_type = kRtpVideoVp9;
- break;
- case kVideoCodecH264:
- rtp_codec_type = kRtpVideoH264;
- break;
- default:
- rtp_codec_type = kRtpVideoNone;
- RTC_NOTREACHED() << "Unsupported codec "
- << codec_specific_info->codecType;
- }
std::ostringstream oss;
oss << "receive_bitstream_ssrc_" << config_.rtp.remote_ssrc << ".ivf";
- ivf_writer_ = IvfFileWriter::Open(oss.str(), rtp_codec_type);
+ ivf_writer_ =
+ IvfFileWriter::Open(oss.str(), codec_specific_info->codecType);
}
if (ivf_writer_.get()) {
bool ok = ivf_writer_->WriteFrame(encoded_image);
diff --git a/webrtc/video/video_send_stream.cc b/webrtc/video/video_send_stream.cc
index baec5a2..85e6555 100644
--- a/webrtc/video/video_send_stream.cc
+++ b/webrtc/video/video_send_stream.cc
@@ -229,14 +229,16 @@
this,
config.post_encode_callback,
&stats_proxy_),
- vie_encoder_(num_cpu_cores,
- config_.rtp.ssrcs,
- module_process_thread_,
- &stats_proxy_,
- config.pre_encode_callback,
- &overuse_detector_,
- congestion_controller_->pacer(),
- &payload_router_),
+ vie_encoder_(
+ num_cpu_cores,
+ config_.rtp.ssrcs,
+ module_process_thread_,
+ &stats_proxy_,
+ config.pre_encode_callback,
+ &overuse_detector_,
+ congestion_controller_->pacer(),
+ &payload_router_,
+ config.post_encode_callback ? &encoded_frame_proxy_ : nullptr),
vcm_(vie_encoder_.vcm()),
bandwidth_observer_(congestion_controller_->GetBitrateController()
->CreateRtcpBandwidthObserver()),
@@ -250,7 +252,7 @@
congestion_controller_->packet_router(),
&stats_proxy_,
config_.rtp.ssrcs.size())),
- payload_router_(rtp_rtcp_modules_),
+ payload_router_(rtp_rtcp_modules_, config.encoder_settings.payload_type),
input_(&encoder_wakeup_event_,
config_.local_renderer,
&stats_proxy_,
@@ -319,9 +321,6 @@
ReconfigureVideoEncoder(encoder_config);
- if (config_.post_encode_callback)
- vie_encoder_.RegisterPostEncodeImageCallback(&encoded_frame_proxy_);
-
if (config_.suspend_below_min_bitrate) {
vcm_->SuspendBelowMinBitrate();
bitrate_allocator_->EnforceMinBitrate(false);
diff --git a/webrtc/video/video_send_stream_tests.cc b/webrtc/video/video_send_stream_tests.cc
index 5040911..bd83581 100644
--- a/webrtc/video/video_send_stream_tests.cc
+++ b/webrtc/video/video_send_stream_tests.cc
@@ -1807,7 +1807,6 @@
const CodecSpecificInfo* codecSpecificInfo,
const std::vector<FrameType>* frame_types) override {
CodecSpecificInfo specifics;
- memset(&specifics, 0, sizeof(specifics));
specifics.codecType = kVideoCodecGeneric;
uint8_t buffer[16] = {0};
diff --git a/webrtc/video/vie_encoder.cc b/webrtc/video/vie_encoder.cc
index de3ab71..0c781b8 100644
--- a/webrtc/video/vie_encoder.cc
+++ b/webrtc/video/vie_encoder.cc
@@ -85,7 +85,8 @@
rtc::VideoSinkInterface<VideoFrame>* pre_encode_callback,
OveruseFrameDetector* overuse_detector,
PacedSender* pacer,
- PayloadRouter* payload_router)
+ PayloadRouter* payload_router,
+ EncodedImageCallback* post_encode_callback)
: number_of_cores_(number_of_cores),
ssrcs_(ssrcs),
vp_(VideoProcessing::Create()),
@@ -98,6 +99,7 @@
overuse_detector_(overuse_detector),
pacer_(pacer),
send_payload_router_(payload_router),
+ post_encode_callback_(post_encode_callback),
time_of_last_frame_activity_ms_(0),
encoder_config_(),
min_transmit_bitrate_bps_(0),
@@ -121,6 +123,10 @@
// Enable/disable content analysis: off by default for now.
vp_->EnableContentAnalysis(false);
+ vcm_->RegisterPostEncodeImageCallback(this);
+
+ // TODO(perkj): Remove |RegisterTransportCallback| as soon as we don't use
+ // VCMPacketizationCallback::OnEncoderImplementationName.
if (vcm_->RegisterTransportCallback(this) != 0) {
return false;
}
@@ -403,10 +409,14 @@
stats_proxy_->OnSetRates(bitrate_bps, framerate);
}
-int32_t ViEEncoder::SendData(const uint8_t payload_type,
- const EncodedImage& encoded_image,
- const RTPFragmentationHeader* fragmentation_header,
- const RTPVideoHeader* rtp_video_hdr) {
+void ViEEncoder::OnEncoderImplementationName(const char* implementation_name) {
+ if (stats_proxy_)
+ stats_proxy_->OnEncoderImplementationName(implementation_name);
+}
+
+int32_t ViEEncoder::Encoded(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) {
RTC_DCHECK(send_payload_router_);
{
@@ -414,17 +424,22 @@
time_of_last_frame_activity_ms_ = TickTime::MillisecondTimestamp();
}
- if (stats_proxy_)
- stats_proxy_->OnSendEncodedImage(encoded_image, rtp_video_hdr);
+ if (post_encode_callback_) {
+ post_encode_callback_->Encoded(encoded_image, codec_specific_info,
+ fragmentation);
+ }
- bool success = send_payload_router_->RoutePayload(
- encoded_image._frameType, payload_type, encoded_image._timeStamp,
- encoded_image.capture_time_ms_, encoded_image._buffer,
- encoded_image._length, fragmentation_header, rtp_video_hdr);
+ if (stats_proxy_) {
+ stats_proxy_->OnSendEncodedImage(encoded_image, codec_specific_info);
+ }
+ int success = send_payload_router_->Encoded(
+ encoded_image, codec_specific_info, fragmentation);
overuse_detector_->FrameSent(encoded_image._timeStamp);
if (kEnableFrameRecording) {
- int layer = rtp_video_hdr->simulcastIdx;
+ int layer = codec_specific_info->codecType == kVideoCodecVP8
+ ? codec_specific_info->codecSpecific.VP8.simulcastIdx
+ : 0;
IvfFileWriter* file_writer;
{
rtc::CritScope lock(&data_cs_);
@@ -435,7 +450,7 @@
oss << "_" << ssrc;
oss << "_layer" << layer << ".ivf";
file_writers_[layer] =
- IvfFileWriter::Open(oss.str(), rtp_video_hdr->codec);
+ IvfFileWriter::Open(oss.str(), codec_specific_info->codecType);
}
file_writer = file_writers_[layer].get();
}
@@ -445,13 +460,7 @@
}
}
- return success ? 0 : -1;
-}
-
-void ViEEncoder::OnEncoderImplementationName(
- const char* implementation_name) {
- if (stats_proxy_)
- stats_proxy_->OnEncoderImplementationName(implementation_name);
+ return success;
}
int32_t ViEEncoder::SendStatistics(const uint32_t bit_rate,
@@ -531,11 +540,6 @@
stats_proxy_->OnSuspendChange(video_is_suspended);
}
-void ViEEncoder::RegisterPostEncodeImageCallback(
- EncodedImageCallback* post_encode_callback) {
- vcm_->RegisterPostEncodeImageCallback(post_encode_callback);
-}
-
QMVideoSettingsCallback::QMVideoSettingsCallback(VideoProcessing* vpm)
: vp_(vpm) {
}
diff --git a/webrtc/video/vie_encoder.h b/webrtc/video/vie_encoder.h
index 994c223..27f2d11 100644
--- a/webrtc/video/vie_encoder.h
+++ b/webrtc/video/vie_encoder.h
@@ -18,6 +18,7 @@
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
+#include "webrtc/video_encoder.h"
#include "webrtc/media/base/videosinkinterface.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/video_coding/include/video_coding_defines.h"
@@ -41,6 +42,7 @@
class VideoEncoder;
class ViEEncoder : public VideoEncoderRateObserver,
+ public EncodedImageCallback,
public VCMPacketizationCallback,
public VCMSendStatisticsCallback {
public:
@@ -54,7 +56,8 @@
rtc::VideoSinkInterface<VideoFrame>* pre_encode_callback,
OveruseFrameDetector* overuse_detector,
PacedSender* pacer,
- PayloadRouter* payload_router);
+ PayloadRouter* payload_router,
+ EncodedImageCallback* post_encode_callback);
~ViEEncoder();
bool Init();
@@ -92,12 +95,13 @@
void OnSetRates(uint32_t bitrate_bps, int framerate) override;
// Implements VCMPacketizationCallback.
- int32_t SendData(uint8_t payload_type,
- const EncodedImage& encoded_image,
- const RTPFragmentationHeader* fragmentation_header,
- const RTPVideoHeader* rtp_video_hdr) override;
void OnEncoderImplementationName(const char* implementation_name) override;
+ // Implements EncodedImageCallback.
+ int32_t Encoded(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) override;
+
// Implements VideoSendStatisticsCallback.
int32_t SendStatistics(const uint32_t bit_rate,
const uint32_t frame_rate) override;
@@ -107,10 +111,6 @@
virtual void OnReceivedSLI(uint32_t ssrc, uint8_t picture_id);
virtual void OnReceivedRPSI(uint32_t ssrc, uint64_t picture_id);
- // New-style callbacks, used by VideoSendStream.
- void RegisterPostEncodeImageCallback(
- EncodedImageCallback* post_encode_callback);
-
int GetPaddingNeededBps() const;
void OnBitrateUpdated(uint32_t bitrate_bps,
@@ -139,6 +139,7 @@
OveruseFrameDetector* const overuse_detector_;
PacedSender* const pacer_;
PayloadRouter* const send_payload_router_;
+ EncodedImageCallback* const post_encode_callback_;
// The time we last received an input frame or encoded frame. This is used to
// track when video is stopped long enough that we also want to stop sending
diff --git a/webrtc/video_encoder.h b/webrtc/video_encoder.h
index 89a6464..8c82753 100644
--- a/webrtc/video_encoder.h
+++ b/webrtc/video_encoder.h
@@ -31,6 +31,7 @@
virtual ~EncodedImageCallback() {}
// Callback function which is called when an image has been encoded.
+ // TODO(perkj): Change this to return void.
virtual int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) = 0;