Fix video shadowing issues
BUG=webrtc:42223409
Change-Id: If64e16c5da68f7499f26fe70f5450258f9eb7724
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/382040
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Commit-Queue: Philipp Hancke <phancke@meta.com>
Cr-Commit-Position: refs/heads/main@{#44199}
diff --git a/media/base/video_adapter_unittest.cc b/media/base/video_adapter_unittest.cc
index 818c6b2..2397d2e 100644
--- a/media/base/video_adapter_unittest.cc
+++ b/media/base/video_adapter_unittest.cc
@@ -1122,24 +1122,25 @@
}
TEST_P(VideoAdapterTest, AdaptResolutionInStepsFirst2_3) {
- const int kWidth = 1920;
- const int kHeight = 1080;
- OnOutputFormatRequest(kWidth, kHeight, std::nullopt); // 16:9 aspect.
+ const int kWidth1080p = 1920;
+ const int kHeight1080p = 1080;
+ OnOutputFormatRequest(kWidth1080p, kHeight1080p,
+ std::nullopt); // 16:9 aspect.
// Scale factors: 2/3, 3/4, 2/3, 3/4, ...
// Scale: 2/3, 1/2, 1/3, 1/4, 1/6, 1/8, 1/12.
const int kExpectedWidths[] = {1280, 960, 640, 480, 320, 240, 160};
const int kExpectedHeights[] = {720, 540, 360, 270, 180, 135, 90};
- int request_width = kWidth;
- int request_height = kHeight;
+ int request_width = kWidth1080p;
+ int request_height = kHeight1080p;
for (size_t i = 0; i < arraysize(kExpectedWidths); ++i) {
// Adapt down one step.
adapter_.OnSinkWants(BuildSinkWants(std::nullopt,
request_width * request_height - 1,
std::numeric_limits<int>::max()));
- EXPECT_TRUE(adapter_.AdaptFrameResolution(kWidth, kHeight, 0,
+ EXPECT_TRUE(adapter_.AdaptFrameResolution(kWidth1080p, kHeight1080p, 0,
&cropped_width_, &cropped_height_,
&out_width_, &out_height_));
EXPECT_EQ(kExpectedWidths[i], out_width_);
@@ -1150,26 +1151,27 @@
}
TEST_P(VideoAdapterTest, AdaptResolutionInStepsFirst2x2_3) {
- const int kWidth = 1440;
- const int kHeight = 1080;
- OnOutputFormatRequest(kWidth, kHeight, std::nullopt); // 4:3 aspect.
+ const int kWidth1080p4to3 = 1440;
+ const int kHeight1080p4to3 = 1080;
+ OnOutputFormatRequest(kWidth1080p4to3, kHeight1080p4to3,
+ std::nullopt); // 4:3 aspect.
// Scale factors: 2/3, 2/3, 3/4, 2/3, 3/4, ...
// Scale : 2/3, 4/9, 1/3, 2/9, 1/6, 1/9, 1/12, 1/18, 1/24, 1/36.
const int kExpectedWidths[] = {960, 640, 480, 320, 240, 160, 120, 80, 60, 40};
const int kExpectedHeights[] = {720, 480, 360, 240, 180, 120, 90, 60, 45, 30};
- int request_width = kWidth;
- int request_height = kHeight;
+ int request_width = kWidth1080p4to3;
+ int request_height = kHeight1080p4to3;
for (size_t i = 0; i < arraysize(kExpectedWidths); ++i) {
// Adapt down one step.
adapter_.OnSinkWants(BuildSinkWants(std::nullopt,
request_width * request_height - 1,
std::numeric_limits<int>::max()));
- EXPECT_TRUE(adapter_.AdaptFrameResolution(kWidth, kHeight, 0,
- &cropped_width_, &cropped_height_,
- &out_width_, &out_height_));
+ EXPECT_TRUE(adapter_.AdaptFrameResolution(
+ kWidth1080p4to3, kHeight1080p4to3, 0, &cropped_width_, &cropped_height_,
+ &out_width_, &out_height_));
EXPECT_EQ(kExpectedWidths[i], out_width_);
EXPECT_EQ(kExpectedHeights[i], out_height_);
request_width = out_width_;
diff --git a/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc b/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
index 08d7867..589ad5d 100644
--- a/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
+++ b/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
@@ -806,16 +806,18 @@
EXPECT_TRUE(seen_ss_data);
// Force key-frame.
- std::vector<VideoFrameType> frame_types = {VideoFrameType::kVideoFrameKey};
- SetWaitForEncodedFramesThreshold(1);
- EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
- encoder_->Encode(NextInputFrame(), &frame_types));
- std::vector<EncodedImage> encoded_frame;
- std::vector<CodecSpecificInfo> codec_specific_info;
- ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
- // Key-frame should be produced.
- EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameKey);
- EXPECT_EQ(encoded_frame[0].SpatialIndex().value_or(-1), 2);
+ {
+ std::vector<VideoFrameType> frame_types = {VideoFrameType::kVideoFrameKey};
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), &frame_types));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ // Key-frame should be produced.
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(encoded_frame[0].SpatialIndex().value_or(-1), 2);
+ }
// Encode some more frames.
for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
@@ -1064,22 +1066,24 @@
}
// Force key-frame.
- std::vector<VideoFrameType> frame_types = {VideoFrameType::kVideoFrameKey};
- SetWaitForEncodedFramesThreshold(1);
- EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
- encoder_->Encode(NextInputFrame(), &frame_types));
- std::vector<EncodedImage> encoded_frame;
- std::vector<CodecSpecificInfo> codec_specific_info;
- ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
- // Key-frame should be produced.
- EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameKey);
+ {
+ std::vector<VideoFrameType> frame_types = {VideoFrameType::kVideoFrameKey};
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), &frame_types));
+ std::vector<EncodedImage> encoded_frame;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific_info));
+ // Key-frame should be produced.
+ EXPECT_EQ(encoded_frame[0]._frameType, VideoFrameType::kVideoFrameKey);
- // Enable the second layer back.
- // Allocate high bit rate to avoid frame dropping due to rate control.
- bitrate_allocation.SetBitrate(
- 1, 0, codec_settings_.spatialLayers[0].targetBitrate * 1000 * 2);
- encoder_->SetRates(VideoEncoder::RateControlParameters(
- bitrate_allocation, codec_settings_.maxFramerate));
+ // Enable the second layer back.
+ // Allocate high bit rate to avoid frame dropping due to rate control.
+ bitrate_allocation.SetBitrate(
+ 1, 0, codec_settings_.spatialLayers[0].targetBitrate * 1000 * 2);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+ }
for (size_t frame_num = 0; frame_num < num_frames_to_encode; ++frame_num) {
SetWaitForEncodedFramesThreshold(2);
@@ -1613,17 +1617,20 @@
EXPECT_TRUE(frame_dropped);
// Enable the last layer.
- bitrate_allocation.SetBitrate(
- 2, 0, codec_settings_.spatialLayers[2].targetBitrate * 1000);
- encoder_->SetRates(VideoEncoder::RateControlParameters(
- bitrate_allocation, codec_settings_.maxFramerate));
- SetWaitForEncodedFramesThreshold(1);
- EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr));
- std::vector<EncodedImage> encoded_frames;
- std::vector<CodecSpecificInfo> codec_specific_info;
- ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
- // No drop allowed.
- EXPECT_EQ(encoded_frames.size(), 3u);
+ {
+ bitrate_allocation.SetBitrate(
+ 2, 0, codec_settings_.spatialLayers[2].targetBitrate * 1000);
+ encoder_->SetRates(VideoEncoder::RateControlParameters(
+ bitrate_allocation, codec_settings_.maxFramerate));
+ SetWaitForEncodedFramesThreshold(1);
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ encoder_->Encode(NextInputFrame(), nullptr));
+ std::vector<EncodedImage> encoded_frames;
+ std::vector<CodecSpecificInfo> codec_specific_info;
+ ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_info));
+ // No drop allowed.
+ EXPECT_EQ(encoded_frames.size(), 3u);
+ }
// Verify that frame-dropping is re-enabled back.
frame_dropped = false;
diff --git a/video/rtp_video_stream_receiver2_unittest.cc b/video/rtp_video_stream_receiver2_unittest.cc
index f9d7dab..eb9ce04 100644
--- a/video/rtp_video_stream_receiver2_unittest.cc
+++ b/video/rtp_video_stream_receiver2_unittest.cc
@@ -828,13 +828,13 @@
}
TEST_P(RtpVideoStreamReceiver2TestH264, OutOfBandFmtpSpsPps) {
- constexpr int kPayloadType = 99;
+ constexpr int kH264PayloadType = 99;
webrtc::CodecParameterMap codec_params;
// Example parameter sets from https://tools.ietf.org/html/rfc3984#section-8.2
// .
codec_params.insert(
{cricket::kH264FmtpSpropParameterSets, "Z0IACpZTBYmI,aMljiA=="});
- rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, kVideoCodecH264,
+ rtp_video_stream_receiver_->AddReceiveCodec(kH264PayloadType, kVideoCodecH264,
codec_params,
/*raw_payload=*/false);
rtp_video_stream_receiver_->StartReceive();
@@ -853,7 +853,7 @@
RtpPacketReceived rtp_packet;
RTPVideoHeader video_header = GetDefaultH264VideoHeader();
AddIdr(&video_header, 0);
- rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetPayloadType(kH264PayloadType);
rtp_packet.SetSequenceNumber(2);
rtp_packet.SetMarker(true);
video_header.is_first_packet_in_frame = true;
@@ -877,7 +877,6 @@
}
TEST_P(RtpVideoStreamReceiver2TestH264, ForceSpsPpsIdrIsKeyframe) {
- constexpr int kPayloadType = 99;
webrtc::CodecParameterMap codec_params;
// Forcing can be done either with field trial or codec_params.
if (!env_.field_trials().IsEnabled("WebRTC-SpsPpsIdrIsH264Keyframe")) {
@@ -1265,10 +1264,10 @@
TEST_F(RtpVideoStreamReceiver2Test, UnwrapsFrameId) {
const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
- const int kPayloadType = 123;
+ const int kPayloadTypeGeneric = 123;
- rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, kVideoCodecGeneric,
- {},
+ rtp_video_stream_receiver_->AddReceiveCodec(kPayloadTypeGeneric,
+ kVideoCodecGeneric, {},
/*raw_payload=*/true);
rtp_video_stream_receiver_->StartReceive();
RtpHeaderExtensionMap extension_map;
@@ -1292,7 +1291,7 @@
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
data.size());
rtp_packet.SetMarker(true);
- rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetPayloadType(kPayloadTypeGeneric);
rtp_packet.SetSequenceNumber(++rtp_sequence_number);
rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
};
diff --git a/video/video_stream_encoder_unittest.cc b/video/video_stream_encoder_unittest.cc
index 41ee0ca..158c459 100644
--- a/video/video_stream_encoder_unittest.cc
+++ b/video/video_stream_encoder_unittest.cc
@@ -3888,9 +3888,9 @@
TEST_F(VideoStreamEncoderTest,
FpsCountReturnsToZeroForFewerAdaptationsUpThanDown) {
+ const int64_t kFrameInterval150Ms = 150;
const int kWidth = 640;
const int kHeight = 360;
- const int64_t kFrameIntervalMs = 150;
video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
@@ -3900,7 +3900,7 @@
video_stream_encoder_->SetSource(&source,
webrtc::DegradationPreference::BALANCED);
- int64_t timestamp_ms = kFrameIntervalMs;
+ int64_t timestamp_ms = kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
sink_.WaitForEncodedFrame(kWidth, kHeight);
EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants());
@@ -3910,7 +3910,7 @@
// Trigger adapt down, expect reduced fps (640x360@15fps).
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
sink_.WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(),
@@ -3921,14 +3921,14 @@
// Source requests 270p, expect reduced resolution (480x270@15fps).
source.OnOutputFormatRequest(480, 270);
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(480, 270);
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt down, expect reduced fps (480x270@10fps).
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
sink_.WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
@@ -3938,14 +3938,14 @@
// Source requests QVGA, expect reduced resolution (320x180@10fps).
source.OnOutputFormatRequest(320, 180);
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(320, 180);
EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt down, expect reduced fps (320x180@7fps).
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
sink_.WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
@@ -3955,14 +3955,14 @@
// Source requests VGA, expect increased resolution (640x360@7fps).
source.OnOutputFormatRequest(640, 360);
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt up, expect increased fps (640x360@(max-2)fps).
video_stream_encoder_->TriggerQualityHigh();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
@@ -3972,7 +3972,7 @@
// Trigger adapt up, expect increased fps (640x360@(max-1)fps).
video_stream_encoder_->TriggerQualityHigh();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
@@ -3982,7 +3982,7 @@
// Trigger adapt up, expect increased fps (640x360@maxfps).
video_stream_encoder_->TriggerQualityHigh();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
@@ -3997,7 +3997,7 @@
FpsCountReturnsToZeroForFewerAdaptationsUpThanDownWithTwoResources) {
const int kWidth = 1280;
const int kHeight = 720;
- const int64_t kFrameIntervalMs = 150;
+ const int64_t kFrameInterval150Ms = 150;
video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
@@ -4007,7 +4007,7 @@
video_stream_encoder_->SetSource(&source,
webrtc::DegradationPreference::BALANCED);
- int64_t timestamp_ms = kFrameIntervalMs;
+ int64_t timestamp_ms = kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
sink_.WaitForEncodedFrame(kWidth, kHeight);
EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants());
@@ -4017,7 +4017,7 @@
// Trigger adapt down, expect scaled down resolution (960x540@maxfps).
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
sink_.WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(),
@@ -4028,7 +4028,7 @@
// Trigger adapt down, expect scaled down resolution (640x360@maxfps).
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
sink_.WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(),
@@ -4039,7 +4039,7 @@
// Trigger adapt down, expect reduced fps (640x360@15fps).
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
@@ -4049,7 +4049,7 @@
// Source requests QVGA, expect reduced resolution (320x180@15fps).
source.OnOutputFormatRequest(320, 180);
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(320, 180);
EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4057,7 +4057,7 @@
// Trigger adapt down, expect reduced fps (320x180@7fps).
video_stream_encoder_->TriggerCpuOveruse();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
@@ -4070,7 +4070,7 @@
// Source requests HD, expect increased resolution (640x360@7fps).
source.OnOutputFormatRequest(1280, 720);
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4078,7 +4078,7 @@
// Trigger adapt up, expect increased fps (640x360@(max-1)fps).
video_stream_encoder_->TriggerCpuUnderuse();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
@@ -4092,7 +4092,7 @@
// Trigger adapt up, expect increased fps (640x360@maxfps).
video_stream_encoder_->TriggerQualityHigh();
video_stream_encoder_->TriggerCpuUnderuse();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
@@ -4106,7 +4106,7 @@
// Trigger adapt up, expect increased resolution (960x570@maxfps).
video_stream_encoder_->TriggerQualityHigh();
video_stream_encoder_->TriggerCpuUnderuse();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants()));
@@ -4120,7 +4120,7 @@
// Trigger adapt up, expect increased resolution (1280x720@maxfps).
video_stream_encoder_->TriggerQualityHigh();
video_stream_encoder_->TriggerCpuUnderuse();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants()));
@@ -6145,14 +6145,18 @@
}
TEST_F(VideoStreamEncoderTest, InitialFrameDropActivatesWhenLayersChange) {
- const DataRate kLowTargetBitrate = DataRate::KilobitsPerSec(400);
// Set simulcast.
ResetEncoder("VP8", 3, 1, 1, false);
fake_encoder_.SetQualityScaling(true);
const int kWidth = 1280;
const int kHeight = 720;
video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
- kLowTargetBitrate, kLowTargetBitrate, kLowTargetBitrate, 0, 0, 0);
+ /*target_bitrate=*/DataRate::KilobitsPerSec(400),
+ /*stable_target_bitrate=*/DataRate::KilobitsPerSec(400),
+ /*link_allocation=*/DataRate::KilobitsPerSec(400),
+ /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
// Frame should not be dropped.
WaitForEncodedFrame(1);
@@ -6205,14 +6209,18 @@
}
TEST_F(VideoStreamEncoderTest, InitialFrameDropActivatesWhenSVCLayersChange) {
- const DataRate kLowTargetBitrate = DataRate::KilobitsPerSec(400);
// Set simulcast.
ResetEncoder("VP9", 1, 1, 3, false);
fake_encoder_.SetQualityScaling(true);
const int kWidth = 1280;
const int kHeight = 720;
video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
- kLowTargetBitrate, kLowTargetBitrate, kLowTargetBitrate, 0, 0, 0);
+ /*target_bitrate=*/DataRate::KilobitsPerSec(400),
+ /*stable_target_bitrate=*/DataRate::KilobitsPerSec(400),
+ /*link_allocation=*/DataRate::KilobitsPerSec(400),
+ /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
// Frame should not be dropped.
WaitForEncodedFrame(1);
@@ -6771,10 +6779,10 @@
fake_encoder_.SetQp(kQpHigh + 1);
const int kWidth = 1280;
const int kHeight = 720;
- const int64_t kFrameIntervalMs = 100;
+ const int64_t kFrameInterval100Ms = 100;
- int64_t timestamp_ms = kFrameIntervalMs;
+ int64_t timestamp_ms = kFrameInterval100Ms;
for (size_t i = 1; i <= 100; i++) {
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval100Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
}
@@ -6787,7 +6795,7 @@
IsTrue(),
{.timeout = webrtc::TimeDelta::Millis(2000 * 2.5 * 2)}),
IsRtcOk());
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval100Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
video_stream_encoder_->WaitUntilTaskQueueIsIdle();
@@ -7015,7 +7023,7 @@
TEST_F(VideoStreamEncoderTest, DoesntAdaptDownPastMinFramerate) {
const int kFramerateFps = 5;
- const int kFrameIntervalMs = kNumMillisecsPerSec / kFramerateFps;
+ const int kFrameInterval5FpsInMs = kNumMillisecsPerSec / kFramerateFps;
const int kFrameWidth = 1280;
const int kFrameHeight = 720;
@@ -7043,8 +7051,8 @@
if (video_source_.last_sent_width()) {
sink_.WaitForEncodedFrame(timestamp_ms);
}
- timestamp_ms += kFrameIntervalMs;
- AdvanceTime(TimeDelta::Millis(kFrameIntervalMs));
+ timestamp_ms += kFrameInterval5FpsInMs;
+ AdvanceTime(TimeDelta::Millis(kFrameInterval5FpsInMs));
}
// ...and then try to adapt again.
video_stream_encoder_->TriggerCpuOveruse();
@@ -7061,8 +7069,8 @@
AdaptsResolutionAndFramerateForLowQuality_BalancedMode) {
const int kWidth = 1280;
const int kHeight = 720;
- const int64_t kFrameIntervalMs = 150;
- int64_t timestamp_ms = kFrameIntervalMs;
+ const int64_t kFrameInterval150Ms = 150;
+ int64_t timestamp_ms = kFrameInterval150Ms;
ASSERT_EQ(video_encoder_config_.simulcast_layers.size(), 1u);
video_encoder_config_.simulcast_layers[0].width = kWidth;
video_encoder_config_.simulcast_layers[0].height = kHeight;
@@ -7076,7 +7084,7 @@
source.set_adaptation_enabled(true);
video_stream_encoder_->SetSource(&source,
webrtc::DegradationPreference::BALANCED);
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
@@ -7086,7 +7094,7 @@
// Trigger adapt down, expect scaled down resolution (960x540@30fps).
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(),
@@ -7097,7 +7105,7 @@
// Trigger adapt down, expect scaled down resolution (640x360@30fps).
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants()));
@@ -7107,7 +7115,7 @@
// Trigger adapt down, expect reduced fps (640x360@15fps).
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
@@ -7117,7 +7125,7 @@
// Trigger adapt down, expect scaled down resolution (480x270@15fps).
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsEqResolutionLt(source.last_wants()));
@@ -7127,7 +7135,7 @@
// Restrict bitrate, trigger adapt down, expect reduced fps (480x270@10fps).
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
@@ -7137,7 +7145,7 @@
// Trigger adapt down, expect scaled down resolution (320x180@10fps).
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsEqResolutionLt(source.last_wants()));
@@ -7147,7 +7155,7 @@
// Trigger adapt down, expect reduced fps (320x180@7fps).
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
@@ -7158,7 +7166,7 @@
// Trigger adapt down, min resolution reached, expect no change.
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsEqResolutionEqTo(last_wants));
@@ -7168,7 +7176,7 @@
// Trigger adapt up, expect increased fps (320x180@10fps).
video_stream_encoder_->TriggerQualityHigh();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
@@ -7178,7 +7186,7 @@
// Trigger adapt up, expect upscaled resolution (480x270@10fps).
video_stream_encoder_->TriggerQualityHigh();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants()));
@@ -7188,7 +7196,7 @@
// Increase bitrate, trigger adapt up, expect increased fps (480x270@15fps).
video_stream_encoder_->TriggerQualityHigh();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
@@ -7198,7 +7206,7 @@
// Trigger adapt up, expect upscaled resolution (640x360@15fps).
video_stream_encoder_->TriggerQualityHigh();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants()));
@@ -7208,7 +7216,7 @@
// Trigger adapt up, expect increased fps (640x360@30fps).
video_stream_encoder_->TriggerQualityHigh();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsMax());
@@ -7220,7 +7228,7 @@
// Trigger adapt up, expect upscaled resolution (960x540@30fps).
video_stream_encoder_->TriggerQualityHigh();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
@@ -7230,7 +7238,7 @@
// Trigger adapt up, expect no restriction (1280x720fps@30fps).
video_stream_encoder_->TriggerQualityHigh();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
@@ -7250,8 +7258,8 @@
TEST_F(VideoStreamEncoderTest, AdaptWithTwoReasonsAndDifferentOrder_Framerate) {
const int kWidth = 1280;
const int kHeight = 720;
- const int64_t kFrameIntervalMs = 150;
- int64_t timestamp_ms = kFrameIntervalMs;
+ const int64_t kFrameInterval150Ms = 150;
+ int64_t timestamp_ms = kFrameInterval150Ms;
ASSERT_EQ(video_encoder_config_.simulcast_layers.size(), 1u);
video_encoder_config_.simulcast_layers[0].width = kWidth;
video_encoder_config_.simulcast_layers[0].height = kHeight;
@@ -7265,7 +7273,7 @@
source.set_adaptation_enabled(true);
video_stream_encoder_->SetSource(&source,
webrtc::DegradationPreference::BALANCED);
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
@@ -7278,7 +7286,7 @@
// Trigger cpu adapt down, expect scaled down resolution (960x540@30fps).
video_stream_encoder_->TriggerCpuOveruse();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(),
@@ -7292,7 +7300,7 @@
// Trigger cpu adapt down, expect scaled down resolution (640x360@30fps).
video_stream_encoder_->TriggerCpuOveruse();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants()));
@@ -7305,7 +7313,7 @@
// Trigger quality adapt down, expect reduced fps (640x360@15fps).
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
@@ -7322,7 +7330,7 @@
// change then last_wants() is not updated.
auto previous_sink_wants = source.sink_wants();
video_stream_encoder_->TriggerCpuUnderuse();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsEqResolutionEqTo(previous_sink_wants));
@@ -7332,7 +7340,7 @@
// Trigger quality adapt up, expect increased fps (640x360@30fps).
video_stream_encoder_->TriggerQualityHigh();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
@@ -7347,7 +7355,7 @@
// expect increased resolution (960x540@30fps).
video_stream_encoder_->TriggerQualityHigh();
video_stream_encoder_->TriggerCpuUnderuse();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
@@ -7362,7 +7370,7 @@
// expect no restriction (1280x720fps@30fps).
video_stream_encoder_->TriggerQualityHigh();
video_stream_encoder_->TriggerCpuUnderuse();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
@@ -7388,13 +7396,13 @@
const int kWidth = 640;
const int kHeight = 360;
const int kFpsLimit = 15;
- const int64_t kFrameIntervalMs = 150;
+ const int64_t kFrameInterval150Ms = 150;
ASSERT_EQ(video_encoder_config_.simulcast_layers.size(), 1u);
video_encoder_config_.simulcast_layers[0].width = kWidth;
video_encoder_config_.simulcast_layers[0].height = kHeight;
video_encoder_config_.simulcast_layers[0].max_framerate = kDefaultFramerate;
ConfigureEncoder(video_encoder_config_.Copy());
- int64_t timestamp_ms = kFrameIntervalMs;
+ int64_t timestamp_ms = kFrameInterval150Ms;
video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
@@ -7403,7 +7411,7 @@
source.set_adaptation_enabled(true);
video_stream_encoder_->SetSource(&source,
webrtc::DegradationPreference::BALANCED);
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
@@ -7416,7 +7424,7 @@
// Trigger cpu adapt down, expect scaled down framerate (640x360@15fps).
video_stream_encoder_->TriggerCpuOveruse();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsMatchesResolutionMax(Eq(kFpsLimit)));
@@ -7429,7 +7437,7 @@
// Trigger quality adapt down, expect scaled down resolution (480x270@15fps).
video_stream_encoder_->TriggerQualityLow();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsEqResolutionLt(source.last_wants()));
@@ -7446,7 +7454,7 @@
// Store current sink wants since we expect no change ind if there is no
// change then last__wants() is not updated.
video_stream_encoder_->TriggerCpuUnderuse();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsEqResolutionEqTo(previous_sink_wants));
@@ -7456,7 +7464,7 @@
// Trigger quality adapt up, expect upscaled resolution (640x360@15fps).
video_stream_encoder_->TriggerQualityHigh();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants()));
@@ -7470,7 +7478,7 @@
// Trigger quality and cpu adapt up, expect increased fps (640x360@30fps).
video_stream_encoder_->TriggerQualityHigh();
video_stream_encoder_->TriggerCpuUnderuse();
- timestamp_ms += kFrameIntervalMs;
+ timestamp_ms += kFrameInterval150Ms;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
@@ -7699,17 +7707,22 @@
const int kFrameWidth = 320;
const int kFrameHeight = 240;
const int kFps = 30;
- const DataRate kTargetBitrate = DataRate::KilobitsPerSec(120);
const int kNumFramesInRun = kFps * 5; // Runs of five seconds.
video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
- kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ /*target_bitrate=*/DataRate::KilobitsPerSec(120),
+ /*stable_target_bitrate=*/DataRate::KilobitsPerSec(120),
+ /*link_allocation=*/DataRate::KilobitsPerSec(120),
+ /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
int64_t timestamp_ms = CurrentTimeMs();
max_framerate_ = kFps;
// Insert 3 seconds of video, verify number of drops with normal bitrate.
fake_encoder_.SimulateOvershoot(1.0);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
int num_dropped = 0;
for (int i = 0; i < kNumFramesInRun; ++i) {
video_source_.IncomingCapturedFrame(
@@ -7743,10 +7756,7 @@
overshoot_factor = 3.0;
}
fake_encoder_.SimulateOvershoot(overshoot_factor);
- video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
- kTargetBitrate + DataRate::KilobitsPerSec(1),
- kTargetBitrate + DataRate::KilobitsPerSec(1),
- kTargetBitrate + DataRate::KilobitsPerSec(1), 0, 0, 0);
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle();
num_dropped = 0;
for (int i = 0; i < kNumFramesInRun; ++i) {
video_source_.IncomingCapturedFrame(
@@ -7759,9 +7769,6 @@
timestamp_ms += 1000 / kFps;
}
- video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
- kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
-
// Target framerate should be still be near the expected target, despite
// the frame drops.
EXPECT_NEAR(fake_encoder_.GetLastFramerate(), kFps, 1);
@@ -7776,14 +7783,18 @@
const int kFrameWidth = 320;
const int kFrameHeight = 240;
const int kActualInputFps = 24;
- const DataRate kTargetBitrate = DataRate::KilobitsPerSec(120);
ASSERT_GT(max_framerate_, kActualInputFps);
int64_t timestamp_ms = CurrentTimeMs();
max_framerate_ = kActualInputFps;
video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
- kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ /*target_bitrate=*/DataRate::KilobitsPerSec(120),
+ /*stable_target_bitrate=*/DataRate::KilobitsPerSec(120),
+ /*link_allocation=*/DataRate::KilobitsPerSec(120),
+ /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
// Insert 3 seconds of video, with an input fps lower than configured max.
for (int i = 0; i < kActualInputFps * 3; ++i) {
@@ -7959,11 +7970,14 @@
TEST_F(VideoStreamEncoderTest, CopiesVideoFrameMetadataAfterDownscale) {
const int kFrameWidth = 1280;
const int kFrameHeight = 720;
- const DataRate kTargetBitrate =
- DataRate::KilobitsPerSec(300); // Too low for HD resolution.
video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
- kTargetBitrate, kTargetBitrate, kTargetBitrate, 0, 0, 0);
+ /*target_bitrate=*/DataRate::KilobitsPerSec(300),
+ /*stable_target_bitrate=*/DataRate::KilobitsPerSec(300),
+ /*link_allocation=*/DataRate::KilobitsPerSec(300),
+ /*fraction_lost=*/0,
+ /*round_trip_time_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
video_stream_encoder_->WaitUntilTaskQueueIsIdle();
// Insert a first video frame. It should be dropped because of downscale in