Move usage of QualityScaler to ViEEncoder.
This brings QualityScaler much more in line with OveruseFrameDetector.
The two classes are conceptually similar, and should be used in the
same way. The biggest changes in this CL are:
- Quality scaling is now only done in ViEEncoder and not in each
encoder implementation separately.
- QualityScaler now checks the average QP asynchronously, instead of
having to be polled on each frame.
- QualityScaler is no longer responsible for actually scaling the frames,
but has a callback to ViEEncoder that it uses to express its desire
for lower resolution.
BUG=webrtc:6495
Review-Url: https://codereview.webrtc.org/2398963003
Cr-Commit-Position: refs/heads/master@{#15286}
diff --git a/webrtc/api/android/jni/androidmediaencoder_jni.cc b/webrtc/api/android/jni/androidmediaencoder_jni.cc
index f6d15b7..8a7a724 100644
--- a/webrtc/api/android/jni/androidmediaencoder_jni.cc
+++ b/webrtc/api/android/jni/androidmediaencoder_jni.cc
@@ -37,6 +37,7 @@
#include "webrtc/modules/video_coding/utility/vp8_header_parser.h"
#include "webrtc/system_wrappers/include/field_trial.h"
#include "webrtc/system_wrappers/include/logcat_trace_context.h"
+#include "webrtc/video_encoder.h"
using rtc::Bind;
using rtc::Thread;
@@ -120,8 +121,6 @@
// rtc::MessageHandler implementation.
void OnMessage(rtc::Message* msg) override;
- void OnDroppedFrame() override;
-
bool SupportsNativeHandle() const override { return egl_context_ != nullptr; }
const char* ImplementationName() const override;
@@ -168,7 +167,6 @@
webrtc::EncodedImageCallback* callback);
int32_t ReleaseOnCodecThread();
int32_t SetRatesOnCodecThread(uint32_t new_bit_rate, uint32_t frame_rate);
- void OnDroppedFrameOnCodecThread();
// Helper accessors for MediaCodecVideoEncoder$OutputBufferInfo members.
int GetOutputBufferInfoIndex(JNIEnv* jni, jobject j_output_buffer_info);
@@ -184,6 +182,8 @@
// Search for H.264 start codes.
int32_t NextNaluPosition(uint8_t *buffer, size_t buffer_size);
+ VideoEncoder::ScalingSettings GetScalingSettings() const override;
+
// Displays encoder statistics.
void LogStatistics(bool force_log);
@@ -269,13 +269,9 @@
// True only when between a callback_->OnEncodedImage() call return a positive
// value and the next Encode() call being ignored.
bool drop_next_input_frame_;
+ bool scale_;
// Global references; must be deleted in Release().
std::vector<jobject> input_buffers_;
- QualityScaler quality_scaler_;
- // Dynamic resolution change, off by default.
- bool scale_;
-
- // H264 bitstream parser, used to extract QP from encoded bitstreams.
webrtc::H264BitstreamParser h264_bitstream_parser_;
// VP9 variables to populate codec specific structure.
@@ -417,23 +413,6 @@
ALOGD << "InitEncode request: " << init_width << " x " << init_height;
ALOGD << "Encoder automatic resize " << (scale_ ? "enabled" : "disabled");
- if (scale_) {
- if (codec_type == kVideoCodecVP8 || codec_type == kVideoCodecH264) {
- quality_scaler_.Init(codec_type, codec_settings->startBitrate,
- codec_settings->width, codec_settings->height,
- codec_settings->maxFramerate);
- } else {
- // When adding codec support to additional hardware codecs, also configure
- // their QP thresholds for scaling.
- RTC_NOTREACHED() << "Unsupported codec without configured QP thresholds.";
- scale_ = false;
- }
- QualityScaler::Resolution res = quality_scaler_.GetScaledResolution();
- init_width = res.width;
- init_height = res.height;
- ALOGD << "Scaled resolution: " << init_width << " x " << init_height;
- }
-
return codec_thread_->Invoke<int32_t>(
RTC_FROM_HERE,
Bind(&MediaCodecVideoEncoder::InitEncodeOnCodecThread, this, init_width,
@@ -715,7 +694,6 @@
drop_next_input_frame_ = false;
current_timestamp_us_ += rtc::kNumMicrosecsPerSec / last_set_fps_;
frames_dropped_media_encoder_++;
- OnDroppedFrameOnCodecThread();
return WEBRTC_VIDEO_CODEC_OK;
}
@@ -737,32 +715,12 @@
return ProcessHWErrorOnEncodeOnCodecThread();
}
frames_dropped_media_encoder_++;
- OnDroppedFrameOnCodecThread();
return WEBRTC_VIDEO_CODEC_OK;
}
consecutive_full_queue_frame_drops_ = 0;
rtc::scoped_refptr<webrtc::VideoFrameBuffer> input_buffer(
frame.video_frame_buffer());
- if (scale_) {
- // Check framerate before spatial resolution change.
- quality_scaler_.OnEncodeFrame(frame.width(), frame.height());
- const webrtc::QualityScaler::Resolution scaled_resolution =
- quality_scaler_.GetScaledResolution();
- if (scaled_resolution.width != frame.width() ||
- scaled_resolution.height != frame.height()) {
- if (input_buffer->native_handle() != nullptr) {
- input_buffer = static_cast<AndroidTextureBuffer*>(input_buffer.get())
- ->CropScaleAndRotate(frame.width(), frame.height(),
- 0, 0,
- scaled_resolution.width,
- scaled_resolution.height,
- webrtc::kVideoRotation_0);
- } else {
- input_buffer = quality_scaler_.GetScaledBuffer(input_buffer);
- }
- }
- }
VideoFrame input_frame(input_buffer, frame.timestamp(),
frame.render_time_ms(), frame.rotation());
@@ -788,7 +746,6 @@
if (frames_received_ > 1) {
current_timestamp_us_ += rtc::kNumMicrosecsPerSec / last_set_fps_;
frames_dropped_media_encoder_++;
- OnDroppedFrameOnCodecThread();
} else {
// Input buffers are not ready after codec initialization, HW is still
// allocating thme - this is expected and should not result in drop
@@ -969,9 +926,6 @@
last_set_fps_ == frame_rate) {
return WEBRTC_VIDEO_CODEC_OK;
}
- if (scale_) {
- quality_scaler_.ReportFramerate(frame_rate);
- }
JNIEnv* jni = AttachCurrentThreadIfNeeded();
ScopedLocalRefFrame local_ref_frame(jni);
if (new_bit_rate > 0) {
@@ -1086,9 +1040,6 @@
image->_frameType =
(key_frame ? webrtc::kVideoFrameKey : webrtc::kVideoFrameDelta);
image->_completeFrame = true;
- image->adapt_reason_.quality_resolution_downscales =
- scale_ ? quality_scaler_.downscale_shift() : -1;
-
webrtc::CodecSpecificInfo info;
memset(&info, 0, sizeof(info));
info.codecType = codec_type;
@@ -1135,23 +1086,19 @@
header.fragmentationLength[0] = image->_length;
header.fragmentationPlType[0] = 0;
header.fragmentationTimeDiff[0] = 0;
- if (codec_type == kVideoCodecVP8 && scale_) {
+ if (codec_type == kVideoCodecVP8) {
int qp;
if (webrtc::vp8::GetQp(payload, payload_size, &qp)) {
current_acc_qp_ += qp;
- quality_scaler_.ReportQP(qp);
image->qp_ = qp;
}
}
} else if (codec_type == kVideoCodecH264) {
- if (scale_) {
- h264_bitstream_parser_.ParseBitstream(payload, payload_size);
- int qp;
- if (h264_bitstream_parser_.GetLastSliceQp(&qp)) {
- current_acc_qp_ += qp;
- quality_scaler_.ReportQP(qp);
- image->qp_ = qp;
- }
+ h264_bitstream_parser_.ParseBitstream(payload, payload_size);
+ int qp;
+ if (h264_bitstream_parser_.GetLastSliceQp(&qp)) {
+ current_acc_qp_ += qp;
+ image->qp_ = qp;
}
// For H.264 search for start codes.
int32_t scPositions[MAX_NALUS_PERFRAME + 1] = {};
@@ -1252,6 +1199,11 @@
}
}
+webrtc::VideoEncoder::ScalingSettings
+MediaCodecVideoEncoder::GetScalingSettings() const {
+ return VideoEncoder::ScalingSettings(scale_);
+}
+
int32_t MediaCodecVideoEncoder::NextNaluPosition(
uint8_t *buffer, size_t buffer_size) {
if (buffer_size < H264_SC_LENGTH) {
@@ -1284,22 +1236,6 @@
return -1;
}
-void MediaCodecVideoEncoder::OnDroppedFrame() {
- // Methods running on the codec thread should call OnDroppedFrameOnCodecThread
- // directly.
- RTC_DCHECK(!codec_thread_checker_.CalledOnValidThread());
- codec_thread_->Invoke<void>(
- RTC_FROM_HERE,
- Bind(&MediaCodecVideoEncoder::OnDroppedFrameOnCodecThread, this));
-}
-
-void MediaCodecVideoEncoder::OnDroppedFrameOnCodecThread() {
- RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
- // Report dropped frame to quality_scaler_.
- if (scale_)
- quality_scaler_.ReportDroppedFrame();
-}
-
const char* MediaCodecVideoEncoder::ImplementationName() const {
return "MediaCodec";
}
diff --git a/webrtc/api/peerconnection_unittest.cc b/webrtc/api/peerconnection_unittest.cc
index 18718c3..05dddfa 100644
--- a/webrtc/api/peerconnection_unittest.cc
+++ b/webrtc/api/peerconnection_unittest.cc
@@ -1465,7 +1465,13 @@
FakeConstraints setup_constraints;
setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
true);
- ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
+ // Disable resolution adaptation, we don't want it interfering with the
+ // test results.
+ webrtc::PeerConnectionInterface::RTCConfiguration rtc_config;
+ rtc_config.set_cpu_adaptation(false);
+
+ ASSERT_TRUE(CreateTestClients(&setup_constraints, nullptr, &rtc_config,
+ &setup_constraints, nullptr, &rtc_config));
LocalP2PTest();
VerifyRenderedSize(640, 480);
}
@@ -1474,6 +1480,10 @@
FakeConstraints setup_constraints;
setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
true);
+ // Disable resolution adaptation, we don't want it interfering with the
+ // test results.
+ webrtc::PeerConnectionInterface::RTCConfiguration rtc_config;
+ rtc_config.set_cpu_adaptation(false);
std::unique_ptr<FakeRTCCertificateGenerator> cert_generator(
rtc::SSLStreamAdapter::HaveDtlsSrtp() ?
@@ -1482,7 +1492,7 @@
// Make sure the new client is using a different certificate.
return PeerConnectionTestClient::CreateClientWithDtlsIdentityStore(
- "New Peer: ", &setup_constraints, nullptr, nullptr,
+ "New Peer: ", &setup_constraints, nullptr, &rtc_config,
std::move(cert_generator), prefer_constraint_apis_,
network_thread_.get(), worker_thread_.get());
}
diff --git a/webrtc/media/engine/videoencodersoftwarefallbackwrapper.cc b/webrtc/media/engine/videoencodersoftwarefallbackwrapper.cc
index 0de15b3..a4d865e 100644
--- a/webrtc/media/engine/videoencodersoftwarefallbackwrapper.cc
+++ b/webrtc/media/engine/videoencodersoftwarefallbackwrapper.cc
@@ -159,16 +159,15 @@
return ret;
}
-void VideoEncoderSoftwareFallbackWrapper::OnDroppedFrame() {
- if (fallback_encoder_)
- return fallback_encoder_->OnDroppedFrame();
- return encoder_->OnDroppedFrame();
-}
-
bool VideoEncoderSoftwareFallbackWrapper::SupportsNativeHandle() const {
if (fallback_encoder_)
return fallback_encoder_->SupportsNativeHandle();
return encoder_->SupportsNativeHandle();
}
+VideoEncoder::ScalingSettings
+VideoEncoderSoftwareFallbackWrapper::GetScalingSettings() const {
+ return encoder_->GetScalingSettings();
+}
+
} // namespace webrtc
diff --git a/webrtc/media/engine/videoencodersoftwarefallbackwrapper.h b/webrtc/media/engine/videoencodersoftwarefallbackwrapper.h
index 05e815d..3d53090 100644
--- a/webrtc/media/engine/videoencodersoftwarefallbackwrapper.h
+++ b/webrtc/media/engine/videoencodersoftwarefallbackwrapper.h
@@ -42,8 +42,8 @@
int32_t SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;
int32_t SetRateAllocation(const BitrateAllocation& bitrate_allocation,
uint32_t framerate) override;
- void OnDroppedFrame() override;
bool SupportsNativeHandle() const override;
+ ScalingSettings GetScalingSettings() const override;
private:
bool InitFallbackEncoder();
diff --git a/webrtc/media/engine/videoencodersoftwarefallbackwrapper_unittest.cc b/webrtc/media/engine/videoencodersoftwarefallbackwrapper_unittest.cc
index ca06e92..096552b 100644
--- a/webrtc/media/engine/videoencodersoftwarefallbackwrapper_unittest.cc
+++ b/webrtc/media/engine/videoencodersoftwarefallbackwrapper_unittest.cc
@@ -74,8 +74,6 @@
return WEBRTC_VIDEO_CODEC_OK;
}
- void OnDroppedFrame() override { ++on_dropped_frame_count_; }
-
bool SupportsNativeHandle() const override {
++supports_native_handle_count_;
return false;
@@ -93,7 +91,6 @@
int release_count_ = 0;
int set_channel_parameters_count_ = 0;
int set_rates_count_ = 0;
- int on_dropped_frame_count_ = 0;
mutable int supports_native_handle_count_ = 0;
};
@@ -271,20 +268,6 @@
}
TEST_F(VideoEncoderSoftwareFallbackWrapperTest,
- OnDroppedFrameForwardedWithoutFallback) {
- fallback_wrapper_.OnDroppedFrame();
- EXPECT_EQ(1, fake_encoder_.on_dropped_frame_count_);
-}
-
-TEST_F(VideoEncoderSoftwareFallbackWrapperTest,
- OnDroppedFrameNotForwardedDuringFallback) {
- UtilizeFallbackEncoder();
- fallback_wrapper_.OnDroppedFrame();
- EXPECT_EQ(0, fake_encoder_.on_dropped_frame_count_);
- EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, fallback_wrapper_.Release());
-}
-
-TEST_F(VideoEncoderSoftwareFallbackWrapperTest,
SupportsNativeHandleForwardedWithoutFallback) {
fallback_wrapper_.SupportsNativeHandle();
EXPECT_EQ(1, fake_encoder_.supports_native_handle_count_);
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc
index 5332a3c..d7df122 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.cc
@@ -235,9 +235,6 @@
return WEBRTC_VIDEO_CODEC_ERROR;
}
// TODO(pbos): Base init params on these values before submitting.
- quality_scaler_.Init(codec_settings->codecType, codec_settings->startBitrate,
- codec_settings->width, codec_settings->height,
- codec_settings->maxFramerate);
int video_format = EVideoFormatType::videoFormatI420;
openh264_encoder_->SetOption(ENCODER_OPTION_DATAFORMAT,
&video_format);
@@ -279,7 +276,6 @@
target_bps_ = bitrate_allocation.get_sum_bps();
max_frame_rate_ = static_cast<float>(framerate);
- quality_scaler_.ReportFramerate(framerate);
SBitrateInfo target_bitrate;
memset(&target_bitrate, 0, sizeof(SBitrateInfo));
@@ -309,20 +305,6 @@
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
- quality_scaler_.OnEncodeFrame(input_frame.width(), input_frame.height());
- rtc::scoped_refptr<const VideoFrameBuffer> frame_buffer =
- quality_scaler_.GetScaledBuffer(input_frame.video_frame_buffer());
- if (frame_buffer->width() != width_ || frame_buffer->height() != height_) {
- LOG(LS_INFO) << "Encoder reinitialized from " << width_ << "x" << height_
- << " to " << frame_buffer->width() << "x"
- << frame_buffer->height();
- width_ = frame_buffer->width();
- height_ = frame_buffer->height();
- SEncParamExt encoder_params = CreateEncoderParams();
- openh264_encoder_->SetOption(ENCODER_OPTION_SVC_ENCODE_PARAM_EXT,
- &encoder_params);
- }
-
bool force_key_frame = false;
if (frame_types != nullptr) {
// We only support a single stream.
@@ -340,7 +322,8 @@
// (If every frame is a key frame we get lag/delays.)
openh264_encoder_->ForceIntraFrame(true);
}
-
+ rtc::scoped_refptr<const VideoFrameBuffer> frame_buffer =
+ input_frame.video_frame_buffer();
// EncodeFrame input.
SSourcePicture picture;
memset(&picture, 0, sizeof(SSourcePicture));
@@ -384,22 +367,16 @@
// Encoder can skip frames to save bandwidth in which case
// |encoded_image_._length| == 0.
if (encoded_image_._length > 0) {
- // Parse and report QP.
- h264_bitstream_parser_.ParseBitstream(encoded_image_._buffer,
- encoded_image_._length);
- int qp = -1;
- if (h264_bitstream_parser_.GetLastSliceQp(&qp)) {
- quality_scaler_.ReportQP(qp);
- encoded_image_.qp_ = qp;
- }
-
// Deliver encoded image.
CodecSpecificInfo codec_specific;
codec_specific.codecType = kVideoCodecH264;
encoded_image_callback_->OnEncodedImage(encoded_image_, &codec_specific,
&frag_header);
- } else {
- quality_scaler_.ReportDroppedFrame();
+
+ // Parse and report QP.
+ h264_bitstream_parser_.ParseBitstream(encoded_image_._buffer,
+ encoded_image_._length);
+ h264_bitstream_parser_.GetLastSliceQp(&encoded_image_.qp_);
}
return WEBRTC_VIDEO_CODEC_OK;
}
@@ -500,8 +477,8 @@
return WEBRTC_VIDEO_CODEC_OK;
}
-void H264EncoderImpl::OnDroppedFrame() {
- quality_scaler_.ReportDroppedFrame();
+VideoEncoder::ScalingSettings H264EncoderImpl::GetScalingSettings() const {
+ return VideoEncoder::ScalingSettings(true);
}
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h b/webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h
index ca28eb3..aab16ac 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h
+++ b/webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h
@@ -55,17 +55,17 @@
const char* ImplementationName() const override;
+ VideoEncoder::ScalingSettings GetScalingSettings() const override;
+
// Unsupported / Do nothing.
int32_t SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;
int32_t SetPeriodicKeyFrames(bool enable) override;
- void OnDroppedFrame() override;
private:
bool IsInitialized() const;
SEncParamExt CreateEncoderParams() const;
webrtc::H264BitstreamParser h264_bitstream_parser_;
- QualityScaler quality_scaler_;
// Reports statistics with histograms.
void ReportInit();
void ReportError();
diff --git a/webrtc/modules/video_coding/codecs/i420/include/i420.h b/webrtc/modules/video_coding/codecs/i420/include/i420.h
index 38dcafd..0a8051f 100644
--- a/webrtc/modules/video_coding/codecs/i420/include/i420.h
+++ b/webrtc/modules/video_coding/codecs/i420/include/i420.h
@@ -70,8 +70,6 @@
return WEBRTC_VIDEO_CODEC_OK;
}
- void OnDroppedFrame() override {}
-
private:
static uint8_t* InsertHeader(uint8_t* buffer,
uint16_t width,
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
index fbb464d..9b06e69 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
@@ -977,38 +977,6 @@
rc_metrics);
}
-// Run with no packet loss, at low bitrate. During this time we should've
-// resized once. Expect 2 key frames generated (first and one for resize).
-// Too slow to finish before timeout on iOS. See webrtc:4755.
-#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
-#define MAYBE_ProcessNoLossSpatialResizeFrameDropVP8 \
- DISABLED_ProcessNoLossSpatialResizeFrameDropVP8
-#else
-#define MAYBE_ProcessNoLossSpatialResizeFrameDropVP8 \
- ProcessNoLossSpatialResizeFrameDropVP8
-#endif
-TEST_F(VideoProcessorIntegrationTest,
- MAYBE_ProcessNoLossSpatialResizeFrameDropVP8) {
- config_.networking_config.packet_loss_probability = 0;
- // Bitrate and frame rate profile.
- RateProfile rate_profile;
- SetRateProfilePars(&rate_profile, 0, 50, 30, 0);
- rate_profile.frame_index_rate_update[1] = kNbrFramesLong + 1;
- rate_profile.num_frames = kNbrFramesLong;
- // Codec/network settings.
- CodecConfigPars process_settings;
- SetCodecParameters(&process_settings, kVideoCodecVP8, 0.0f, -1, 1, false,
- true, true, true);
- // Metrics for expected quality.
- QualityMetrics quality_metrics;
- SetQualityMetrics(&quality_metrics, 25.0, 15.0, 0.70, 0.40);
- // Metrics for rate control.
- RateControlMetrics rc_metrics[1];
- SetRateControlMetrics(rc_metrics, 0, 160, 80, 120, 20, 70, 1, 2);
- ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
- rc_metrics);
-}
-
// VP8: Run with no packet loss, with 3 temporal layers, with a rate update in
// the middle of the sequence. The max values for the frame size mismatch and
// encoding rate mismatch are applied to each layer.
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
index 3247275..6026cb3 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
@@ -451,10 +451,6 @@
return !streaminfos_.empty();
}
-void SimulcastEncoderAdapter::OnDroppedFrame() {
- streaminfos_[0].encoder->OnDroppedFrame();
-}
-
bool SimulcastEncoderAdapter::SupportsNativeHandle() const {
// We should not be calling this method before streaminfos_ are configured.
RTC_DCHECK(!streaminfos_.empty());
@@ -465,6 +461,14 @@
return true;
}
+VideoEncoder::ScalingSettings SimulcastEncoderAdapter::GetScalingSettings()
+ const {
+ // Turn off quality scaling for simulcast.
+ if (NumberOfStreams(codec_) != 1)
+ return VideoEncoder::ScalingSettings(false);
+ return streaminfos_[0].encoder->GetScalingSettings();
+}
+
const char* SimulcastEncoderAdapter::ImplementationName() const {
return implementation_name_.c_str();
}
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
index b76e7f4..2be8779 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
@@ -60,7 +60,7 @@
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation);
- void OnDroppedFrame() override;
+ VideoEncoder::ScalingSettings GetScalingSettings() const override;
bool SupportsNativeHandle() const override;
const char* ImplementationName() const override;
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
index 148140e..9fd5f65 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -123,8 +123,7 @@
token_partitions_(VP8_ONE_TOKENPARTITION),
down_scale_requested_(false),
down_scale_bitrate_(0),
- key_frame_request_(kMaxSimulcastStreams, false),
- quality_scaler_enabled_(false) {
+ key_frame_request_(kMaxSimulcastStreams, false) {
uint32_t seed = rtc::Time32();
srand(seed);
@@ -253,15 +252,9 @@
return WEBRTC_VIDEO_CODEC_ERROR;
}
}
- quality_scaler_.ReportFramerate(new_framerate);
return WEBRTC_VIDEO_CODEC_OK;
}
-void VP8EncoderImpl::OnDroppedFrame() {
- if (quality_scaler_enabled_)
- quality_scaler_.ReportDroppedFrame();
-}
-
const char* VP8EncoderImpl::ImplementationName() const {
return "libvpx";
}
@@ -530,15 +523,6 @@
}
rps_.Init();
- quality_scaler_.Init(codec_.codecType, codec_.startBitrate, codec_.width,
- codec_.height, codec_.maxFramerate);
-
- // Only apply scaling to improve for single-layer streams. The scaling metrics
- // use frame drops as a signal and is only applicable when we drop frames.
- quality_scaler_enabled_ = encoders_.size() == 1 &&
- configurations_[0].rc_dropframe_thresh > 0 &&
- codec_.VP8()->automaticResizeOn;
-
return InitAndSetControlSettings();
}
@@ -671,6 +655,9 @@
int VP8EncoderImpl::Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
+ RTC_DCHECK_EQ(frame.width(), codec_.width);
+ RTC_DCHECK_EQ(frame.height(), codec_.height);
+
if (!inited_)
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
if (frame.IsZeroSize())
@@ -679,20 +666,6 @@
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
rtc::scoped_refptr<VideoFrameBuffer> input_image = frame.video_frame_buffer();
-
- if (quality_scaler_enabled_) {
- quality_scaler_.OnEncodeFrame(frame.width(), frame.height());
- input_image = quality_scaler_.GetScaledBuffer(input_image);
-
- if (input_image->width() != codec_.width ||
- input_image->height() != codec_.height) {
- int ret =
- UpdateCodecFrameSize(input_image->width(), input_image->height());
- if (ret < 0)
- return ret;
- }
- }
-
// Since we are extracting raw pointers from |input_image| to
// |raw_images_[0]|, the resolution of these frames must match. Note that
// |input_image| might be scaled from |frame|. In that case, the resolution of
@@ -989,9 +962,6 @@
codec_.simulcastStream[stream_idx].height;
encoded_images_[encoder_idx]._encodedWidth =
codec_.simulcastStream[stream_idx].width;
- encoded_images_[encoder_idx]
- .adapt_reason_.quality_resolution_downscales =
- quality_scaler_enabled_ ? quality_scaler_.downscale_shift() : -1;
// Report once per frame (lowest stream always sent).
encoded_images_[encoder_idx].adapt_reason_.bw_resolutions_disabled =
(stream_idx == 0) ? bw_resolutions_disabled : -1;
@@ -1006,18 +976,16 @@
}
}
}
- if (encoders_.size() == 1 && send_stream_[0]) {
- if (encoded_images_[0]._length > 0) {
- int qp_128;
- vpx_codec_control(&encoders_[0], VP8E_GET_LAST_QUANTIZER, &qp_128);
- quality_scaler_.ReportQP(qp_128);
- } else {
- quality_scaler_.ReportDroppedFrame();
- }
- }
return result;
}
+VideoEncoder::ScalingSettings VP8EncoderImpl::GetScalingSettings() const {
+ const bool enable_scaling = encoders_.size() == 1 &&
+ configurations_[0].rc_dropframe_thresh > 0 &&
+ codec_.VP8().automaticResizeOn;
+ return VideoEncoder::ScalingSettings(enable_scaling);
+}
+
int VP8EncoderImpl::SetChannelParameters(uint32_t packetLoss, int64_t rtt) {
rps_.SetRtt(rtt);
return WEBRTC_VIDEO_CODEC_OK;
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
index 3b3a78a..10977b1 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
@@ -57,7 +57,7 @@
int SetRateAllocation(const BitrateAllocation& bitrate,
uint32_t new_framerate) override;
- void OnDroppedFrame() override;
+ ScalingSettings GetScalingSettings() const override;
const char* ImplementationName() const override;
@@ -116,8 +116,6 @@
std::vector<vpx_codec_ctx_t> encoders_;
std::vector<vpx_codec_enc_cfg_t> configurations_;
std::vector<vpx_rational_t> downsampling_factors_;
- QualityScaler quality_scaler_;
- bool quality_scaler_enabled_;
}; // end of VP8EncoderImpl class
class VP8DecoderImpl : public VP8Decoder {
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
index 4eb31a4..7078c50 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
@@ -49,8 +49,6 @@
int SetRateAllocation(const BitrateAllocation& bitrate_allocation,
uint32_t frame_rate) override;
- void OnDroppedFrame() override {}
-
const char* ImplementationName() const override;
struct LayerFrameRefSettings {
diff --git a/webrtc/modules/video_coding/generic_encoder.cc b/webrtc/modules/video_coding/generic_encoder.cc
index 80e48d1..75bdc7c 100644
--- a/webrtc/modules/video_coding/generic_encoder.cc
+++ b/webrtc/modules/video_coding/generic_encoder.cc
@@ -131,11 +131,6 @@
return internal_source_;
}
-void VCMGenericEncoder::OnDroppedFrame() {
- RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
- encoder_->OnDroppedFrame();
-}
-
bool VCMGenericEncoder::SupportsNativeHandle() const {
RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
return encoder_->SupportsNativeHandle();
diff --git a/webrtc/modules/video_coding/utility/quality_scaler.cc b/webrtc/modules/video_coding/utility/quality_scaler.cc
index 0f63edc..ba1d978 100644
--- a/webrtc/modules/video_coding/utility/quality_scaler.cc
+++ b/webrtc/modules/video_coding/utility/quality_scaler.cc
@@ -13,8 +13,11 @@
#include <math.h>
#include <algorithm>
+#include <memory>
#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/base/task_queue.h"
// TODO(kthelgason): Some versions of Android have issues with log2.
// See https://code.google.com/p/android/issues/detail?id=212634 for details
@@ -26,21 +29,9 @@
namespace {
// Threshold constant used until first downscale (to permit fast rampup).
-static const int kMeasureSecondsFastUpscale = 2;
-static const int kMeasureSecondsUpscale = 5;
-static const int kMeasureSecondsDownscale = 5;
+static const int kMeasureMs = 2000;
+static const float kSamplePeriodScaleFactor = 2.5;
static const int kFramedropPercentThreshold = 60;
-// Min width/height to downscale to, set to not go below QVGA, but with some
-// margin to permit "almost-QVGA" resolutions, such as QCIF.
-static const int kMinDownscaleDimension = 140;
-// Initial resolutions corresponding to a bitrate. Aa bit above their actual
-// values to permit near-VGA and near-QVGA resolutions to use the same
-// mechanism.
-static const int kVgaBitrateThresholdKbps = 500;
-static const int kVgaNumPixels = 700 * 500; // 640x480
-static const int kQvgaBitrateThresholdKbps = 250;
-static const int kQvgaNumPixels = 400 * 300; // 320x240
-
// QP scaling threshold defaults:
static const int kLowH264QpThreshold = 24;
static const int kHighH264QpThreshold = 37;
@@ -48,20 +39,13 @@
// bitstream range of [0, 127] and not the user-level range of [0,63].
static const int kLowVp8QpThreshold = 29;
static const int kHighVp8QpThreshold = 95;
-} // namespace
+const ScalingObserverInterface::ScaleReason scale_reason_ =
+ ScalingObserverInterface::ScaleReason::kQuality;
-// Default values. Should immediately get set to something more sensible.
-QualityScaler::QualityScaler()
- : average_qp_(kMeasureSecondsUpscale * 30),
- framedrop_percent_(kMeasureSecondsUpscale * 30),
- low_qp_threshold_(-1) {}
-
-void QualityScaler::Init(VideoCodecType codec_type,
- int initial_bitrate_kbps,
- int width,
- int height,
- int fps) {
- int low = -1, high = -1;
+static VideoEncoder::QpThresholds CodecTypeToDefaultThresholds(
+ VideoCodecType codec_type) {
+ int low = -1;
+ int high = -1;
switch (codec_type) {
case kVideoCodecH264:
low = kLowH264QpThreshold;
@@ -74,138 +58,132 @@
default:
RTC_NOTREACHED() << "Invalid codec type for QualityScaler.";
}
- Init(low, high, initial_bitrate_kbps, width, height, fps);
+ return VideoEncoder::QpThresholds(low, high);
}
+} // namespace
-void QualityScaler::Init(int low_qp_threshold,
- int high_qp_threshold,
- int initial_bitrate_kbps,
- int width,
- int height,
- int fps) {
- ClearSamples();
- low_qp_threshold_ = low_qp_threshold;
- high_qp_threshold_ = high_qp_threshold;
- downscale_shift_ = 0;
- fast_rampup_ = true;
-
- const int init_width = width;
- const int init_height = height;
- if (initial_bitrate_kbps > 0) {
- int init_num_pixels = width * height;
- if (initial_bitrate_kbps < kVgaBitrateThresholdKbps)
- init_num_pixels = kVgaNumPixels;
- if (initial_bitrate_kbps < kQvgaBitrateThresholdKbps)
- init_num_pixels = kQvgaNumPixels;
- while (width * height > init_num_pixels) {
- ++downscale_shift_;
- width /= 2;
- height /= 2;
- }
+class QualityScaler::CheckQPTask : public rtc::QueuedTask {
+ public:
+ explicit CheckQPTask(QualityScaler* scaler) : scaler_(scaler) {
+ LOG(LS_INFO) << "Created CheckQPTask. Scheduling on queue...";
+ rtc::TaskQueue::Current()->PostDelayedTask(
+ std::unique_ptr<rtc::QueuedTask>(this), scaler_->GetSamplingPeriodMs());
}
- UpdateTargetResolution(init_width, init_height);
- ReportFramerate(fps);
+ void Stop() {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ LOG(LS_INFO) << "Stopping QP Check task.";
+ stop_ = true;
+ }
+
+ private:
+ bool Run() override {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ if (stop_)
+ return true; // TaskQueue will free this task.
+ scaler_->CheckQP();
+ rtc::TaskQueue::Current()->PostDelayedTask(
+ std::unique_ptr<rtc::QueuedTask>(this), scaler_->GetSamplingPeriodMs());
+ return false; // Retain the task in order to reuse it.
+ }
+
+ QualityScaler* const scaler_;
+ bool stop_ = false;
+ rtc::SequencedTaskChecker task_checker_;
+};
+
+QualityScaler::QualityScaler(ScalingObserverInterface* observer,
+ VideoCodecType codec_type)
+ : QualityScaler(observer, CodecTypeToDefaultThresholds(codec_type)) {}
+
+QualityScaler::QualityScaler(ScalingObserverInterface* observer,
+ VideoEncoder::QpThresholds thresholds)
+ : QualityScaler(observer, thresholds, kMeasureMs) {}
+
+// Protected ctor, should not be called directly.
+QualityScaler::QualityScaler(ScalingObserverInterface* observer,
+ VideoEncoder::QpThresholds thresholds,
+ int64_t sampling_period)
+ : check_qp_task_(nullptr),
+ observer_(observer),
+ sampling_period_ms_(sampling_period),
+ fast_rampup_(true),
+ // Arbitrarily choose size based on 30 fps for 5 seconds.
+ average_qp_(5 * 30),
+ framedrop_percent_(5 * 30),
+ thresholds_(thresholds) {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ RTC_DCHECK(observer_ != nullptr);
+ check_qp_task_ = new CheckQPTask(this);
}
-// Report framerate(fps) to estimate # of samples.
-void QualityScaler::ReportFramerate(int framerate) {
- // Use a faster window for upscaling initially.
- // This enables faster initial rampups without risking strong up-down
- // behavior later.
- num_samples_upscale_ = framerate * (fast_rampup_ ? kMeasureSecondsFastUpscale
- : kMeasureSecondsUpscale);
- num_samples_downscale_ = framerate * kMeasureSecondsDownscale;
+QualityScaler::~QualityScaler() {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ check_qp_task_->Stop();
+}
+
+int64_t QualityScaler::GetSamplingPeriodMs() const {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ return fast_rampup_ ? sampling_period_ms_
+ : (sampling_period_ms_ * kSamplePeriodScaleFactor);
+}
+
+void QualityScaler::ReportDroppedFrame() {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ framedrop_percent_.AddSample(100);
}
void QualityScaler::ReportQP(int qp) {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
framedrop_percent_.AddSample(0);
average_qp_.AddSample(qp);
}
-void QualityScaler::ReportDroppedFrame() {
- framedrop_percent_.AddSample(100);
-}
-
-void QualityScaler::OnEncodeFrame(int width, int height) {
+void QualityScaler::CheckQP() {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
// Should be set through InitEncode -> Should be set by now.
- RTC_DCHECK_GE(low_qp_threshold_, 0);
- if (target_res_.width != width || target_res_.height != height) {
- UpdateTargetResolution(width, height);
- }
-
+ RTC_DCHECK_GE(thresholds_.low, 0);
+ LOG(LS_INFO) << "Checking if average QP exceeds threshold";
// Check if we should scale down due to high frame drop.
- const auto drop_rate = framedrop_percent_.GetAverage(num_samples_downscale_);
+ const rtc::Optional<int> drop_rate = framedrop_percent_.GetAverage();
if (drop_rate && *drop_rate >= kFramedropPercentThreshold) {
- ScaleDown();
+ ReportQPHigh();
return;
}
// Check if we should scale up or down based on QP.
- const auto avg_qp_down = average_qp_.GetAverage(num_samples_downscale_);
- if (avg_qp_down && *avg_qp_down > high_qp_threshold_) {
- ScaleDown();
+ const rtc::Optional<int> avg_qp = average_qp_.GetAverage();
+ if (avg_qp && *avg_qp > thresholds_.high) {
+ ReportQPHigh();
return;
}
- const auto avg_qp_up = average_qp_.GetAverage(num_samples_upscale_);
- if (avg_qp_up && *avg_qp_up <= low_qp_threshold_) {
+ if (avg_qp && *avg_qp <= thresholds_.low) {
// QP has been low. We want to try a higher resolution.
- ScaleUp();
+ ReportQPLow();
return;
}
}
-void QualityScaler::ScaleUp() {
- downscale_shift_ = std::max(0, downscale_shift_ - 1);
+void QualityScaler::ReportQPLow() {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ LOG(LS_INFO) << "QP has been low, asking for higher resolution.";
ClearSamples();
+ observer_->ScaleUp(scale_reason_);
}
-void QualityScaler::ScaleDown() {
- downscale_shift_ = std::min(maximum_shift_, downscale_shift_ + 1);
+void QualityScaler::ReportQPHigh() {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+ LOG(LS_INFO) << "QP has been high , asking for lower resolution.";
ClearSamples();
+ observer_->ScaleDown(scale_reason_);
// If we've scaled down, wait longer before scaling up again.
if (fast_rampup_) {
fast_rampup_ = false;
- num_samples_upscale_ = (num_samples_upscale_ / kMeasureSecondsFastUpscale) *
- kMeasureSecondsUpscale;
}
}
-QualityScaler::Resolution QualityScaler::GetScaledResolution() const {
- const int frame_width = target_res_.width >> downscale_shift_;
- const int frame_height = target_res_.height >> downscale_shift_;
- return Resolution{frame_width, frame_height};
-}
-
-rtc::scoped_refptr<VideoFrameBuffer> QualityScaler::GetScaledBuffer(
- const rtc::scoped_refptr<VideoFrameBuffer>& frame) {
- Resolution res = GetScaledResolution();
- const int src_width = frame->width();
- const int src_height = frame->height();
-
- if (res.width == src_width && res.height == src_height)
- return frame;
- rtc::scoped_refptr<I420Buffer> scaled_buffer =
- pool_.CreateBuffer(res.width, res.height);
-
- scaled_buffer->ScaleFrom(*frame);
-
- return scaled_buffer;
-}
-
-void QualityScaler::UpdateTargetResolution(int width, int height) {
- if (width < kMinDownscaleDimension || height < kMinDownscaleDimension) {
- maximum_shift_ = 0;
- } else {
- maximum_shift_ = static_cast<int>(
- log2(std::min(width, height) / kMinDownscaleDimension));
- }
- target_res_ = Resolution{width, height};
-}
-
void QualityScaler::ClearSamples() {
+ RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
framedrop_percent_.Reset();
average_qp_.Reset();
}
-
-
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/utility/quality_scaler.h b/webrtc/modules/video_coding/utility/quality_scaler.h
index c0f9440..5734e66 100644
--- a/webrtc/modules/video_coding/utility/quality_scaler.h
+++ b/webrtc/modules/video_coding/utility/quality_scaler.h
@@ -11,61 +11,73 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_
#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_
+#include <utility>
+
#include "webrtc/common_types.h"
-#include "webrtc/common_video/include/i420_buffer_pool.h"
+#include "webrtc/video_encoder.h"
+#include "webrtc/base/optional.h"
+#include "webrtc/base/sequenced_task_checker.h"
#include "webrtc/modules/video_coding/utility/moving_average.h"
namespace webrtc {
-class QualityScaler {
+
+// An interface for a class that receives scale up/down requests.
+class ScalingObserverInterface {
public:
- struct Resolution {
- int width;
- int height;
- };
+ enum ScaleReason : size_t { kQuality = 0, kCpu = 1 };
+ static const size_t kScaleReasonSize = 2;
+ // Called to signal that we can handle larger frames.
+ virtual void ScaleUp(ScaleReason reason) = 0;
+ // Called to signal that the encoder should scale down.
+ virtual void ScaleDown(ScaleReason reason) = 0;
- QualityScaler();
- void Init(VideoCodecType codec_type,
- int initial_bitrate_kbps,
- int width,
- int height,
- int fps);
- void Init(int low_qp_threshold,
- int high_qp_threshold,
- int initial_bitrate_kbps,
- int width,
- int height,
- int fps);
- void ReportFramerate(int framerate);
- void ReportQP(int qp);
- void ReportDroppedFrame();
- void OnEncodeFrame(int width, int height);
- Resolution GetScaledResolution() const;
- rtc::scoped_refptr<VideoFrameBuffer> GetScaledBuffer(
- const rtc::scoped_refptr<VideoFrameBuffer>& frame);
- int downscale_shift() const { return downscale_shift_; }
-
- private:
- void ClearSamples();
- void ScaleUp();
- void ScaleDown();
- void UpdateTargetResolution(int width, int height);
-
- I420BufferPool pool_;
-
- size_t num_samples_downscale_;
- size_t num_samples_upscale_;
- bool fast_rampup_;
- MovingAverage average_qp_;
- MovingAverage framedrop_percent_;
-
- int low_qp_threshold_;
- int high_qp_threshold_;
- Resolution target_res_;
-
- int downscale_shift_;
- int maximum_shift_;
+ protected:
+ virtual ~ScalingObserverInterface() {}
};
+// QualityScaler runs asynchronously and monitors QP values of encoded frames.
+// It holds a reference to a ScalingObserverInterface implementation to signal
+// an intent to scale up or down.
+class QualityScaler {
+ public:
+ // Construct a QualityScaler with a given |observer|.
+ // This starts the quality scaler periodically checking what the average QP
+ // has been recently.
+ QualityScaler(ScalingObserverInterface* observer, VideoCodecType codec_type);
+ // If specific thresholds are desired these can be supplied as |thresholds|.
+ QualityScaler(ScalingObserverInterface* observer,
+ VideoEncoder::QpThresholds thresholds);
+ virtual ~QualityScaler();
+ // Should be called each time the encoder drops a frame.
+ void ReportDroppedFrame();
+ // Inform the QualityScaler of the last seen QP.
+ void ReportQP(int qp);
+
+ // The following members are declared protected for testing purposes.
+ protected:
+ QualityScaler(ScalingObserverInterface* observer,
+ VideoEncoder::QpThresholds thresholds,
+ int64_t sampling_period);
+
+ private:
+ class CheckQPTask;
+ void CheckQP();
+ void ClearSamples();
+ void ReportQPLow();
+ void ReportQPHigh();
+ int64_t GetSamplingPeriodMs() const;
+
+ CheckQPTask* check_qp_task_ GUARDED_BY(&task_checker_);
+ ScalingObserverInterface* const observer_ GUARDED_BY(&task_checker_);
+ rtc::SequencedTaskChecker task_checker_;
+
+ const int64_t sampling_period_ms_;
+ bool fast_rampup_ GUARDED_BY(&task_checker_);
+ MovingAverage average_qp_ GUARDED_BY(&task_checker_);
+ MovingAverage framedrop_percent_ GUARDED_BY(&task_checker_);
+
+ VideoEncoder::QpThresholds thresholds_ GUARDED_BY(&task_checker_);
+};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_
diff --git a/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc b/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc
index 24d5c48..babe14e 100644
--- a/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc
+++ b/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc
@@ -10,28 +10,49 @@
#include "webrtc/modules/video_coding/utility/quality_scaler.h"
+#include <memory>
+
+#include "webrtc/base/event.h"
+#include "webrtc/base/task_queue.h"
+#include "webrtc/test/gmock.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
namespace {
-static const int kNumSeconds = 10;
-static const int kWidth = 1920;
-static const int kHeight = 1080;
static const int kFramerate = 30;
static const int kLowQp = 15;
-static const int kNormalQp = 30;
static const int kLowQpThreshold = 18;
static const int kHighQp = 40;
-static const int kDisabledBadQpThreshold = 64;
-static const int kLowInitialBitrateKbps = 300;
-// These values need to be in sync with corresponding constants
-// in quality_scaler.cc
-static const int kMeasureSecondsFastUpscale = 2;
-static const int kMeasureSecondsUpscale = 5;
-static const int kMeasureSecondsDownscale = 5;
-static const int kMinDownscaleDimension = 140;
+static const size_t kDefaultTimeoutMs = 1000;
} // namespace
+class MockScaleObserver : public ScalingObserverInterface {
+ public:
+ MockScaleObserver() : event(false, false) {}
+ virtual ~MockScaleObserver() {}
+
+ void ScaleUp(ScaleReason r) override {
+ scaled_up++;
+ event.Set();
+ }
+ void ScaleDown(ScaleReason r) override {
+ scaled_down++;
+ event.Set();
+ }
+
+ rtc::Event event;
+ int scaled_up = 0;
+ int scaled_down = 0;
+};
+
+// Pass a lower sampling period to speed up the tests.
+class QualityScalerUnderTest : public QualityScaler {
+ public:
+ explicit QualityScalerUnderTest(ScalingObserverInterface* observer,
+ VideoEncoder::QpThresholds thresholds)
+ : QualityScaler(observer, thresholds, 5) {}
+};
+
class QualityScalerTest : public ::testing::Test {
protected:
enum ScaleDirection {
@@ -41,346 +62,116 @@
kScaleUp
};
- QualityScalerTest() {
- input_frame_ = I420Buffer::Create(kWidth, kHeight);
- qs_.Init(kLowQpThreshold, kHighQp, 0, kWidth, kHeight, kFramerate);
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
+ QualityScalerTest()
+ : q_(new rtc::TaskQueue("QualityScalerTestQueue")),
+ observer_(new MockScaleObserver()) {
+ rtc::Event event(false, false);
+ q_->PostTask([this, &event] {
+ qs_ = std::unique_ptr<QualityScaler>(new QualityScalerUnderTest(
+ observer_.get(),
+ VideoEncoder::QpThresholds(kLowQpThreshold, kHighQp)));
+ event.Set();
+ });
+ EXPECT_TRUE(event.Wait(kDefaultTimeoutMs));
}
- bool TriggerScale(ScaleDirection scale_direction) {
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- int initial_width = qs_.GetScaledResolution().width;
- for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
+ ~QualityScalerTest() {
+ rtc::Event event(false, false);
+ q_->PostTask([this, &event] {
+ qs_.reset(nullptr);
+ event.Set();
+ });
+ EXPECT_TRUE(event.Wait(kDefaultTimeoutMs));
+ }
+
+ void TriggerScale(ScaleDirection scale_direction) {
+ for (int i = 0; i < kFramerate * 5; ++i) {
switch (scale_direction) {
case kScaleUp:
- qs_.ReportQP(kLowQp);
+ qs_->ReportQP(kLowQp);
break;
case kScaleDown:
- qs_.ReportDroppedFrame();
+ qs_->ReportDroppedFrame();
break;
case kKeepScaleAtHighQp:
- qs_.ReportQP(kHighQp);
+ qs_->ReportQP(kHighQp);
break;
case kScaleDownAboveHighQp:
- qs_.ReportQP(kHighQp + 1);
+ qs_->ReportQP(kHighQp + 1);
break;
}
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- if (qs_.GetScaledResolution().width != initial_width)
- return true;
}
-
- return false;
}
- void ExpectOriginalFrame() {
- EXPECT_EQ(input_frame_, qs_.GetScaledBuffer(input_frame_))
- << "Using scaled frame instead of original input.";
- }
-
- void ExpectScaleUsingReportedResolution() {
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- QualityScaler::Resolution res = qs_.GetScaledResolution();
- rtc::scoped_refptr<VideoFrameBuffer> scaled_frame =
- qs_.GetScaledBuffer(input_frame_);
- EXPECT_EQ(res.width, scaled_frame->width());
- EXPECT_EQ(res.height, scaled_frame->height());
- }
-
- void ContinuouslyDownscalesByHalfDimensionsAndBackUp();
-
- void DoesNotDownscaleFrameDimensions(int width, int height);
-
- void DownscaleEndsAt(int input_width,
- int input_height,
- int end_width,
- int end_height);
-
- QualityScaler qs_;
- rtc::scoped_refptr<VideoFrameBuffer> input_frame_;
+ std::unique_ptr<rtc::TaskQueue> q_;
+ std::unique_ptr<QualityScaler> qs_;
+ std::unique_ptr<MockScaleObserver> observer_;
};
-TEST_F(QualityScalerTest, UsesOriginalFrameInitially) {
- ExpectOriginalFrame();
-}
-
-TEST_F(QualityScalerTest, ReportsOriginalResolutionInitially) {
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- QualityScaler::Resolution res = qs_.GetScaledResolution();
- EXPECT_EQ(input_frame_->width(), res.width);
- EXPECT_EQ(input_frame_->height(), res.height);
-}
-
TEST_F(QualityScalerTest, DownscalesAfterContinuousFramedrop) {
- EXPECT_TRUE(TriggerScale(kScaleDown)) << "No downscale within " << kNumSeconds
- << " seconds.";
- QualityScaler::Resolution res = qs_.GetScaledResolution();
- EXPECT_LT(res.width, input_frame_->width());
- EXPECT_LT(res.height, input_frame_->height());
+ q_->PostTask([this] { TriggerScale(kScaleDown); });
+ EXPECT_TRUE(observer_->event.Wait(50));
+ EXPECT_EQ(1, observer_->scaled_down);
}
TEST_F(QualityScalerTest, KeepsScaleAtHighQp) {
- EXPECT_FALSE(TriggerScale(kKeepScaleAtHighQp))
- << "Downscale at high threshold which should keep scale.";
- QualityScaler::Resolution res = qs_.GetScaledResolution();
- EXPECT_EQ(res.width, input_frame_->width());
- EXPECT_EQ(res.height, input_frame_->height());
+ q_->PostTask([this] { TriggerScale(kKeepScaleAtHighQp); });
+ EXPECT_FALSE(observer_->event.Wait(50));
+ EXPECT_EQ(0, observer_->scaled_down);
+ EXPECT_EQ(0, observer_->scaled_up);
}
TEST_F(QualityScalerTest, DownscalesAboveHighQp) {
- EXPECT_TRUE(TriggerScale(kScaleDownAboveHighQp))
- << "No downscale within " << kNumSeconds << " seconds.";
- QualityScaler::Resolution res = qs_.GetScaledResolution();
- EXPECT_LT(res.width, input_frame_->width());
- EXPECT_LT(res.height, input_frame_->height());
+ q_->PostTask([this] { TriggerScale(kScaleDownAboveHighQp); });
+ EXPECT_TRUE(observer_->event.Wait(50));
+ EXPECT_EQ(1, observer_->scaled_down);
+ EXPECT_EQ(0, observer_->scaled_up);
}
TEST_F(QualityScalerTest, DownscalesAfterTwoThirdsFramedrop) {
- for (int i = 0; i < kFramerate * kNumSeconds / 3; ++i) {
- qs_.ReportQP(kNormalQp);
- qs_.ReportDroppedFrame();
- qs_.ReportDroppedFrame();
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- if (qs_.GetScaledResolution().width < input_frame_->width())
- return;
- }
-
- FAIL() << "No downscale within " << kNumSeconds << " seconds.";
+ q_->PostTask([this] {
+ qs_->ReportDroppedFrame();
+ qs_->ReportDroppedFrame();
+ qs_->ReportQP(kHighQp);
+ });
+ EXPECT_TRUE(observer_->event.Wait(50));
+ EXPECT_EQ(1, observer_->scaled_down);
+ EXPECT_EQ(0, observer_->scaled_up);
}
TEST_F(QualityScalerTest, DoesNotDownscaleOnNormalQp) {
- for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
- qs_.ReportQP(kNormalQp);
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- ASSERT_EQ(input_frame_->width(), qs_.GetScaledResolution().width)
- << "Unexpected scale on half framedrop.";
- }
+ q_->PostTask([this] { TriggerScale(kScaleDownAboveHighQp); });
+ EXPECT_TRUE(observer_->event.Wait(50));
+ EXPECT_EQ(1, observer_->scaled_down);
+ EXPECT_EQ(0, observer_->scaled_up);
}
TEST_F(QualityScalerTest, DoesNotDownscaleAfterHalfFramedrop) {
- for (int i = 0; i < kFramerate * kNumSeconds / 2; ++i) {
- qs_.ReportQP(kNormalQp);
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- ASSERT_EQ(input_frame_->width(), qs_.GetScaledResolution().width)
- << "Unexpected scale on half framedrop.";
-
- qs_.ReportDroppedFrame();
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- ASSERT_EQ(input_frame_->width(), qs_.GetScaledResolution().width)
- << "Unexpected scale on half framedrop.";
- }
+ q_->PostTask([this] {
+ qs_->ReportDroppedFrame();
+ qs_->ReportQP(kHighQp);
+ });
+ EXPECT_FALSE(observer_->event.Wait(50));
+ EXPECT_EQ(0, observer_->scaled_down);
+ EXPECT_EQ(0, observer_->scaled_up);
}
-void QualityScalerTest::ContinuouslyDownscalesByHalfDimensionsAndBackUp() {
- const int initial_min_dimension =
- input_frame_->width() < input_frame_->height() ? input_frame_->width()
- : input_frame_->height();
- int min_dimension = initial_min_dimension;
- int current_shift = 0;
- // Drop all frames to force-trigger downscaling.
- while (min_dimension >= 2 * kMinDownscaleDimension) {
- EXPECT_TRUE(TriggerScale(kScaleDown)) << "No downscale within "
- << kNumSeconds << " seconds.";
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- QualityScaler::Resolution res = qs_.GetScaledResolution();
- min_dimension = res.width < res.height ? res.width : res.height;
- ++current_shift;
- ASSERT_EQ(input_frame_->width() >> current_shift, res.width);
- ASSERT_EQ(input_frame_->height() >> current_shift, res.height);
- ExpectScaleUsingReportedResolution();
- }
-
- // Make sure we can scale back with good-quality frames.
- while (min_dimension < initial_min_dimension) {
- EXPECT_TRUE(TriggerScale(kScaleUp)) << "No upscale within " << kNumSeconds
- << " seconds.";
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- QualityScaler::Resolution res = qs_.GetScaledResolution();
- min_dimension = res.width < res.height ? res.width : res.height;
- --current_shift;
- ASSERT_EQ(input_frame_->width() >> current_shift, res.width);
- ASSERT_EQ(input_frame_->height() >> current_shift, res.height);
- ExpectScaleUsingReportedResolution();
- }
-
- // Verify we don't start upscaling after further low use.
- for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
- qs_.ReportQP(kLowQp);
- ExpectOriginalFrame();
- }
+TEST_F(QualityScalerTest, UpscalesAfterLowQp) {
+ q_->PostTask([this] { TriggerScale(kScaleUp); });
+ EXPECT_TRUE(observer_->event.Wait(50));
+ EXPECT_EQ(0, observer_->scaled_down);
+ EXPECT_EQ(1, observer_->scaled_up);
}
-TEST_F(QualityScalerTest, ContinuouslyDownscalesByHalfDimensionsAndBackUp) {
- ContinuouslyDownscalesByHalfDimensionsAndBackUp();
-}
-
-TEST_F(QualityScalerTest,
- ContinuouslyDownscalesOddResolutionsByHalfDimensionsAndBackUp) {
- const int kOddWidth = 517;
- const int kOddHeight = 1239;
- input_frame_ = I420Buffer::Create(kOddWidth, kOddHeight);
- ContinuouslyDownscalesByHalfDimensionsAndBackUp();
-}
-
-void QualityScalerTest::DoesNotDownscaleFrameDimensions(int width, int height) {
- input_frame_ = I420Buffer::Create(width, height);
-
- for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
- qs_.ReportDroppedFrame();
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- ASSERT_EQ(input_frame_->width(), qs_.GetScaledResolution().width)
- << "Unexpected scale of minimal-size frame.";
- }
-}
-
-TEST_F(QualityScalerTest, DoesNotDownscaleFrom1PxWidth) {
- DoesNotDownscaleFrameDimensions(1, kHeight);
-}
-
-TEST_F(QualityScalerTest, DoesNotDownscaleFrom1PxHeight) {
- DoesNotDownscaleFrameDimensions(kWidth, 1);
-}
-
-TEST_F(QualityScalerTest, DoesNotDownscaleFrom1Px) {
- DoesNotDownscaleFrameDimensions(1, 1);
-}
-
-TEST_F(QualityScalerTest, DoesNotDownscaleBelow2xDefaultMinDimensionsWidth) {
- DoesNotDownscaleFrameDimensions(
- 2 * kMinDownscaleDimension - 1, 1000);
-}
-
-TEST_F(QualityScalerTest, DoesNotDownscaleBelow2xDefaultMinDimensionsHeight) {
- DoesNotDownscaleFrameDimensions(
- 1000, 2 * kMinDownscaleDimension - 1);
-}
-
-TEST_F(QualityScalerTest, DownscaleToVgaOnLowInitialBitrate) {
- static const int kWidth720p = 1280;
- static const int kHeight720p = 720;
- static const int kInitialBitrateKbps = 300;
- input_frame_ = I420Buffer::Create(kWidth720p, kHeight720p);
- qs_.Init(kLowQpThreshold, kDisabledBadQpThreshold, kInitialBitrateKbps,
- kWidth720p, kHeight720p, kFramerate);
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- int init_width = qs_.GetScaledResolution().width;
- int init_height = qs_.GetScaledResolution().height;
- EXPECT_EQ(640, init_width);
- EXPECT_EQ(360, init_height);
-}
-
-TEST_F(QualityScalerTest, DownscaleToQvgaOnLowerInitialBitrate) {
- static const int kWidth720p = 1280;
- static const int kHeight720p = 720;
- static const int kInitialBitrateKbps = 200;
- input_frame_ = I420Buffer::Create(kWidth720p, kHeight720p);
- qs_.Init(kLowQpThreshold, kDisabledBadQpThreshold, kInitialBitrateKbps,
- kWidth720p, kHeight720p, kFramerate);
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- int init_width = qs_.GetScaledResolution().width;
- int init_height = qs_.GetScaledResolution().height;
- EXPECT_EQ(320, init_width);
- EXPECT_EQ(180, init_height);
-}
-
-TEST_F(QualityScalerTest, DownscaleAfterMeasuredSecondsThenSlowerBackUp) {
- qs_.Init(kLowQpThreshold, kHighQp, 0, kWidth, kHeight, kFramerate);
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- QualityScaler::Resolution initial_res = qs_.GetScaledResolution();
-
- // Should not downscale if less than kMeasureSecondsDownscale seconds passed.
- for (int i = 0; i < kFramerate * kMeasureSecondsDownscale - 1; ++i) {
- qs_.ReportQP(kHighQp + 1);
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- }
- EXPECT_EQ(initial_res.width, qs_.GetScaledResolution().width);
- EXPECT_EQ(initial_res.height, qs_.GetScaledResolution().height);
-
- // Should downscale if more than kMeasureSecondsDownscale seconds passed (add
- // last frame).
- qs_.ReportQP(kHighQp + 1);
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- EXPECT_GT(initial_res.width, qs_.GetScaledResolution().width);
- EXPECT_GT(initial_res.height, qs_.GetScaledResolution().height);
-
- // Should not upscale if less than kMeasureSecondsUpscale seconds passed since
- // we saw issues initially (have already gone down).
- for (int i = 0; i < kFramerate * kMeasureSecondsUpscale - 1; ++i) {
- qs_.ReportQP(kLowQp);
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- }
- EXPECT_GT(initial_res.width, qs_.GetScaledResolution().width);
- EXPECT_GT(initial_res.height, qs_.GetScaledResolution().height);
-
- // Should upscale (back to initial) if kMeasureSecondsUpscale seconds passed
- // (add last frame).
- qs_.ReportQP(kLowQp);
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- EXPECT_EQ(initial_res.width, qs_.GetScaledResolution().width);
- EXPECT_EQ(initial_res.height, qs_.GetScaledResolution().height);
-}
-
-TEST_F(QualityScalerTest, UpscaleQuicklyInitiallyAfterMeasuredSeconds) {
- qs_.Init(kLowQpThreshold, kHighQp, kLowInitialBitrateKbps, kWidth, kHeight,
- kFramerate);
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- QualityScaler::Resolution initial_res = qs_.GetScaledResolution();
-
- // Should not upscale if less than kMeasureSecondsFastUpscale seconds passed.
- for (int i = 0; i < kFramerate * kMeasureSecondsFastUpscale - 1; ++i) {
- qs_.ReportQP(kLowQp);
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- }
- EXPECT_EQ(initial_res.width, qs_.GetScaledResolution().width);
- EXPECT_EQ(initial_res.height, qs_.GetScaledResolution().height);
-
- // Should upscale if kMeasureSecondsFastUpscale seconds passed (add last
- // frame).
- qs_.ReportQP(kLowQp);
- qs_.OnEncodeFrame(input_frame_->width(), input_frame_->height());
- EXPECT_LT(initial_res.width, qs_.GetScaledResolution().width);
- EXPECT_LT(initial_res.height, qs_.GetScaledResolution().height);
-}
-
-void QualityScalerTest::DownscaleEndsAt(int input_width,
- int input_height,
- int end_width,
- int end_height) {
- // Create a frame with 2x expected end width/height to verify that we can
- // scale down to expected end width/height.
- input_frame_ = I420Buffer::Create(input_width, input_height);
-
- int last_width = input_width;
- int last_height = input_height;
- // Drop all frames to force-trigger downscaling.
- while (true) {
- TriggerScale(kScaleDown);
- QualityScaler::Resolution res = qs_.GetScaledResolution();
- if (last_width == res.width) {
- EXPECT_EQ(last_height, res.height);
- EXPECT_EQ(end_width, res.width);
- EXPECT_EQ(end_height, res.height);
- break;
- }
- last_width = res.width;
- last_height = res.height;
- }
-}
-
-TEST_F(QualityScalerTest, DownscalesTo320x180) {
- DownscaleEndsAt(640, 360, 320, 180);
-}
-
-TEST_F(QualityScalerTest, DownscalesTo180x320) {
- DownscaleEndsAt(360, 640, 180, 320);
-}
-
-TEST_F(QualityScalerTest, DownscalesFrom1280x720To320x180) {
- DownscaleEndsAt(1280, 720, 320, 180);
-}
-
-TEST_F(QualityScalerTest, DoesntDownscaleInitialQvga) {
- DownscaleEndsAt(320, 180, 320, 180);
+TEST_F(QualityScalerTest, ScalesDownAndBackUp) {
+ q_->PostTask([this] { TriggerScale(kScaleDown); });
+ EXPECT_TRUE(observer_->event.Wait(50));
+ EXPECT_EQ(1, observer_->scaled_down);
+ EXPECT_EQ(0, observer_->scaled_up);
+ q_->PostTask([this] { TriggerScale(kScaleUp); });
+ EXPECT_TRUE(observer_->event.Wait(50));
+ EXPECT_EQ(1, observer_->scaled_down);
+ EXPECT_EQ(1, observer_->scaled_up);
}
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/video_coding_impl.h b/webrtc/modules/video_coding/video_coding_impl.h
index b76c617..f6e5306 100644
--- a/webrtc/modules/video_coding/video_coding_impl.h
+++ b/webrtc/modules/video_coding/video_coding_impl.h
@@ -115,6 +115,7 @@
VCMGenericEncoder* _encoder;
media_optimization::MediaOptimization _mediaOpt;
VCMEncodedFrameCallback _encodedFrameCallback GUARDED_BY(encoder_crit_);
+ EncodedImageCallback* const post_encode_callback_;
VCMSendStatisticsCallback* const send_stats_callback_;
VCMCodecDataBase _codecDataBase GUARDED_BY(encoder_crit_);
bool frame_dropper_enabled_ GUARDED_BY(encoder_crit_);
diff --git a/webrtc/modules/video_coding/video_sender.cc b/webrtc/modules/video_coding/video_sender.cc
index 148d237..d754c56 100644
--- a/webrtc/modules/video_coding/video_sender.cc
+++ b/webrtc/modules/video_coding/video_sender.cc
@@ -34,6 +34,7 @@
_encoder(nullptr),
_mediaOpt(clock_),
_encodedFrameCallback(post_encode_callback, &_mediaOpt),
+ post_encode_callback_(post_encode_callback),
send_stats_callback_(send_stats_callback),
_codecDataBase(&_encodedFrameCallback),
frame_dropper_enabled_(true),
@@ -310,7 +311,7 @@
<< " loss rate " << encoder_params.loss_rate << " rtt "
<< encoder_params.rtt << " input frame rate "
<< encoder_params.input_frame_rate;
- _encoder->OnDroppedFrame();
+ post_encode_callback_->OnDroppedFrame();
return VCM_OK;
}
// TODO(pbos): Make sure setting send codec is synchronized with video
diff --git a/webrtc/sdk/objc/Framework/Classes/h264_video_toolbox_encoder.h b/webrtc/sdk/objc/Framework/Classes/h264_video_toolbox_encoder.h
index 2413f77..0588722 100644
--- a/webrtc/sdk/objc/Framework/Classes/h264_video_toolbox_encoder.h
+++ b/webrtc/sdk/objc/Framework/Classes/h264_video_toolbox_encoder.h
@@ -46,7 +46,6 @@
int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
- void OnDroppedFrame() override;
int SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;
int SetRates(uint32_t new_bitrate_kbit, uint32_t frame_rate) override;
@@ -67,6 +66,8 @@
uint32_t timestamp,
VideoRotation rotation);
+ ScalingSettings GetScalingSettings() const override;
+
private:
int ResetCompressionSession();
void ConfigureCompressionSession();
@@ -85,10 +86,7 @@
int32_t height_;
const CFStringRef profile_;
- rtc::CriticalSection quality_scaler_crit_;
- QualityScaler quality_scaler_ GUARDED_BY(quality_scaler_crit_);
H264BitstreamParser h264_bitstream_parser_;
- bool enable_scaling_;
std::vector<uint8_t> nv12_scale_buffer_;
}; // H264VideoToolboxEncoder
diff --git a/webrtc/sdk/objc/Framework/Classes/h264_video_toolbox_encoder.mm b/webrtc/sdk/objc/Framework/Classes/h264_video_toolbox_encoder.mm
index 9f435b2..01f6d11 100644
--- a/webrtc/sdk/objc/Framework/Classes/h264_video_toolbox_encoder.mm
+++ b/webrtc/sdk/objc/Framework/Classes/h264_video_toolbox_encoder.mm
@@ -360,18 +360,9 @@
size_t max_payload_size) {
RTC_DCHECK(codec_settings);
RTC_DCHECK_EQ(codec_settings->codecType, kVideoCodecH264);
- {
- rtc::CritScope lock(&quality_scaler_crit_);
- quality_scaler_.Init(internal::kLowH264QpThreshold,
- internal::kHighH264QpThreshold,
- codec_settings->startBitrate, codec_settings->width,
- codec_settings->height, codec_settings->maxFramerate);
- QualityScaler::Resolution res = quality_scaler_.GetScaledResolution();
- // TODO(tkchin): We may need to enforce width/height dimension restrictions
- // to match what the encoder supports.
- width_ = res.width;
- height_ = res.height;
- }
+
+ width_ = codec_settings->width;
+ height_ = codec_settings->height;
// We can only set average bitrate on the HW encoder.
target_bitrate_bps_ = codec_settings->startBitrate;
bitrate_adjuster_.SetTargetBitrateBps(target_bitrate_bps_);
@@ -386,6 +377,9 @@
const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
+ // |frame| size should always match codec settings.
+ RTC_DCHECK_EQ(frame.width(), width_);
+ RTC_DCHECK_EQ(frame.height(), height_);
RTC_DCHECK(!frame.IsZeroSize());
if (!callback_ || !compression_session_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
@@ -399,18 +393,6 @@
#endif
bool is_keyframe_required = false;
- quality_scaler_.OnEncodeFrame(frame.width(), frame.height());
- const QualityScaler::Resolution scaled_res =
- quality_scaler_.GetScaledResolution();
-
- if (scaled_res.width != width_ || scaled_res.height != height_) {
- width_ = scaled_res.width;
- height_ = scaled_res.height;
- int ret = ResetCompressionSession();
- if (ret < 0)
- return ret;
- }
-
// Get a pixel buffer from the pool and copy frame data over.
CVPixelBufferPoolRef pixel_buffer_pool =
VTCompressionSessionGetPixelBufferPool(compression_session_);
@@ -457,11 +439,8 @@
if (!pixel_buffer) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
- // TODO(magjed): Optimize by merging scaling and NV12 pixel buffer
- // conversion once libyuv::MergeUVPlanes is available.
- rtc::scoped_refptr<VideoFrameBuffer> scaled_i420_buffer =
- quality_scaler_.GetScaledBuffer(frame.video_frame_buffer());
- if (!internal::CopyVideoFrameToPixelBuffer(scaled_i420_buffer,
+ RTC_DCHECK(pixel_buffer);
+ if (!internal::CopyVideoFrameToPixelBuffer(frame.video_frame_buffer(),
pixel_buffer)) {
LOG(LS_ERROR) << "Failed to copy frame data.";
CVBufferRelease(pixel_buffer);
@@ -517,11 +496,6 @@
return WEBRTC_VIDEO_CODEC_OK;
}
-void H264VideoToolboxEncoder::OnDroppedFrame() {
- rtc::CritScope lock(&quality_scaler_crit_);
- quality_scaler_.ReportDroppedFrame();
-}
-
int H264VideoToolboxEncoder::SetChannelParameters(uint32_t packet_loss,
int64_t rtt) {
// Encoder doesn't know anything about packet loss or rtt so just return.
@@ -533,10 +507,6 @@
target_bitrate_bps_ = 1000 * new_bitrate_kbit;
bitrate_adjuster_.SetTargetBitrateBps(target_bitrate_bps_);
SetBitrateBps(bitrate_adjuster_.GetAdjustedBitrateBps());
-
- rtc::CritScope lock(&quality_scaler_crit_);
- quality_scaler_.ReportFramerate(frame_rate);
-
return WEBRTC_VIDEO_CODEC_OK;
}
@@ -709,8 +679,6 @@
}
if (info_flags & kVTEncodeInfo_FrameDropped) {
LOG(LS_INFO) << "H264 encode dropped frame.";
- rtc::CritScope lock(&quality_scaler_crit_);
- quality_scaler_.ReportDroppedFrame();
return;
}
@@ -752,20 +720,20 @@
frame.rotation_ = rotation;
h264_bitstream_parser_.ParseBitstream(buffer->data(), buffer->size());
- int qp;
- if (h264_bitstream_parser_.GetLastSliceQp(&qp)) {
- rtc::CritScope lock(&quality_scaler_crit_);
- quality_scaler_.ReportQP(qp);
- frame.qp_ = qp;
- }
+ h264_bitstream_parser_.GetLastSliceQp(&frame.qp_);
- EncodedImageCallback::Result result =
+ EncodedImageCallback::Result res =
callback_->OnEncodedImage(frame, &codec_specific_info, header.get());
- if (result.error != EncodedImageCallback::Result::OK) {
- LOG(LS_ERROR) << "Encode callback failed: " << result.error;
+ if (res.error != EncodedImageCallback::Result::OK) {
+ LOG(LS_ERROR) << "Encode callback failed: " << res.error;
return;
}
bitrate_adjuster_.Update(frame._size);
}
+VideoEncoder::ScalingSettings H264VideoToolboxEncoder::GetScalingSettings()
+ const {
+ return VideoEncoder::ScalingSettings(true, internal::kLowH264QpThreshold,
+ internal::kHighH264QpThreshold);
+}
} // namespace webrtc
diff --git a/webrtc/video/overuse_frame_detector.cc b/webrtc/video/overuse_frame_detector.cc
index 1670dc3..f4a9c86 100644
--- a/webrtc/video/overuse_frame_detector.cc
+++ b/webrtc/video/overuse_frame_detector.cc
@@ -49,6 +49,7 @@
const float kSampleDiffMs = 33.0f;
const float kMaxExp = 7.0f;
+const auto kScaleReasonCpu = ScalingObserverInterface::ScaleReason::kCpu;
} // namespace
CpuOveruseOptions::CpuOveruseOptions()
@@ -204,7 +205,7 @@
OveruseFrameDetector::OveruseFrameDetector(
Clock* clock,
const CpuOveruseOptions& options,
- CpuOveruseObserver* observer,
+ ScalingObserverInterface* observer,
EncodedFrameObserver* encoder_timing,
CpuOveruseMetricsObserver* metrics_observer)
: check_overuse_task_(nullptr),
@@ -370,13 +371,13 @@
++num_overuse_detections_;
if (observer_)
- observer_->OveruseDetected();
+ observer_->ScaleDown(kScaleReasonCpu);
} else if (IsUnderusing(*metrics_, now)) {
last_rampup_time_ms_ = now;
in_quick_rampup_ = true;
if (observer_)
- observer_->NormalUsage();
+ observer_->ScaleUp(kScaleReasonCpu);
}
int rampup_delay =
diff --git a/webrtc/video/overuse_frame_detector.h b/webrtc/video/overuse_frame_detector.h
index f1a99d7..7021984 100644
--- a/webrtc/video/overuse_frame_detector.h
+++ b/webrtc/video/overuse_frame_detector.h
@@ -20,6 +20,7 @@
#include "webrtc/base/sequenced_task_checker.h"
#include "webrtc/base/task_queue.h"
#include "webrtc/base/thread_annotations.h"
+#include "webrtc/modules/video_coding/utility/quality_scaler.h"
namespace webrtc {
@@ -27,19 +28,6 @@
class EncodedFrameObserver;
class VideoFrame;
-// CpuOveruseObserver is called when a system overuse is detected and
-// VideoEngine cannot keep up the encoding frequency.
-class CpuOveruseObserver {
- public:
- // Called as soon as an overuse is detected.
- virtual void OveruseDetected() = 0;
- // Called periodically when the system is not overused any longer.
- virtual void NormalUsage() = 0;
-
- protected:
- virtual ~CpuOveruseObserver() {}
-};
-
struct CpuOveruseOptions {
CpuOveruseOptions();
@@ -79,7 +67,7 @@
public:
OveruseFrameDetector(Clock* clock,
const CpuOveruseOptions& options,
- CpuOveruseObserver* overuse_observer,
+ ScalingObserverInterface* overuse_observer,
EncodedFrameObserver* encoder_timing_,
CpuOveruseMetricsObserver* metrics_observer);
~OveruseFrameDetector();
@@ -131,7 +119,7 @@
const CpuOveruseOptions options_;
// Observer getting overuse reports.
- CpuOveruseObserver* const observer_;
+ ScalingObserverInterface* const observer_;
EncodedFrameObserver* const encoder_timing_;
// Stats metrics.
diff --git a/webrtc/video/overuse_frame_detector_unittest.cc b/webrtc/video/overuse_frame_detector_unittest.cc
index f00b32d..6768591 100644
--- a/webrtc/video/overuse_frame_detector_unittest.cc
+++ b/webrtc/video/overuse_frame_detector_unittest.cc
@@ -16,10 +16,11 @@
#include "webrtc/test/gtest.h"
#include "webrtc/video/overuse_frame_detector.h"
#include "webrtc/video_frame.h"
+#include "webrtc/modules/video_coding/utility/quality_scaler.h"
namespace webrtc {
-using ::testing::Invoke;
+using ::testing::InvokeWithoutArgs;
namespace {
const int kWidth = 640;
@@ -29,24 +30,24 @@
const int kProcessTime5ms = 5;
} // namespace
-class MockCpuOveruseObserver : public CpuOveruseObserver {
+class MockCpuOveruseObserver : public ScalingObserverInterface {
public:
MockCpuOveruseObserver() {}
virtual ~MockCpuOveruseObserver() {}
- MOCK_METHOD0(OveruseDetected, void());
- MOCK_METHOD0(NormalUsage, void());
+ MOCK_METHOD1(ScaleUp, void(ScaleReason));
+ MOCK_METHOD1(ScaleDown, void(ScaleReason));
};
-class CpuOveruseObserverImpl : public CpuOveruseObserver {
+class CpuOveruseObserverImpl : public ScalingObserverInterface {
public:
CpuOveruseObserverImpl() :
overuse_(0),
normaluse_(0) {}
virtual ~CpuOveruseObserverImpl() {}
- void OveruseDetected() { ++overuse_; }
- void NormalUsage() { ++normaluse_; }
+ void ScaleDown(ScaleReason) { ++overuse_; }
+ void ScaleUp(ScaleReason) { ++normaluse_; }
int overuse_;
int normaluse_;
@@ -56,7 +57,7 @@
public:
OveruseFrameDetectorUnderTest(Clock* clock,
const CpuOveruseOptions& options,
- CpuOveruseObserver* overuse_observer,
+ ScalingObserverInterface* overuse_observer,
EncodedFrameObserver* encoder_timing,
CpuOveruseMetricsObserver* metrics_observer)
: OveruseFrameDetector(clock,
@@ -145,6 +146,8 @@
std::unique_ptr<MockCpuOveruseObserver> observer_;
std::unique_ptr<OveruseFrameDetectorUnderTest> overuse_detector_;
CpuOveruseMetrics metrics_;
+
+ static const auto reason_ = ScalingObserverInterface::ScaleReason::kCpu;
};
@@ -152,33 +155,33 @@
// UsagePercent() < low_encode_usage_threshold_percent => underuse.
TEST_F(OveruseFrameDetectorTest, TriggerOveruse) {
// usage > high => overuse
- EXPECT_CALL(*(observer_.get()), OveruseDetected()).Times(1);
+ EXPECT_CALL(*(observer_.get()), ScaleDown(reason_)).Times(1);
TriggerOveruse(options_.high_threshold_consecutive_count);
}
TEST_F(OveruseFrameDetectorTest, OveruseAndRecover) {
// usage > high => overuse
- EXPECT_CALL(*(observer_.get()), OveruseDetected()).Times(1);
+ EXPECT_CALL(*(observer_.get()), ScaleDown(reason_)).Times(1);
TriggerOveruse(options_.high_threshold_consecutive_count);
// usage < low => underuse
- EXPECT_CALL(*(observer_.get()), NormalUsage()).Times(testing::AtLeast(1));
+ EXPECT_CALL(*(observer_.get()), ScaleUp(reason_)).Times(testing::AtLeast(1));
TriggerUnderuse();
}
TEST_F(OveruseFrameDetectorTest, OveruseAndRecoverWithNoObserver) {
overuse_detector_.reset(new OveruseFrameDetectorUnderTest(
clock_.get(), options_, nullptr, nullptr, this));
- EXPECT_CALL(*(observer_.get()), OveruseDetected()).Times(0);
+ EXPECT_CALL(*(observer_.get()), ScaleDown(reason_)).Times(0);
TriggerOveruse(options_.high_threshold_consecutive_count);
- EXPECT_CALL(*(observer_.get()), NormalUsage()).Times(0);
+ EXPECT_CALL(*(observer_.get()), ScaleUp(reason_)).Times(0);
TriggerUnderuse();
}
TEST_F(OveruseFrameDetectorTest, DoubleOveruseAndRecover) {
- EXPECT_CALL(*(observer_.get()), OveruseDetected()).Times(2);
+ EXPECT_CALL(*(observer_.get()), ScaleDown(reason_)).Times(2);
TriggerOveruse(options_.high_threshold_consecutive_count);
TriggerOveruse(options_.high_threshold_consecutive_count);
- EXPECT_CALL(*(observer_.get()), NormalUsage()).Times(testing::AtLeast(1));
+ EXPECT_CALL(*(observer_.get()), ScaleUp(reason_)).Times(testing::AtLeast(1));
TriggerUnderuse();
}
@@ -197,22 +200,22 @@
}
TEST_F(OveruseFrameDetectorTest, ConstantOveruseGivesNoNormalUsage) {
- EXPECT_CALL(*(observer_.get()), NormalUsage()).Times(0);
- EXPECT_CALL(*(observer_.get()), OveruseDetected()).Times(64);
+ EXPECT_CALL(*(observer_.get()), ScaleUp(reason_)).Times(0);
+ EXPECT_CALL(*(observer_.get()), ScaleDown(reason_)).Times(64);
for (size_t i = 0; i < 64; ++i) {
TriggerOveruse(options_.high_threshold_consecutive_count);
}
}
TEST_F(OveruseFrameDetectorTest, ConsecutiveCountTriggersOveruse) {
- EXPECT_CALL(*(observer_.get()), OveruseDetected()).Times(1);
+ EXPECT_CALL(*(observer_.get()), ScaleDown(reason_)).Times(1);
options_.high_threshold_consecutive_count = 2;
ReinitializeOveruseDetector();
TriggerOveruse(2);
}
TEST_F(OveruseFrameDetectorTest, IncorrectConsecutiveCountTriggersNoOveruse) {
- EXPECT_CALL(*(observer_.get()), OveruseDetected()).Times(0);
+ EXPECT_CALL(*(observer_.get()), ScaleDown(reason_)).Times(0);
options_.high_threshold_consecutive_count = 2;
ReinitializeOveruseDetector();
TriggerOveruse(1);
@@ -278,7 +281,8 @@
}
TEST_F(OveruseFrameDetectorTest, MeasuresMultipleConcurrentSamples) {
- EXPECT_CALL(*(observer_.get()), OveruseDetected()).Times(testing::AtLeast(1));
+ EXPECT_CALL(*(observer_.get()), ScaleDown(reason_))
+ .Times(testing::AtLeast(1));
static const int kIntervalMs = 33;
static const size_t kNumFramesEncodingDelay = 3;
VideoFrame frame(I420Buffer::Create(kWidth, kHeight),
@@ -299,7 +303,8 @@
TEST_F(OveruseFrameDetectorTest, UpdatesExistingSamples) {
// >85% encoding time should trigger overuse.
- EXPECT_CALL(*(observer_.get()), OveruseDetected()).Times(testing::AtLeast(1));
+ EXPECT_CALL(*(observer_.get()), ScaleDown(reason_))
+ .Times(testing::AtLeast(1));
static const int kIntervalMs = 33;
static const int kDelayMs = 30;
VideoFrame frame(I420Buffer::Create(kWidth, kHeight),
@@ -332,8 +337,8 @@
// Expect NormalUsage(). When called, stop the |overuse_detector_| and then
// set |event| to end the test.
- EXPECT_CALL(*(observer_.get()), NormalUsage())
- .WillOnce(Invoke([this, &event] {
+ EXPECT_CALL(*(observer_.get()), ScaleUp(reason_))
+ .WillOnce(InvokeWithoutArgs([this, &event] {
overuse_detector_->StopCheckForOveruse();
event.Set();
}));
diff --git a/webrtc/video/send_statistics_proxy.cc b/webrtc/video/send_statistics_proxy.cc
index 8c4dae5..82d7f0d 100644
--- a/webrtc/video/send_statistics_proxy.cc
+++ b/webrtc/video/send_statistics_proxy.cc
@@ -505,9 +505,8 @@
uma_container_->key_frame_counter_.Add(encoded_image._frameType ==
kVideoFrameKey);
-
stats_.bw_limited_resolution =
- encoded_image.adapt_reason_.quality_resolution_downscales > 0 ||
+ stats_.bw_limited_resolution ||
encoded_image.adapt_reason_.bw_resolutions_disabled > 0;
if (encoded_image.adapt_reason_.quality_resolution_downscales != -1) {
@@ -588,10 +587,11 @@
uma_container_->cpu_limited_frame_counter_.Add(stats_.cpu_limited_resolution);
}
-void SendStatisticsProxy::SetCpuRestrictedResolution(
- bool cpu_restricted_resolution) {
+void SendStatisticsProxy::SetResolutionRestrictionStats(bool bandwidth,
+ bool cpu) {
rtc::CritScope lock(&crit_);
- stats_.cpu_limited_resolution = cpu_restricted_resolution;
+ stats_.bw_limited_resolution = bandwidth;
+ stats_.cpu_limited_resolution = cpu;
}
void SendStatisticsProxy::OnCpuRestrictedResolutionChanged(
@@ -601,6 +601,13 @@
++stats_.number_of_cpu_adapt_changes;
}
+void SendStatisticsProxy::OnQualityRestrictedResolutionChanged(
+ bool restricted) {
+ rtc::CritScope lock(&crit_);
+ uma_container_->quality_downscales_counter_.Add(restricted);
+ stats_.bw_limited_resolution = restricted;
+}
+
void SendStatisticsProxy::RtcpPacketTypesCounterUpdated(
uint32_t ssrc,
const RtcpPacketTypeCounter& packet_counter) {
diff --git a/webrtc/video/send_statistics_proxy.h b/webrtc/video/send_statistics_proxy.h
index 803fb61..934dab2 100644
--- a/webrtc/video/send_statistics_proxy.h
+++ b/webrtc/video/send_statistics_proxy.h
@@ -57,12 +57,9 @@
// Used to update incoming frame rate.
void OnIncomingFrame(int width, int height);
- // Used to indicate that the current input frame resolution is restricted due
- // to cpu usage.
- void SetCpuRestrictedResolution(bool cpu_restricted);
- // Used to update the number of times the input frame resolution has changed
- // due to cpu adaptation.
void OnCpuRestrictedResolutionChanged(bool cpu_restricted_resolution);
+ void OnQualityRestrictedResolutionChanged(bool restricted);
+ void SetResolutionRestrictionStats(bool bandwidth, bool cpu);
void OnEncoderStatsUpdate(uint32_t framerate, uint32_t bitrate);
void OnSuspendChange(bool is_suspended);
diff --git a/webrtc/video/send_statistics_proxy_unittest.cc b/webrtc/video/send_statistics_proxy_unittest.cc
index aa7c14f..4cee104 100644
--- a/webrtc/video/send_statistics_proxy_unittest.cc
+++ b/webrtc/video/send_statistics_proxy_unittest.cc
@@ -722,22 +722,10 @@
EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
// Resolution not scaled.
encoded_image.adapt_reason_.bw_resolutions_disabled = 0;
- encoded_image.adapt_reason_.quality_resolution_downscales = 0;
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
// Resolution scaled due to bandwidth.
encoded_image.adapt_reason_.bw_resolutions_disabled = 1;
- encoded_image.adapt_reason_.quality_resolution_downscales = 0;
- statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
- EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
- // Resolution not scaled.
- encoded_image.adapt_reason_.bw_resolutions_disabled = 0;
- encoded_image.adapt_reason_.quality_resolution_downscales = 0;
- statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
- EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
- // Resolution scaled due to quality.
- encoded_image.adapt_reason_.bw_resolutions_disabled = 0;
- encoded_image.adapt_reason_.quality_resolution_downscales = 1;
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
}
diff --git a/webrtc/video/vie_encoder.cc b/webrtc/video/vie_encoder.cc
index c2598f4..5722d06 100644
--- a/webrtc/video/vie_encoder.cc
+++ b/webrtc/video/vie_encoder.cc
@@ -257,9 +257,6 @@
has_received_rpsi_(false),
picture_id_rpsi_(0),
clock_(Clock::GetRealTimeClock()),
- degradation_preference_(
- VideoSendStream::DegradationPreference::kBalanced),
- cpu_restricted_counter_(0),
last_frame_width_(0),
last_frame_height_(0),
last_captured_timestamp_(0),
@@ -292,6 +289,7 @@
rate_allocator_.reset();
video_sender_.RegisterExternalEncoder(nullptr, settings_.payload_type,
false);
+ quality_scaler_ = nullptr;
shutdown_event_.Set();
});
@@ -318,17 +316,12 @@
source_proxy_->SetSource(source, degradation_preference);
encoder_queue_.PostTask([this, degradation_preference] {
RTC_DCHECK_RUN_ON(&encoder_queue_);
- degradation_preference_ = degradation_preference;
- // Set the stats for if we are currently CPU restricted. We are CPU
- // restricted depending on degradation preference and
- // if the overusedetector has currently detected overuse which is counted in
- // |cpu_restricted_counter_|
- // We do this on the encoder task queue to avoid a race with the stats set
- // in ViEEncoder::NormalUsage and ViEEncoder::OveruseDetected.
- stats_proxy_->SetCpuRestrictedResolution(
- degradation_preference_ !=
- VideoSendStream::DegradationPreference::kMaintainResolution &&
- cpu_restricted_counter_ != 0);
+ scaling_enabled_ =
+ (degradation_preference !=
+ VideoSendStream::DegradationPreference::kMaintainResolution);
+ stats_proxy_->SetResolutionRestrictionStats(
+ scaling_enabled_ && scale_counter_[kQuality] > 0,
+ scaling_enabled_ && scale_counter_[kCpu] > 0);
});
}
@@ -420,6 +413,18 @@
sink_->OnEncoderConfigurationChanged(
std::move(streams), encoder_config_.min_transmit_bitrate_bps);
+
+ const auto scaling_settings = settings_.encoder->GetScalingSettings();
+ if (scaling_settings.enabled && scaling_enabled_) {
+ if (scaling_settings.thresholds) {
+ quality_scaler_.reset(
+ new QualityScaler(this, *(scaling_settings.thresholds)));
+ } else {
+ quality_scaler_.reset(new QualityScaler(this, codec_type_));
+ }
+ } else {
+ quality_scaler_.reset(nullptr);
+ }
}
void ViEEncoder::OnFrame(const VideoFrame& video_frame) {
@@ -497,6 +502,7 @@
void ViEEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
int64_t time_when_posted_in_ms) {
RTC_DCHECK_RUN_ON(&encoder_queue_);
+
if (pre_encode_callback_)
pre_encode_callback_->OnFrame(video_frame);
@@ -574,15 +580,25 @@
int64_t time_sent = clock_->TimeInMilliseconds();
uint32_t timestamp = encoded_image._timeStamp;
-
- encoder_queue_.PostTask([this, timestamp, time_sent] {
+ const int qp = encoded_image.qp_;
+ encoder_queue_.PostTask([this, timestamp, time_sent, qp] {
RTC_DCHECK_RUN_ON(&encoder_queue_);
overuse_detector_.FrameSent(timestamp, time_sent);
+ if (quality_scaler_)
+ quality_scaler_->ReportQP(qp);
});
return result;
}
+void ViEEncoder::OnDroppedFrame() {
+ encoder_queue_.PostTask([this] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ if (quality_scaler_)
+ quality_scaler_->ReportDroppedFrame();
+ });
+}
+
void ViEEncoder::SendStatistics(uint32_t bit_rate, uint32_t frame_rate) {
RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
if (stats_proxy_)
@@ -654,47 +670,67 @@
}
}
-void ViEEncoder::OveruseDetected() {
+void ViEEncoder::ScaleDown(ScaleReason reason) {
RTC_DCHECK_RUN_ON(&encoder_queue_);
- if (degradation_preference_ ==
- VideoSendStream::DegradationPreference::kMaintainResolution ||
- cpu_restricted_counter_ >= kMaxCpuDowngrades) {
+ if (!scaling_enabled_)
return;
- }
- LOG(LS_INFO) << "CPU overuse detected. Requesting lower resolution.";
// Request lower resolution if the current resolution is lower than last time
// we asked for the resolution to be lowered.
- // Update stats accordingly.
int current_pixel_count = last_frame_height_ * last_frame_width_;
- if (!max_pixel_count_ || current_pixel_count < *max_pixel_count_) {
- max_pixel_count_ = rtc::Optional<int>(current_pixel_count);
- max_pixel_count_step_up_ = rtc::Optional<int>();
- stats_proxy_->OnCpuRestrictedResolutionChanged(true);
- ++cpu_restricted_counter_;
- source_proxy_->RequestResolutionLowerThan(current_pixel_count);
+ if (max_pixel_count_ && current_pixel_count >= *max_pixel_count_)
+ return;
+ switch (reason) {
+ case kQuality:
+ if (scale_counter_[reason] >= kMaxQualityDowngrades)
+ return;
+ stats_proxy_->OnQualityRestrictedResolutionChanged(true);
+ break;
+ case kCpu:
+ if (scale_counter_[reason] >= kMaxCpuDowngrades)
+ return;
+ // Update stats accordingly.
+ stats_proxy_->OnCpuRestrictedResolutionChanged(true);
+ break;
+ }
+ max_pixel_count_ = rtc::Optional<int>(current_pixel_count);
+ max_pixel_count_step_up_ = rtc::Optional<int>();
+ ++scale_counter_[reason];
+ source_proxy_->RequestResolutionLowerThan(current_pixel_count);
+ LOG(LS_INFO) << "Scaling down resolution.";
+ for (size_t i = 0; i < kScaleReasonSize; ++i) {
+ LOG(LS_INFO) << "Scaled " << scale_counter_[i]
+ << " times for reason: " << (i ? "quality" : "cpu");
}
}
-void ViEEncoder::NormalUsage() {
+void ViEEncoder::ScaleUp(ScaleReason reason) {
RTC_DCHECK_RUN_ON(&encoder_queue_);
- if (degradation_preference_ ==
- VideoSendStream::DegradationPreference::kMaintainResolution ||
- cpu_restricted_counter_ == 0) {
+ if (scale_counter_[reason] == 0 || !scaling_enabled_)
return;
- }
-
- LOG(LS_INFO) << "CPU underuse detected. Requesting higher resolution.";
+ // Only scale if resolution is higher than last time
+ // we requested higher resolution.
int current_pixel_count = last_frame_height_ * last_frame_width_;
- // Request higher resolution if we are CPU restricted and the the current
- // resolution is higher than last time we requested higher resolution.
- // Update stats accordingly.
- if (!max_pixel_count_step_up_ ||
- current_pixel_count > *max_pixel_count_step_up_) {
- max_pixel_count_ = rtc::Optional<int>();
- max_pixel_count_step_up_ = rtc::Optional<int>(current_pixel_count);
- --cpu_restricted_counter_;
- stats_proxy_->OnCpuRestrictedResolutionChanged(cpu_restricted_counter_ > 0);
- source_proxy_->RequestHigherResolutionThan(current_pixel_count);
+ if (current_pixel_count <= max_pixel_count_step_up_.value_or(0))
+ return;
+ switch (reason) {
+ case kQuality:
+ stats_proxy_->OnQualityRestrictedResolutionChanged(
+ scale_counter_[reason] > 1);
+ break;
+ case kCpu:
+ // Update stats accordingly.
+ stats_proxy_->OnCpuRestrictedResolutionChanged(scale_counter_[reason] >
+ 1);
+ break;
+ }
+ max_pixel_count_ = rtc::Optional<int>();
+ max_pixel_count_step_up_ = rtc::Optional<int>(current_pixel_count);
+ --scale_counter_[reason];
+ source_proxy_->RequestHigherResolutionThan(current_pixel_count);
+ LOG(LS_INFO) << "Scaling up resolution.";
+ for (size_t i = 0; i < kScaleReasonSize; ++i) {
+ LOG(LS_INFO) << "Scaled " << scale_counter_[i]
+ << " times for reason: " << (i ? "quality" : "cpu");
}
}
diff --git a/webrtc/video/vie_encoder.h b/webrtc/video/vie_encoder.h
index 8373a3f..1266962 100644
--- a/webrtc/video/vie_encoder.h
+++ b/webrtc/video/vie_encoder.h
@@ -25,6 +25,7 @@
#include "webrtc/common_video/rotation.h"
#include "webrtc/media/base/videosinkinterface.h"
#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+#include "webrtc/modules/video_coding/utility/quality_scaler.h"
#include "webrtc/modules/video_coding/video_coding_impl.h"
#include "webrtc/modules/video_processing/include/video_processing.h"
#include "webrtc/system_wrappers/include/atomic32.h"
@@ -49,7 +50,7 @@
class ViEEncoder : public rtc::VideoSinkInterface<VideoFrame>,
public EncodedImageCallback,
public VCMSendStatisticsCallback,
- public CpuOveruseObserver {
+ public ScalingObserverInterface {
public:
// Interface for receiving encoded video frames and notifications about
// configuration changes.
@@ -60,8 +61,10 @@
int min_transmit_bitrate_bps) = 0;
};
- // Down grade resolution at most 2 times for CPU reasons.
+ // Downscale resolution at most 2 times for CPU reasons.
static const int kMaxCpuDowngrades = 2;
+ // Downscale resolution at most 2 times for low-quality reasons.
+ static const int kMaxQualityDowngrades = 2;
ViEEncoder(uint32_t number_of_cores,
SendStatisticsProxy* stats_proxy,
@@ -111,14 +114,14 @@
int64_t round_trip_time_ms);
protected:
- // Used for testing. For example the |CpuOveruseObserver| methods must be
- // called on |encoder_queue_|.
+ // Used for testing. For example the |ScalingObserverInterface| methods must
+ // be called on |encoder_queue_|.
rtc::TaskQueue* encoder_queue() { return &encoder_queue_; }
- // webrtc::CpuOveruseObserver implementation.
+ // webrtc::ScalingObserverInterface implementation.
// These methods are protected for easier testing.
- void OveruseDetected() override;
- void NormalUsage() override;
+ void ScaleUp(ScaleReason reason) override;
+ void ScaleDown(ScaleReason reason) override;
private:
class ConfigureEncoderTask;
@@ -161,6 +164,8 @@
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
+ void OnDroppedFrame() override;
+
bool EncoderPaused() const;
void TraceFrameDropStart();
void TraceFrameDropEnd();
@@ -176,6 +181,7 @@
vcm::VideoSender video_sender_ ACCESS_ON(&encoder_queue_);
OveruseFrameDetector overuse_detector_ ACCESS_ON(&encoder_queue_);
+ std::unique_ptr<QualityScaler> quality_scaler_ ACCESS_ON(&encoder_queue_);
SendStatisticsProxy* const stats_proxy_;
rtc::VideoSinkInterface<VideoFrame>* const pre_encode_callback_;
@@ -203,12 +209,11 @@
bool has_received_rpsi_ ACCESS_ON(&encoder_queue_);
uint64_t picture_id_rpsi_ ACCESS_ON(&encoder_queue_);
Clock* const clock_;
-
- VideoSendStream::DegradationPreference degradation_preference_
- ACCESS_ON(&encoder_queue_);
- // Counter used for deciding if the video resolution is currently
- // restricted by CPU usage.
- int cpu_restricted_counter_ ACCESS_ON(&encoder_queue_);
+ // Counters used for deciding if the video resolution is currently
+ // restricted, and if so, why.
+ int scale_counter_[kScaleReasonSize] ACCESS_ON(&encoder_queue_) = {0};
+ // True unless the degradation preference is kMaintainResolution.
+ bool scaling_enabled_ ACCESS_ON(&encoder_queue_) = false;
int last_frame_width_ ACCESS_ON(&encoder_queue_);
int last_frame_height_ ACCESS_ON(&encoder_queue_);
diff --git a/webrtc/video/vie_encoder_unittest.cc b/webrtc/video/vie_encoder_unittest.cc
index 71e68d3..6cc25cf 100644
--- a/webrtc/video/vie_encoder_unittest.cc
+++ b/webrtc/video/vie_encoder_unittest.cc
@@ -22,6 +22,9 @@
namespace webrtc {
+using DegradationPreference = VideoSendStream::DegradationPreference;
+using ScaleReason = ScalingObserverInterface::ScaleReason;
+
namespace {
const size_t kMaxPayloadLength = 1440;
const int kTargetBitrateBps = 100000;
@@ -42,32 +45,30 @@
class ViEEncoderUnderTest : public ViEEncoder {
public:
- ViEEncoderUnderTest(
- SendStatisticsProxy* stats_proxy,
- const webrtc::VideoSendStream::Config::EncoderSettings& settings)
+ ViEEncoderUnderTest(SendStatisticsProxy* stats_proxy,
+ const VideoSendStream::Config::EncoderSettings& settings)
: ViEEncoder(1 /* number_of_cores */,
stats_proxy,
settings,
nullptr /* pre_encode_callback */,
nullptr /* encoder_timing */) {}
- void TriggerCpuOveruse() {
+ void PostTaskAndWait(bool down, ScaleReason reason) {
rtc::Event event(false, false);
- encoder_queue()->PostTask([this, &event] {
- OveruseDetected();
+ encoder_queue()->PostTask([this, &event, reason, down] {
+ down ? ScaleDown(reason) : ScaleUp(reason);
event.Set();
});
- event.Wait(rtc::Event::kForever);
+ RTC_DCHECK(event.Wait(5000));
}
- void TriggerCpuNormalUsage() {
- rtc::Event event(false, false);
- encoder_queue()->PostTask([this, &event] {
- NormalUsage();
- event.Set();
- });
- event.Wait(rtc::Event::kForever);
- }
+ void TriggerCpuOveruse() { PostTaskAndWait(true, ScaleReason::kCpu); }
+
+ void TriggerCpuNormalUsage() { PostTaskAndWait(false, ScaleReason::kCpu); }
+
+ void TriggerQualityLow() { PostTaskAndWait(true, ScaleReason::kQuality); }
+
+ void TriggerQualityHigh() { PostTaskAndWait(false, ScaleReason::kQuality); }
};
class VideoStreamFactory
@@ -183,6 +184,10 @@
block_next_encode_ = true;
}
+ VideoEncoder::ScalingSettings GetScalingSettings() const override {
+ return VideoEncoder::ScalingSettings(true, 1, 2);
+ }
+
void ContinueEncode() { continue_encode_event_.Set(); }
void CheckLastTimeStampsMatch(int64_t ntp_time_ms,
@@ -549,7 +554,7 @@
frame_height /= 2;
}
- // Trigger CPU overuse a one more time. This should not trigger request for
+ // Trigger CPU overuse one more time. This should not trigger a request for
// lower resolution.
rtc::VideoSinkWants current_wants = video_source_.sink_wants();
video_source_.IncomingCapturedFrame(CreateFrame(
@@ -653,6 +658,138 @@
vie_encoder_->Stop();
}
+TEST_F(ViEEncoderTest, SwitchingSourceKeepsCpuAdaptation) {
+ const int kTargetBitrateBps = 100000;
+ vie_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ int frame_width = 1280;
+ int frame_height = 720;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, frame_width, frame_height));
+ sink_.WaitForEncodedFrame(1);
+
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_EQ(0, stats.number_of_cpu_adapt_changes);
+
+ vie_encoder_->TriggerCpuOveruse();
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(2, frame_width, frame_height));
+ sink_.WaitForEncodedFrame(2);
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Set new source with adaptation still enabled.
+ test::FrameForwarder new_video_source;
+ vie_encoder_->SetSource(&new_video_source,
+ VideoSendStream::DegradationPreference::kBalanced);
+
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(3, frame_width, frame_height));
+ sink_.WaitForEncodedFrame(3);
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Set adaptation disabled.
+ vie_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kMaintainResolution);
+
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(4, frame_width, frame_height));
+ sink_.WaitForEncodedFrame(4);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Set adaptation back to enabled.
+ vie_encoder_->SetSource(&new_video_source,
+ VideoSendStream::DegradationPreference::kBalanced);
+
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(5, frame_width, frame_height));
+ sink_.WaitForEncodedFrame(5);
+ stats = stats_proxy_->GetStats();
+ EXPECT_TRUE(stats.cpu_limited_resolution);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ vie_encoder_->TriggerCpuNormalUsage();
+
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(6, frame_width, frame_height));
+ sink_.WaitForEncodedFrame(6);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+ vie_encoder_->Stop();
+}
+
+TEST_F(ViEEncoderTest, SwitchingSourceKeepsQualityAdaptation) {
+ const int kTargetBitrateBps = 100000;
+ vie_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ int frame_width = 1280;
+ int frame_height = 720;
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, frame_width, frame_height));
+ sink_.WaitForEncodedFrame(1);
+
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_EQ(0, stats.number_of_cpu_adapt_changes);
+
+ // Set new source with adaptation still enabled.
+ test::FrameForwarder new_video_source;
+ vie_encoder_->SetSource(&new_video_source,
+ VideoSendStream::DegradationPreference::kBalanced);
+
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(2, frame_width, frame_height));
+ sink_.WaitForEncodedFrame(2);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.bw_limited_resolution);
+ EXPECT_EQ(0, stats.number_of_cpu_adapt_changes);
+
+ vie_encoder_->TriggerQualityLow();
+
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(3, frame_width, frame_height));
+ sink_.WaitForEncodedFrame(3);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_TRUE(stats.bw_limited_resolution);
+
+ vie_encoder_->SetSource(&new_video_source,
+ VideoSendStream::DegradationPreference::kBalanced);
+
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(4, frame_width, frame_height));
+ sink_.WaitForEncodedFrame(4);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_TRUE(stats.bw_limited_resolution);
+
+ // Set adaptation disabled.
+ vie_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kMaintainResolution);
+
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(5, frame_width, frame_height));
+ sink_.WaitForEncodedFrame(5);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.bw_limited_resolution);
+
+ vie_encoder_->Stop();
+}
+
TEST_F(ViEEncoderTest, StatsTracksAdaptationStatsWhenSwitchingSource) {
vie_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
@@ -727,6 +864,54 @@
vie_encoder_->Stop();
}
+TEST_F(ViEEncoderTest, ScalingUpAndDownDoesNothingWithMaintainResolution) {
+ const int kTargetBitrateBps = 100000;
+ int frame_width = 1280;
+ int frame_height = 720;
+ vie_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+
+ // Expect no scaling to begin with
+ EXPECT_FALSE(video_source_.sink_wants().max_pixel_count);
+ EXPECT_FALSE(video_source_.sink_wants().max_pixel_count_step_up);
+
+ // Trigger scale down
+ vie_encoder_->TriggerQualityLow();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, frame_width, frame_height));
+ sink_.WaitForEncodedFrame(1);
+
+ // Expect a scale down.
+ EXPECT_TRUE(video_source_.sink_wants().max_pixel_count);
+ EXPECT_LT(*video_source_.sink_wants().max_pixel_count,
+ frame_width * frame_height);
+
+ // Set adaptation disabled.
+ test::FrameForwarder new_video_source;
+ vie_encoder_->SetSource(
+ &new_video_source,
+ VideoSendStream::DegradationPreference::kMaintainResolution);
+
+ // Trigger scale down
+ vie_encoder_->TriggerQualityLow();
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(2, frame_width, frame_height));
+ sink_.WaitForEncodedFrame(2);
+
+ // Expect no scaling
+ EXPECT_FALSE(new_video_source.sink_wants().max_pixel_count);
+
+ // Trigger scale up
+ vie_encoder_->TriggerQualityHigh();
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(3, frame_width, frame_height));
+ sink_.WaitForEncodedFrame(3);
+
+ // Expect nothing to change, still no scaling
+ EXPECT_FALSE(new_video_source.sink_wants().max_pixel_count);
+
+ vie_encoder_->Stop();
+}
+
TEST_F(ViEEncoderTest, UMACpuLimitedResolutionInPercent) {
vie_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
diff --git a/webrtc/video_encoder.h b/webrtc/video_encoder.h
index f8a04f2..0a21fa3 100644
--- a/webrtc/video_encoder.h
+++ b/webrtc/video_encoder.h
@@ -19,6 +19,7 @@
#include "webrtc/common_types.h"
#include "webrtc/typedefs.h"
#include "webrtc/video_frame.h"
+#include "webrtc/base/optional.h"
namespace webrtc {
@@ -59,10 +60,38 @@
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) = 0;
+
+ virtual void OnDroppedFrame() {}
};
class VideoEncoder {
public:
+ enum EncoderType {
+ kH264,
+ kVp8,
+ kVp9,
+ kUnsupportedCodec,
+ };
+ struct QpThresholds {
+ QpThresholds(int l, int h) : low(l), high(h) {}
+ QpThresholds() : low(-1), high(-1) {}
+ int low;
+ int high;
+ };
+ struct ScalingSettings {
+ ScalingSettings(bool on, int low, int high)
+ : enabled(on),
+ thresholds(rtc::Optional<QpThresholds>(QpThresholds(low, high))) {}
+ explicit ScalingSettings(bool on) : enabled(on) {}
+ const bool enabled;
+ const rtc::Optional<QpThresholds> thresholds;
+ };
+ static VideoEncoder* Create(EncoderType codec_type);
+ // Returns true if this type of encoder can be created using
+ // VideoEncoder::Create.
+ static bool IsSupportedSoftware(EncoderType codec_type);
+ static EncoderType CodecToEncoderType(VideoCodecType codec_type);
+
static VideoCodecVP8 GetDefaultVp8Settings();
static VideoCodecVP9 GetDefaultVp9Settings();
static VideoCodecH264 GetDefaultH264Settings();
@@ -148,8 +177,13 @@
return SetRates(allocation.get_sum_kbps(), framerate);
}
+ // Any encoder implementation wishing to use the WebRTC provided
+ // quality scaler must implement this method.
+ virtual ScalingSettings GetScalingSettings() const {
+ return ScalingSettings(false);
+ }
+
virtual int32_t SetPeriodicKeyFrames(bool enable) { return -1; }
- virtual void OnDroppedFrame() {}
virtual bool SupportsNativeHandle() const { return false; }
virtual const char* ImplementationName() const { return "unknown"; }
};