Replace the VP9 screen sharing implementation.
- Remove referencing control from the encoder wrapper. Use a fixed
temporal prediction structure instead.
- Remove flexible mode from the encoder wrapper. It only worked with
the referencing control that this CL removes.
- Remove the external framerate/bitrate controller. Keep the codec's
internal frame dropping enabled for screen sharing.
- Use GetSvcConfig() to configure layering (see the sketch below).
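
A minimal sketch of the new layering path (editor illustration, not code
from this CL), assuming the GetSvcConfig() call shape used in
video_codec_initializer.cc below and that video_codec is the
webrtc::VideoCodec being configured. The 1280x720 input and the
2 spatial / 1 temporal layer split are example values, the parameter
comments are descriptive rather than exact names, and includes are
omitted since this is a fragment:

  // Sketch: derive VP9 SVC layers when no explicit spatial layers are set.
  std::vector<webrtc::SpatialLayer> layers = webrtc::GetSvcConfig(
      /*input_width=*/1280, /*input_height=*/720,
      /*num_spatial_layers=*/2, /*num_temporal_layers=*/1,
      /*is_screen_sharing=*/true);
  for (size_t i = 0; i < layers.size(); ++i)
    video_codec.spatialLayers[i] = layers[i];
  // Mirror the resulting layering back into the VP9-specific settings,
  // as the initializer now does.
  video_codec.VP9()->numberOfSpatialLayers =
      static_cast<unsigned char>(layers.size());
  video_codec.VP9()->numberOfTemporalLayers =
      static_cast<unsigned char>(layers.back().numberOfTemporalLayers);
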
Bug: webrtc:9261
Change-Id: I355baa6aab7b98ac5028b3851d1f8ccc82a308e0
Reviewed-on: https://webrtc-review.googlesource.com/76801
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Commit-Queue: Sergey Silkin <ssilkin@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23311}
diff --git a/media/engine/webrtcvideoengine.cc b/media/engine/webrtcvideoengine.cc
index 29766a7..2b20be0 100644
--- a/media/engine/webrtcvideoengine.cc
+++ b/media/engine/webrtcvideoengine.cc
@@ -455,37 +455,32 @@
if (CodecNamesEq(codec.name, kVp9CodecName)) {
webrtc::VideoCodecVP9 vp9_settings =
webrtc::VideoEncoder::GetDefaultVp9Settings();
- if (is_screencast) {
- // TODO(asapersson): Set to 2 for now since there is a DCHECK in
- // VideoSendStream::ReconfigureVideoEncoder.
- vp9_settings.numberOfSpatialLayers = 2;
- vp9_settings.numberOfTemporalLayers = 1;
- } else {
- const size_t default_num_spatial_layers =
- parameters_.config.rtp.ssrcs.size();
- const size_t num_spatial_layers =
- GetVp9SpatialLayersFromFieldTrial().value_or(
- default_num_spatial_layers);
+ const size_t default_num_spatial_layers =
+ parameters_.config.rtp.ssrcs.size();
+ const size_t num_spatial_layers =
+ GetVp9SpatialLayersFromFieldTrial().value_or(
+ default_num_spatial_layers);
- const size_t default_num_temporal_layers =
- num_spatial_layers > 1 ? kConferenceDefaultNumTemporalLayers : 1;
- const size_t num_temporal_layers =
- GetVp9TemporalLayersFromFieldTrial().value_or(
- default_num_temporal_layers);
+ const size_t default_num_temporal_layers =
+ num_spatial_layers > 1 ? kConferenceDefaultNumTemporalLayers : 1;
+ const size_t num_temporal_layers =
+ GetVp9TemporalLayersFromFieldTrial().value_or(
+ default_num_temporal_layers);
- vp9_settings.numberOfSpatialLayers = std::min<unsigned char>(
- num_spatial_layers, kConferenceMaxNumSpatialLayers);
- vp9_settings.numberOfTemporalLayers = std::min<unsigned char>(
- num_temporal_layers, kConferenceMaxNumTemporalLayers);
-
- // Limit inter-layer prediction to key pictures.
- vp9_settings.interLayerPred = webrtc::InterLayerPredMode::kOnKeyPic;
- }
+ vp9_settings.numberOfSpatialLayers = std::min<unsigned char>(
+ num_spatial_layers, kConferenceMaxNumSpatialLayers);
+ vp9_settings.numberOfTemporalLayers = std::min<unsigned char>(
+ num_temporal_layers, kConferenceMaxNumTemporalLayers);
// VP9 denoising is disabled by default.
vp9_settings.denoisingOn = codec_default_denoising ? true : denoising;
- vp9_settings.frameDroppingOn = frame_dropping;
vp9_settings.automaticResizeOn = automatic_resize;
+ // Ensure frame dropping is always enabled.
+ RTC_DCHECK(vp9_settings.frameDroppingOn);
+ if (!is_screencast) {
+ // Limit inter-layer prediction to key pictures.
+ vp9_settings.interLayerPred = webrtc::InterLayerPredMode::kOnKeyPic;
+ }
return new rtc::RefCountedObject<
webrtc::VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
}
diff --git a/media/engine/webrtcvideoengine_unittest.cc b/media/engine/webrtcvideoengine_unittest.cc
index 14723cd..b368abe 100644
--- a/media/engine/webrtcvideoengine_unittest.cc
+++ b/media/engine/webrtcvideoengine_unittest.cc
@@ -2962,14 +2962,14 @@
ASSERT_TRUE(stream->GetVp9Settings(&vp9_settings)) << "No VP9 config set.";
EXPECT_FALSE(vp9_settings.denoisingOn);
- // Frame dropping always off for screen sharing.
- EXPECT_FALSE(vp9_settings.frameDroppingOn);
+ // Frame dropping always on for screen sharing.
+ EXPECT_TRUE(vp9_settings.frameDroppingOn);
stream = SetDenoisingOption(last_ssrc_, &capturer, false);
ASSERT_TRUE(stream->GetVp9Settings(&vp9_settings)) << "No VP9 config set.";
EXPECT_FALSE(vp9_settings.denoisingOn);
- EXPECT_FALSE(vp9_settings.frameDroppingOn);
+ EXPECT_TRUE(vp9_settings.frameDroppingOn);
EXPECT_TRUE(channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
}
diff --git a/modules/video_coding/BUILD.gn b/modules/video_coding/BUILD.gn
index d5a76a0..269b2ca 100644
--- a/modules/video_coding/BUILD.gn
+++ b/modules/video_coding/BUILD.gn
@@ -477,8 +477,6 @@
if (rtc_libvpx_build_vp9) {
sources = [
"codecs/vp9/include/vp9.h",
- "codecs/vp9/screenshare_layers.cc",
- "codecs/vp9/screenshare_layers.h",
"codecs/vp9/vp9_frame_buffer_pool.cc",
"codecs/vp9/vp9_frame_buffer_pool.h",
"codecs/vp9/vp9_impl.cc",
@@ -816,9 +814,6 @@
"video_receiver_unittest.cc",
"video_sender_unittest.cc",
]
- if (rtc_libvpx_build_vp9) {
- sources += [ "codecs/vp9/vp9_screenshare_layers_unittest.cc" ]
- }
if (rtc_use_h264) {
sources += [ "codecs/h264/h264_encoder_impl_unittest.cc" ]
}
diff --git a/modules/video_coding/codecs/vp9/screenshare_layers.cc b/modules/video_coding/codecs/vp9/screenshare_layers.cc
deleted file mode 100644
index 158b0e5..0000000
--- a/modules/video_coding/codecs/vp9/screenshare_layers.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-/* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
-*
-* Use of this source code is governed by a BSD-style license
-* that can be found in the LICENSE file in the root of the source
-* tree. An additional intellectual property rights grant can be found
-* in the file PATENTS. All contributing project authors may
-* be found in the AUTHORS file in the root of the source tree.
-*/
-
-#include "modules/video_coding/codecs/vp9/screenshare_layers.h"
-#include <algorithm>
-#include "rtc_base/checks.h"
-
-namespace webrtc {
-
-ScreenshareLayersVP9::ScreenshareLayersVP9(uint8_t num_layers)
- : num_layers_(num_layers),
- start_layer_(0),
- last_timestamp_(0),
- timestamp_initialized_(false) {
- RTC_DCHECK_GT(num_layers, 0);
- RTC_DCHECK_LE(num_layers, kMaxVp9NumberOfSpatialLayers);
- memset(bits_used_, 0, sizeof(bits_used_));
- memset(threshold_kbps_, 0, sizeof(threshold_kbps_));
-}
-
-uint8_t ScreenshareLayersVP9::GetStartLayer() const {
- return start_layer_;
-}
-
-void ScreenshareLayersVP9::ConfigureBitrate(int threshold_kbps,
- uint8_t layer_id) {
- // The upper layer is always the layer we spill frames
- // to when the bitrate becomes to high, therefore setting
- // a max limit is not allowed. The top layer bitrate is
- // never used either so configuring it makes no difference.
- RTC_DCHECK_LT(layer_id, num_layers_ - 1);
- threshold_kbps_[layer_id] = threshold_kbps;
-}
-
-void ScreenshareLayersVP9::LayerFrameEncoded(unsigned int size_bytes,
- uint8_t layer_id) {
- RTC_DCHECK_LT(layer_id, num_layers_);
- bits_used_[layer_id] += size_bytes * 8;
-}
-
-VP9EncoderImpl::SuperFrameRefSettings
-ScreenshareLayersVP9::GetSuperFrameSettings(uint32_t timestamp,
- bool is_keyframe) {
- VP9EncoderImpl::SuperFrameRefSettings settings;
- if (!timestamp_initialized_) {
- last_timestamp_ = timestamp;
- timestamp_initialized_ = true;
- }
- float time_diff = (timestamp - last_timestamp_) / 90.f;
- float total_bits_used = 0;
- float total_threshold_kbps = 0;
- start_layer_ = 0;
-
- // Up to (num_layers - 1) because we only have
- // (num_layers - 1) thresholds to check.
- for (int layer_id = 0; layer_id < num_layers_ - 1; ++layer_id) {
- bits_used_[layer_id] = std::max(
- 0.f, bits_used_[layer_id] - time_diff * threshold_kbps_[layer_id]);
- total_bits_used += bits_used_[layer_id];
- total_threshold_kbps += threshold_kbps_[layer_id];
-
- // If this is a keyframe then there should be no
- // references to any previous frames.
- if (!is_keyframe) {
- settings.layer[layer_id].ref_buf1 = layer_id;
- if (total_bits_used > total_threshold_kbps * 1000)
- start_layer_ = layer_id + 1;
- }
-
- settings.layer[layer_id].upd_buf = layer_id;
- }
- // Since the above loop does not iterate over the last layer
- // the reference of the last layer has to be set after the loop,
- // and if this is a keyframe there should be no references to
- // any previous frames.
- if (!is_keyframe)
- settings.layer[num_layers_ - 1].ref_buf1 = num_layers_ - 1;
-
- settings.layer[num_layers_ - 1].upd_buf = num_layers_ - 1;
- settings.is_keyframe = is_keyframe;
- settings.start_layer = start_layer_;
- settings.stop_layer = num_layers_ - 1;
- last_timestamp_ = timestamp;
- return settings;
-}
-
-} // namespace webrtc
diff --git a/modules/video_coding/codecs/vp9/screenshare_layers.h b/modules/video_coding/codecs/vp9/screenshare_layers.h
deleted file mode 100644
index 6725ab7..0000000
--- a/modules/video_coding/codecs/vp9/screenshare_layers.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
-*
-* Use of this source code is governed by a BSD-style license
-* that can be found in the LICENSE file in the root of the source
-* tree. An additional intellectual property rights grant can be found
-* in the file PATENTS. All contributing project authors may
-* be found in the AUTHORS file in the root of the source tree.
-*/
-
-#ifndef MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
-#define MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
-
-#include "modules/video_coding/codecs/vp9/vp9_impl.h"
-
-namespace webrtc {
-
-class ScreenshareLayersVP9 {
- public:
- explicit ScreenshareLayersVP9(uint8_t num_layers);
-
- // The target bitrate for layer with id layer_id.
- void ConfigureBitrate(int threshold_kbps, uint8_t layer_id);
-
- // The current start layer.
- uint8_t GetStartLayer() const;
-
- // Update the layer with the size of the layer frame.
- void LayerFrameEncoded(unsigned int size_bytes, uint8_t layer_id);
-
- // Get the layer settings for the next superframe.
- //
- // In short, each time the GetSuperFrameSettings is called the
- // bitrate of every layer is calculated and if the cummulative
- // bitrate exceeds the configured cummulative bitrates
- // (ConfigureBitrate to configure) up to and including that
- // layer then the resulting encoding settings for the
- // superframe will only encode layers above that layer.
- VP9EncoderImpl::SuperFrameRefSettings GetSuperFrameSettings(
- uint32_t timestamp,
- bool is_keyframe);
-
- private:
- // How many layers that are used.
- uint8_t num_layers_;
-
- // The index of the first layer to encode.
- uint8_t start_layer_;
-
- // Cummulative target kbps for the different layers.
- float threshold_kbps_[kMaxVp9NumberOfSpatialLayers - 1];
-
- // How many bits that has been used for a certain layer. Increased in
- // FrameEncoded() by the size of the encoded frame and decreased in
- // GetSuperFrameSettings() depending on the time between frames.
- float bits_used_[kMaxVp9NumberOfSpatialLayers];
-
- // Timestamp of last frame.
- uint32_t last_timestamp_;
-
- // If the last_timestamp_ has been set.
- bool timestamp_initialized_;
-};
-
-} // namespace webrtc
-
-#endif // MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
diff --git a/modules/video_coding/codecs/vp9/vp9_impl.cc b/modules/video_coding/codecs/vp9/vp9_impl.cc
index 408ec15..6ff7e32 100644
--- a/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -23,7 +23,6 @@
#include "common_video/include/video_frame_buffer.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
-#include "modules/video_coding/codecs/vp9/screenshare_layers.h"
#include "modules/video_coding/codecs/vp9/svc_rate_allocator.h"
#include "rtc_base/checks.h"
#include "rtc_base/keep_ref_until_done.h"
@@ -79,10 +78,7 @@
num_temporal_layers_(0),
num_spatial_layers_(0),
inter_layer_pred_(InterLayerPredMode::kOn),
- is_flexible_mode_(false),
- frames_encoded_(0),
- // Use two spatial when screensharing with flexible mode.
- spatial_layer_(new ScreenshareLayersVP9(2)) {
+ is_flexible_mode_(false) {
memset(&codec_, 0, sizeof(codec_));
memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t));
}
@@ -130,7 +126,6 @@
uint8_t i = 0;
config_->rc_target_bitrate = bitrate_allocation.get_sum_kbps();
- spatial_layer_->ConfigureBitrate(bitrate_allocation.get_sum_kbps(), 0);
if (ExplicitlyConfiguredSpatialLayers()) {
for (size_t sl_idx = 0; sl_idx < num_spatial_layers_; ++sl_idx) {
@@ -342,12 +337,11 @@
// TODO(asapersson): Check configuration of temporal switch up and increase
// pattern length.
is_flexible_mode_ = inst->VP9().flexibleMode;
- if (is_flexible_mode_) {
- config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
- config_->ts_number_layers = num_temporal_layers_;
- if (codec_.mode == kScreensharing)
- spatial_layer_->ConfigureBitrate(inst->startBitrate, 0);
- } else if (num_temporal_layers_ == 1) {
+
+ // TODO(ssilkin): Only non-flexible mode is supported for now.
+ RTC_DCHECK(!is_flexible_mode_);
+
+ if (num_temporal_layers_ == 1) {
gof_.SetGofInfoVP9(kTemporalStructureMode1);
config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING;
config_->ts_number_layers = 1;
@@ -490,6 +484,22 @@
default:
RTC_NOTREACHED();
}
+
+ if (!is_flexible_mode_) {
+ // In RTP non-flexible mode, frame dropping of individual layers in a
+ // superframe leads to incorrect reference picture ID values in the
+ // RTP header. Dropping the entire superframe if the base is dropped
+ // or not dropping upper layers if base is not dropped mitigates
+ // the problem.
+ vpx_svc_frame_drop_t svc_drop_frame;
+ svc_drop_frame.framedrop_mode = CONSTRAINED_LAYER_DROP;
+ for (size_t i = 0; i < num_spatial_layers_; ++i) {
+ svc_drop_frame.framedrop_thresh[i] =
+ (i == 0) ? config_->rc_dropframe_thresh : 0;
+ }
+ vpx_codec_control(encoder_, VP9E_SET_SVC_FRAME_DROP_LAYER,
+ &svc_drop_frame);
+ }
}
// Register callback for getting each spatial layer.
@@ -582,41 +592,6 @@
flags = VPX_EFLAG_FORCE_KF;
}
- if (is_flexible_mode_) {
- SuperFrameRefSettings settings;
-
- // These structs are copied when calling vpx_codec_control,
- // therefore it is ok for them to go out of scope.
- vpx_svc_ref_frame_config enc_layer_conf;
- vpx_svc_layer_id layer_id;
-
- if (codec_.mode == kRealtimeVideo) {
- // Real time video not yet implemented in flexible mode.
- RTC_NOTREACHED();
- } else {
- settings = spatial_layer_->GetSuperFrameSettings(input_image.timestamp(),
- force_key_frame_);
- }
- enc_layer_conf = GenerateRefsAndFlags(settings);
- layer_id.temporal_layer_id = 0;
- layer_id.spatial_layer_id = settings.start_layer;
- vpx_codec_control(encoder_, VP9E_SET_SVC_LAYER_ID, &layer_id);
- vpx_codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG, &enc_layer_conf);
- } else if (codec_.mode == kRealtimeVideo && num_spatial_layers_ > 1) {
- // In RTP non-flexible mode, frame dropping of individual layers in a
- // superframe leads to incorrect reference picture ID values in the
- // RTP header. Dropping the entire superframe if the base is dropped
- // or not dropping upper layers if base is not dropped mitigates
- // the problem.
- vpx_svc_frame_drop_t svc_drop_frame;
- svc_drop_frame.framedrop_mode = CONSTRAINED_LAYER_DROP;
- for (size_t i = 0; i < num_spatial_layers_; ++i) {
- svc_drop_frame.framedrop_thresh[i] =
- (i == 0) ? config_->rc_dropframe_thresh : 0;
- }
- vpx_codec_control(encoder_, VP9E_SET_SVC_FRAME_DROP_LAYER, &svc_drop_frame);
- }
-
RTC_CHECK_GT(codec_.maxFramerate, 0);
uint32_t duration = 90000 / codec_.maxFramerate;
if (vpx_codec_encode(encoder_, raw_, timestamp_, duration, flags,
@@ -703,18 +678,12 @@
// bit.
vp9_info->num_spatial_layers = num_spatial_layers_;
- vp9_info->num_ref_pics = 0;
- if (vp9_info->flexible_mode) {
- vp9_info->gof_idx = kNoGofIdx;
- vp9_info->num_ref_pics = num_ref_pics_[layer_id.spatial_layer_id];
- for (int i = 0; i < num_ref_pics_[layer_id.spatial_layer_id]; ++i) {
- vp9_info->p_diff[i] = p_diff_[layer_id.spatial_layer_id][i];
- }
- } else {
- vp9_info->gof_idx =
- static_cast<uint8_t>(pics_since_key_ % gof_.num_frames_in_gof);
- vp9_info->temporal_up_switch = gof_.temporal_up_switch[vp9_info->gof_idx];
- }
+ RTC_DCHECK(!vp9_info->flexible_mode);
+ vp9_info->gof_idx =
+ static_cast<uint8_t>(pics_since_key_ % gof_.num_frames_in_gof);
+ vp9_info->temporal_up_switch = gof_.temporal_up_switch[vp9_info->gof_idx];
+ vp9_info->num_ref_pics =
+ is_key_pic ? 0 : gof_.num_ref_pics[vp9_info->gof_idx];
if (vp9_info->ss_data_available) {
vp9_info->spatial_layer_resolution_present = true;
@@ -758,12 +727,6 @@
memcpy(encoded_image_._buffer, pkt->data.frame.buf, pkt->data.frame.sz);
encoded_image_._length = pkt->data.frame.sz;
- if (is_flexible_mode_ && codec_.mode == kScreensharing) {
- spatial_layer_->LayerFrameEncoded(
- static_cast<unsigned int>(encoded_image_._length),
- layer_id.spatial_layer_id);
- }
-
const bool is_key_frame =
(pkt->data.frame.flags & VPX_FRAME_IS_KEY) ? true : false;
// Ensure encoder issued key frame on request.
@@ -819,108 +782,6 @@
}
}
-vpx_svc_ref_frame_config VP9EncoderImpl::GenerateRefsAndFlags(
- const SuperFrameRefSettings& settings) {
- static const vpx_enc_frame_flags_t kAllFlags =
- VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST |
- VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_GF;
- vpx_svc_ref_frame_config sf_conf = {};
- if (settings.is_keyframe) {
- // Used later on to make sure we don't make any invalid references.
- memset(buffer_updated_at_frame_, -1, sizeof(buffer_updated_at_frame_));
- for (int layer = settings.start_layer; layer <= settings.stop_layer;
- ++layer) {
- num_ref_pics_[layer] = 0;
- buffer_updated_at_frame_[settings.layer[layer].upd_buf] = frames_encoded_;
- // When encoding a keyframe only the alt_fb_idx is used
- // to specify which layer ends up in which buffer.
- sf_conf.alt_fb_idx[layer] = settings.layer[layer].upd_buf;
- }
- } else {
- for (int layer_idx = settings.start_layer; layer_idx <= settings.stop_layer;
- ++layer_idx) {
- vpx_enc_frame_flags_t layer_flags = kAllFlags;
- num_ref_pics_[layer_idx] = 0;
- int8_t refs[3] = {settings.layer[layer_idx].ref_buf1,
- settings.layer[layer_idx].ref_buf2,
- settings.layer[layer_idx].ref_buf3};
-
- for (unsigned int ref_idx = 0; ref_idx < kMaxVp9RefPics; ++ref_idx) {
- if (refs[ref_idx] == -1)
- continue;
-
- RTC_DCHECK_GE(refs[ref_idx], 0);
- RTC_DCHECK_LE(refs[ref_idx], 7);
- // Easier to remove flags from all flags rather than having to
- // build the flags from 0.
- switch (num_ref_pics_[layer_idx]) {
- case 0: {
- sf_conf.lst_fb_idx[layer_idx] = refs[ref_idx];
- layer_flags &= ~VP8_EFLAG_NO_REF_LAST;
- break;
- }
- case 1: {
- sf_conf.gld_fb_idx[layer_idx] = refs[ref_idx];
- layer_flags &= ~VP8_EFLAG_NO_REF_GF;
- break;
- }
- case 2: {
- sf_conf.alt_fb_idx[layer_idx] = refs[ref_idx];
- layer_flags &= ~VP8_EFLAG_NO_REF_ARF;
- break;
- }
- }
- // Make sure we don't reference a buffer that hasn't been
- // used at all or hasn't been used since a keyframe.
- RTC_DCHECK_NE(buffer_updated_at_frame_[refs[ref_idx]], -1);
-
- p_diff_[layer_idx][num_ref_pics_[layer_idx]] =
- frames_encoded_ - buffer_updated_at_frame_[refs[ref_idx]];
- num_ref_pics_[layer_idx]++;
- }
-
- bool upd_buf_same_as_a_ref = false;
- if (settings.layer[layer_idx].upd_buf != -1) {
- for (unsigned int ref_idx = 0; ref_idx < kMaxVp9RefPics; ++ref_idx) {
- if (settings.layer[layer_idx].upd_buf == refs[ref_idx]) {
- switch (ref_idx) {
- case 0: {
- layer_flags &= ~VP8_EFLAG_NO_UPD_LAST;
- break;
- }
- case 1: {
- layer_flags &= ~VP8_EFLAG_NO_UPD_GF;
- break;
- }
- case 2: {
- layer_flags &= ~VP8_EFLAG_NO_UPD_ARF;
- break;
- }
- }
- upd_buf_same_as_a_ref = true;
- break;
- }
- }
- if (!upd_buf_same_as_a_ref) {
- // If we have three references and a buffer is specified to be
- // updated, then that buffer must be the same as one of the
- // three references.
- RTC_CHECK_LT(num_ref_pics_[layer_idx], kMaxVp9RefPics);
-
- sf_conf.alt_fb_idx[layer_idx] = settings.layer[layer_idx].upd_buf;
- layer_flags ^= VP8_EFLAG_NO_UPD_ARF;
- }
-
- int updated_buffer = settings.layer[layer_idx].upd_buf;
- buffer_updated_at_frame_[updated_buffer] = frames_encoded_;
- sf_conf.frame_flags[layer_idx] = layer_flags;
- }
- }
- }
- ++frames_encoded_;
- return sf_conf;
-}
-
int VP9EncoderImpl::SetChannelParameters(uint32_t packet_loss, int64_t rtt) {
return WEBRTC_VIDEO_CODEC_OK;
}
diff --git a/modules/video_coding/codecs/vp9/vp9_impl.h b/modules/video_coding/codecs/vp9/vp9_impl.h
index 6da3dce..509f04e 100644
--- a/modules/video_coding/codecs/vp9/vp9_impl.h
+++ b/modules/video_coding/codecs/vp9/vp9_impl.h
@@ -24,8 +24,6 @@
namespace webrtc {
-class ScreenshareLayersVP9;
-
class VP9EncoderImpl : public VP9Encoder {
public:
VP9EncoderImpl();
@@ -51,20 +49,6 @@
const char* ImplementationName() const override;
- struct LayerFrameRefSettings {
- int8_t upd_buf = -1; // -1 - no update, 0..7 - update buffer 0..7
- int8_t ref_buf1 = -1; // -1 - no reference, 0..7 - reference buffer 0..7
- int8_t ref_buf2 = -1; // -1 - no reference, 0..7 - reference buffer 0..7
- int8_t ref_buf3 = -1; // -1 - no reference, 0..7 - reference buffer 0..7
- };
-
- struct SuperFrameRefSettings {
- LayerFrameRefSettings layer[kMaxVp9NumberOfSpatialLayers];
- uint8_t start_layer = 0; // The first spatial layer to be encoded.
- uint8_t stop_layer = 0; // The last spatial layer to be encoded.
- bool is_keyframe = false;
- };
-
private:
// Determine number of encoder threads to use.
int NumberOfThreads(int width, int height, int number_of_cores);
@@ -80,15 +64,6 @@
bool ExplicitlyConfiguredSpatialLayers() const;
bool SetSvcRates(const VideoBitrateAllocation& bitrate_allocation);
- // Used for flexible mode to set the flags and buffer references used
- // by the encoder. Also calculates the references used by the RTP
- // packetizer.
- //
- // Has to be called for every frame (keyframes included) to update the
- // state used to calculate references.
- vpx_svc_ref_frame_config GenerateRefsAndFlags(
- const SuperFrameRefSettings& settings);
-
virtual int GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt);
// Callback function for outputting packets per spatial layer.
@@ -97,6 +72,8 @@
void DeliverBufferedFrame(bool end_of_picture);
+ bool DropFrame(uint32_t rtp_timestamp);
+
// Determine maximum target for Intra frames
//
// Input:
@@ -128,11 +105,6 @@
// Used for flexible mode.
bool is_flexible_mode_;
- int64_t buffer_updated_at_frame_[kNumVp9Buffers];
- int64_t frames_encoded_;
- uint8_t num_ref_pics_[kMaxVp9NumberOfSpatialLayers];
- uint8_t p_diff_[kMaxVp9NumberOfSpatialLayers][kMaxVp9RefPics];
- std::unique_ptr<ScreenshareLayersVP9> spatial_layer_;
};
class VP9DecoderImpl : public VP9Decoder {
diff --git a/modules/video_coding/codecs/vp9/vp9_screenshare_layers_unittest.cc b/modules/video_coding/codecs/vp9/vp9_screenshare_layers_unittest.cc
deleted file mode 100644
index 5ef7470..0000000
--- a/modules/video_coding/codecs/vp9/vp9_screenshare_layers_unittest.cc
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <limits>
-#include <memory>
-
-#include "vpx/vp8cx.h"
-#include "modules/video_coding/codecs/vp9/screenshare_layers.h"
-#include "modules/video_coding/codecs/vp9/vp9_impl.h"
-#include "rtc_base/logging.h"
-#include "system_wrappers/include/clock.h"
-#include "test/gtest.h"
-
-namespace webrtc {
-
-typedef VP9EncoderImpl::SuperFrameRefSettings Settings;
-
-const uint32_t kTickFrequency = 90000;
-
-class ScreenshareLayerTestVP9 : public ::testing::Test {
- protected:
- ScreenshareLayerTestVP9() : clock_(0) {}
- virtual ~ScreenshareLayerTestVP9() {}
-
- void InitScreenshareLayers(int layers) {
- layers_.reset(new ScreenshareLayersVP9(layers));
- }
-
- void ConfigureBitrateForLayer(int kbps, uint8_t layer_id) {
- layers_->ConfigureBitrate(kbps, layer_id);
- }
-
- void AdvanceTime(int64_t milliseconds) {
- clock_.AdvanceTimeMilliseconds(milliseconds);
- }
-
- void AddKilobitsToLayer(int kilobits, uint8_t layer_id) {
- layers_->LayerFrameEncoded(kilobits * 1000 / 8, layer_id);
- }
-
- void EqualRefsForLayer(const Settings& actual, uint8_t layer_id) {
- EXPECT_EQ(expected_.layer[layer_id].upd_buf,
- actual.layer[layer_id].upd_buf);
- EXPECT_EQ(expected_.layer[layer_id].ref_buf1,
- actual.layer[layer_id].ref_buf1);
- EXPECT_EQ(expected_.layer[layer_id].ref_buf2,
- actual.layer[layer_id].ref_buf2);
- EXPECT_EQ(expected_.layer[layer_id].ref_buf3,
- actual.layer[layer_id].ref_buf3);
- }
-
- void EqualRefs(const Settings& actual) {
- for (unsigned int layer_id = 0; layer_id < kMaxVp9NumberOfSpatialLayers;
- ++layer_id) {
- EqualRefsForLayer(actual, layer_id);
- }
- }
-
- void EqualStartStopKeyframe(const Settings& actual) {
- EXPECT_EQ(expected_.start_layer, actual.start_layer);
- EXPECT_EQ(expected_.stop_layer, actual.stop_layer);
- EXPECT_EQ(expected_.is_keyframe, actual.is_keyframe);
- }
-
- // Check that the settings returned by GetSuperFrameSettings() is
- // equal to the expected_ settings.
- void EqualToExpected() {
- uint32_t frame_timestamp_ =
- clock_.TimeInMilliseconds() * (kTickFrequency / 1000);
- Settings actual =
- layers_->GetSuperFrameSettings(frame_timestamp_, expected_.is_keyframe);
- EqualRefs(actual);
- EqualStartStopKeyframe(actual);
- }
-
- Settings expected_;
- SimulatedClock clock_;
- std::unique_ptr<ScreenshareLayersVP9> layers_;
-};
-
-TEST_F(ScreenshareLayerTestVP9, NoRefsOnKeyFrame) {
- const int kNumLayers = kMaxVp9NumberOfSpatialLayers;
- InitScreenshareLayers(kNumLayers);
- expected_.start_layer = 0;
- expected_.stop_layer = kNumLayers - 1;
-
- for (int l = 0; l < kNumLayers; ++l) {
- expected_.layer[l].upd_buf = l;
- }
- expected_.is_keyframe = true;
- EqualToExpected();
-
- for (int l = 0; l < kNumLayers; ++l) {
- expected_.layer[l].ref_buf1 = l;
- }
- expected_.is_keyframe = false;
- EqualToExpected();
-}
-
-// Test if it is possible to send at a high bitrate (over the threshold)
-// after a longer period of low bitrate. This should not be possible.
-TEST_F(ScreenshareLayerTestVP9, DontAccumelateAvailableBitsOverTime) {
- InitScreenshareLayers(2);
- ConfigureBitrateForLayer(100, 0);
-
- expected_.layer[0].upd_buf = 0;
- expected_.layer[0].ref_buf1 = 0;
- expected_.layer[1].upd_buf = 1;
- expected_.layer[1].ref_buf1 = 1;
- expected_.start_layer = 0;
- expected_.stop_layer = 1;
-
- // Send 10 frames at a low bitrate (50 kbps)
- for (int i = 0; i < 10; ++i) {
- AdvanceTime(200);
- EqualToExpected();
- AddKilobitsToLayer(10, 0);
- }
-
- AdvanceTime(200);
- EqualToExpected();
- AddKilobitsToLayer(301, 0);
-
- // Send 10 frames at a high bitrate (200 kbps)
- expected_.start_layer = 1;
- for (int i = 0; i < 10; ++i) {
- AdvanceTime(200);
- EqualToExpected();
- AddKilobitsToLayer(40, 1);
- }
-}
-
-// Test if used bits are accumelated over layers, as they should;
-TEST_F(ScreenshareLayerTestVP9, AccumelateUsedBitsOverLayers) {
- const int kNumLayers = kMaxVp9NumberOfSpatialLayers;
- InitScreenshareLayers(kNumLayers);
- for (int l = 0; l < kNumLayers - 1; ++l)
- ConfigureBitrateForLayer(100, l);
- for (int l = 0; l < kNumLayers; ++l) {
- expected_.layer[l].upd_buf = l;
- expected_.layer[l].ref_buf1 = l;
- }
-
- expected_.start_layer = 0;
- expected_.stop_layer = kNumLayers - 1;
- EqualToExpected();
-
- for (int layer = 0; layer < kNumLayers - 1; ++layer) {
- expected_.start_layer = layer;
- EqualToExpected();
- AddKilobitsToLayer(101, layer);
- }
-}
-
-// General testing of the bitrate controller.
-TEST_F(ScreenshareLayerTestVP9, 2LayerBitrate) {
- InitScreenshareLayers(2);
- ConfigureBitrateForLayer(100, 0);
-
- expected_.layer[0].upd_buf = 0;
- expected_.layer[1].upd_buf = 1;
- expected_.layer[0].ref_buf1 = -1;
- expected_.layer[1].ref_buf1 = -1;
- expected_.start_layer = 0;
- expected_.stop_layer = 1;
-
- expected_.is_keyframe = true;
- EqualToExpected();
- AddKilobitsToLayer(100, 0);
-
- expected_.layer[0].ref_buf1 = 0;
- expected_.layer[1].ref_buf1 = 1;
- expected_.is_keyframe = false;
- AdvanceTime(199);
- EqualToExpected();
- AddKilobitsToLayer(100, 0);
-
- expected_.start_layer = 1;
- for (int frame = 0; frame < 3; ++frame) {
- AdvanceTime(200);
- EqualToExpected();
- AddKilobitsToLayer(100, 1);
- }
-
- // Just before enough bits become available for L0 @0.999 seconds.
- AdvanceTime(199);
- EqualToExpected();
- AddKilobitsToLayer(100, 1);
-
- // Just after enough bits become available for L0 @1.0001 seconds.
- expected_.start_layer = 0;
- AdvanceTime(2);
- EqualToExpected();
- AddKilobitsToLayer(100, 0);
-
- // Keyframes always encode all layers, even if it is over budget.
- expected_.layer[0].ref_buf1 = -1;
- expected_.layer[1].ref_buf1 = -1;
- expected_.is_keyframe = true;
- AdvanceTime(499);
- EqualToExpected();
- expected_.layer[0].ref_buf1 = 0;
- expected_.layer[1].ref_buf1 = 1;
- expected_.start_layer = 1;
- expected_.is_keyframe = false;
- EqualToExpected();
- AddKilobitsToLayer(100, 0);
-
- // 400 kb in L0 --> @3 second mark to fall below the threshold..
- // just before @2.999 seconds.
- expected_.is_keyframe = false;
- AdvanceTime(1499);
- EqualToExpected();
- AddKilobitsToLayer(100, 1);
-
- // just after @3.001 seconds.
- expected_.start_layer = 0;
- AdvanceTime(2);
- EqualToExpected();
- AddKilobitsToLayer(100, 0);
-}
-
-// General testing of the bitrate controller.
-TEST_F(ScreenshareLayerTestVP9, 3LayerBitrate) {
- InitScreenshareLayers(3);
- ConfigureBitrateForLayer(100, 0);
- ConfigureBitrateForLayer(100, 1);
-
- for (int l = 0; l < 3; ++l) {
- expected_.layer[l].upd_buf = l;
- expected_.layer[l].ref_buf1 = l;
- }
- expected_.start_layer = 0;
- expected_.stop_layer = 2;
-
- EqualToExpected();
- AddKilobitsToLayer(105, 0);
- AddKilobitsToLayer(30, 1);
-
- AdvanceTime(199);
- EqualToExpected();
- AddKilobitsToLayer(105, 0);
- AddKilobitsToLayer(30, 1);
-
- expected_.start_layer = 1;
- AdvanceTime(200);
- EqualToExpected();
- AddKilobitsToLayer(130, 1);
-
- expected_.start_layer = 2;
- AdvanceTime(200);
- EqualToExpected();
-
- // 400 kb in L1 --> @1.0 second mark to fall below threshold.
- // 210 kb in L0 --> @1.1 second mark to fall below threshold.
- // Just before L1 @0.999 seconds.
- AdvanceTime(399);
- EqualToExpected();
-
- // Just after L1 @1.001 seconds.
- expected_.start_layer = 1;
- AdvanceTime(2);
- EqualToExpected();
-
- // Just before L0 @1.099 seconds.
- AdvanceTime(99);
- EqualToExpected();
-
- // Just after L0 @1.101 seconds.
- expected_.start_layer = 0;
- AdvanceTime(2);
- EqualToExpected();
-
- // @1.1 seconds
- AdvanceTime(99);
- EqualToExpected();
- AddKilobitsToLayer(200, 1);
-
- expected_.is_keyframe = true;
- for (int l = 0; l < 3; ++l)
- expected_.layer[l].ref_buf1 = -1;
- AdvanceTime(200);
- EqualToExpected();
-
- expected_.is_keyframe = false;
- expected_.start_layer = 2;
- for (int l = 0; l < 3; ++l)
- expected_.layer[l].ref_buf1 = l;
- AdvanceTime(200);
- EqualToExpected();
-}
-
-// Test that the bitrate calculations are
-// correct when the timestamp wrap.
-TEST_F(ScreenshareLayerTestVP9, TimestampWrap) {
- InitScreenshareLayers(2);
- ConfigureBitrateForLayer(100, 0);
-
- expected_.layer[0].upd_buf = 0;
- expected_.layer[0].ref_buf1 = 0;
- expected_.layer[1].upd_buf = 1;
- expected_.layer[1].ref_buf1 = 1;
- expected_.start_layer = 0;
- expected_.stop_layer = 1;
-
- // Advance time to just before the timestamp wraps.
- AdvanceTime(std::numeric_limits<uint32_t>::max() / (kTickFrequency / 1000));
- EqualToExpected();
- AddKilobitsToLayer(200, 0);
-
- // Wrap
- expected_.start_layer = 1;
- AdvanceTime(1);
- EqualToExpected();
-}
-
-} // namespace webrtc
diff --git a/modules/video_coding/video_codec_initializer.cc b/modules/video_coding/video_codec_initializer.cc
index adc7f32..f122070 100644
--- a/modules/video_coding/video_codec_initializer.cc
+++ b/modules/video_coding/video_codec_initializer.cc
@@ -195,54 +195,46 @@
RTC_DCHECK_LE(video_codec.VP9()->numberOfTemporalLayers,
kMaxTemporalStreams);
- if (video_codec.mode == kScreensharing &&
- config.encoder_specific_settings) {
- video_codec.VP9()->flexibleMode = true;
- // For now VP9 screensharing use 1 temporal and 2 spatial layers.
- RTC_DCHECK_EQ(1, video_codec.VP9()->numberOfTemporalLayers);
- RTC_DCHECK_EQ(2, video_codec.VP9()->numberOfSpatialLayers);
+ RTC_DCHECK(config.spatial_layers.empty() ||
+ config.spatial_layers.size() ==
+ video_codec.VP9()->numberOfSpatialLayers);
+
+ std::vector<SpatialLayer> spatial_layers;
+ if (!config.spatial_layers.empty()) {
+ // Layering is set explicitly.
+ spatial_layers = config.spatial_layers;
} else {
- RTC_DCHECK(config.spatial_layers.empty() ||
- config.spatial_layers.size() ==
- video_codec.VP9()->numberOfSpatialLayers);
+ spatial_layers = GetSvcConfig(video_codec.width, video_codec.height,
+ video_codec.VP9()->numberOfSpatialLayers,
+ video_codec.VP9()->numberOfTemporalLayers,
+ video_codec.mode == kScreensharing);
- std::vector<SpatialLayer> spatial_layers;
- if (!config.spatial_layers.empty()) {
- // Layering is set explicitly.
- spatial_layers = config.spatial_layers;
- } else {
- spatial_layers =
- GetSvcConfig(video_codec.width, video_codec.height,
- video_codec.VP9()->numberOfSpatialLayers,
- video_codec.VP9()->numberOfTemporalLayers, false);
-
- const bool no_spatial_layering = (spatial_layers.size() == 1);
- if (no_spatial_layering) {
- // Use codec's bitrate limits.
- spatial_layers.back().minBitrate = video_codec.minBitrate;
- spatial_layers.back().maxBitrate = video_codec.maxBitrate;
- }
+ const bool no_spatial_layering = (spatial_layers.size() == 1);
+ if (no_spatial_layering) {
+ // Use codec's bitrate limits.
+ spatial_layers.back().minBitrate = video_codec.minBitrate;
+ spatial_layers.back().maxBitrate = video_codec.maxBitrate;
}
-
- RTC_DCHECK(!spatial_layers.empty());
- for (size_t i = 0; i < spatial_layers.size(); ++i) {
- video_codec.spatialLayers[i] = spatial_layers[i];
- }
-
- // Update layering settings.
- video_codec.VP9()->numberOfSpatialLayers =
- static_cast<unsigned char>(spatial_layers.size());
- RTC_DCHECK_GE(video_codec.VP9()->numberOfSpatialLayers, 1);
- RTC_DCHECK_LE(video_codec.VP9()->numberOfSpatialLayers,
- kMaxSpatialLayers);
-
- video_codec.VP9()->numberOfTemporalLayers = static_cast<unsigned char>(
- spatial_layers.back().numberOfTemporalLayers);
- RTC_DCHECK_GE(video_codec.VP9()->numberOfTemporalLayers, 1);
- RTC_DCHECK_LE(video_codec.VP9()->numberOfTemporalLayers,
- kMaxTemporalStreams);
}
+ RTC_DCHECK(!spatial_layers.empty());
+ for (size_t i = 0; i < spatial_layers.size(); ++i) {
+ video_codec.spatialLayers[i] = spatial_layers[i];
+ }
+
+ // Update layering settings.
+ video_codec.VP9()->numberOfSpatialLayers =
+ static_cast<unsigned char>(spatial_layers.size());
+ RTC_DCHECK_GE(video_codec.VP9()->numberOfSpatialLayers, 1);
+ RTC_DCHECK_LE(video_codec.VP9()->numberOfSpatialLayers,
+ kMaxSpatialLayers);
+
+ video_codec.VP9()->numberOfTemporalLayers = static_cast<unsigned char>(
+ spatial_layers.back().numberOfTemporalLayers);
+ RTC_DCHECK_GE(video_codec.VP9()->numberOfTemporalLayers, 1);
+ RTC_DCHECK_LE(video_codec.VP9()->numberOfTemporalLayers,
+ kMaxTemporalStreams);
+
break;
}
case kVideoCodecH264: {
diff --git a/video/video_send_stream_tests.cc b/video/video_send_stream_tests.cc
index 4a72e25..6775813 100644
--- a/video/video_send_stream_tests.cc
+++ b/video/video_send_stream_tests.cc
@@ -3571,7 +3571,10 @@
// Crashes on Android; bugs.webrtc.org/7401
#define MAYBE_Vp9FlexModeRefCount DISABLED_Vp9FlexModeRefCount
#else
-#define MAYBE_Vp9FlexModeRefCount Vp9FlexModeRefCount
+// TODO(webrtc:9270): Support for flexible mode is temporarily disabled.
+// Enable the test after webrtc:9270 is implemented.
+#define MAYBE_Vp9FlexModeRefCount DISABLED_Vp9FlexModeRefCount
+// #define MAYBE_Vp9FlexModeRefCount Vp9FlexModeRefCount
#endif
TEST_F(VideoSendStreamTest, MAYBE_Vp9FlexModeRefCount) {
class FlexibleMode : public Vp9HeaderObserver {