Add support so that requested resolution alignment also applies to scaled layers.
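
When EncoderInfo::apply_alignment_to_all_simulcast_layers is set, the new
AlignmentAdjuster may adjust the configured scale_resolution_down_by factors
and increase the reported alignment (limited by kMaxAlignment = 16) so that
every simulcast layer resolution is divisible by
requested_resolution_alignment. For example (from
alignment_adjuster_unittest.cc), with a requested alignment of 2 and scale
factors {2.9, 2.1}, the factors are adjusted to {3.0, 2.0} and the applied
alignment becomes 12. SimulcastEncoderAdapter and MultiplexEncoderAdapter
report the flag as true if any underlying encoder sets it.
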
Bug: webrtc:11872
Change-Id: I7f904e2765330ee93270b66b0102ce57f336f9a0
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/181883
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Reviewed-by: Rasmus Brandt <brandtr@webrtc.org>
Commit-Queue: Åsa Persson <asapersson@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#32146}
diff --git a/api/video_codecs/video_encoder.cc b/api/video_codecs/video_encoder.cc
index 4427d6c..da22746 100644
--- a/api/video_codecs/video_encoder.cc
+++ b/api/video_codecs/video_encoder.cc
@@ -94,6 +94,7 @@
VideoEncoder::EncoderInfo::EncoderInfo()
: scaling_settings(VideoEncoder::ScalingSettings::kOff),
requested_resolution_alignment(1),
+ apply_alignment_to_all_simulcast_layers(false),
supports_native_handle(false),
implementation_name("unknown"),
has_trusted_rate_controller(false),
@@ -123,6 +124,8 @@
oss << "min_pixels_per_frame = " << scaling_settings.min_pixels_per_frame
<< " }";
oss << ", requested_resolution_alignment = " << requested_resolution_alignment
+ << ", apply_alignment_to_all_simulcast_layers = "
+ << apply_alignment_to_all_simulcast_layers
<< ", supports_native_handle = " << supports_native_handle
<< ", implementation_name = '" << implementation_name
<< "'"
diff --git a/api/video_codecs/video_encoder.h b/api/video_codecs/video_encoder.h
index 1ea19c6..d73c9f0 100644
--- a/api/video_codecs/video_encoder.h
+++ b/api/video_codecs/video_encoder.h
@@ -174,6 +174,15 @@
// requirements the encoder has on the incoming video frame buffers.
int requested_resolution_alignment;
+ // Same as above, but if true the resolution of each simulcast layer must
+ // also be divisible by |requested_resolution_alignment|.
+ // Note that the scale factors |scale_resolution_down_by| may be adjusted to
+ // a common multiple so that the resulting alignment is not too large, to
+ // avoid heavily cropped frames and an aspect ratio far from the original.
+ // Warning: large values of scale_resolution_down_by could be changed
+ // considerably, especially if |requested_resolution_alignment| is large.
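+ // E.g. with |requested_resolution_alignment| = 2, a configured scale factor
+ // of 9999 may be reduced to 8 since the adjusted alignment is capped at 16.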
+ bool apply_alignment_to_all_simulcast_layers;
+
// If true, encoder supports working with a native handle (e.g. texture
// handle for hw codecs) rather than requiring a raw I420 buffer.
bool supports_native_handle;
diff --git a/api/video_codecs/video_encoder_config.cc b/api/video_codecs/video_encoder_config.cc
index 19b0135..5956d60 100644
--- a/api/video_codecs/video_encoder_config.cc
+++ b/api/video_codecs/video_encoder_config.cc
@@ -43,6 +43,7 @@
ss << ", num_temporal_layers: " << num_temporal_layers.value_or(1);
ss << ", bitrate_priority: " << bitrate_priority.value_or(0);
ss << ", active: " << active;
+ ss << ", scale_down_by: " << scale_resolution_down_by;
return ss.str();
}
diff --git a/media/engine/simulcast_encoder_adapter.cc b/media/engine/simulcast_encoder_adapter.cc
index 7706bd9..6e937578 100644
--- a/media/engine/simulcast_encoder_adapter.cc
+++ b/media/engine/simulcast_encoder_adapter.cc
@@ -642,6 +642,7 @@
VideoEncoder::EncoderInfo encoder_info;
encoder_info.implementation_name = "SimulcastEncoderAdapter";
encoder_info.requested_resolution_alignment = 1;
+ encoder_info.apply_alignment_to_all_simulcast_layers = false;
encoder_info.supports_native_handle = true;
encoder_info.scaling_settings.thresholds = absl::nullopt;
if (streaminfos_.empty()) {
@@ -693,6 +694,9 @@
encoder_info.requested_resolution_alignment = cricket::LeastCommonMultiple(
encoder_info.requested_resolution_alignment,
encoder_impl_info.requested_resolution_alignment);
+ if (encoder_impl_info.apply_alignment_to_all_simulcast_layers) {
+ encoder_info.apply_alignment_to_all_simulcast_layers = true;
+ }
if (num_active_streams == 1 && codec_.simulcastStream[i].active) {
encoder_info.scaling_settings = encoder_impl_info.scaling_settings;
}
diff --git a/media/engine/simulcast_encoder_adapter_unittest.cc b/media/engine/simulcast_encoder_adapter_unittest.cc
index 79b669c..24686e8 100644
--- a/media/engine/simulcast_encoder_adapter_unittest.cc
+++ b/media/engine/simulcast_encoder_adapter_unittest.cc
@@ -232,6 +232,8 @@
info.implementation_name = implementation_name_;
info.scaling_settings = scaling_settings_;
info.requested_resolution_alignment = requested_resolution_alignment_;
+ info.apply_alignment_to_all_simulcast_layers =
+ apply_alignment_to_all_simulcast_layers_;
info.has_trusted_rate_controller = has_trusted_rate_controller_;
info.is_hardware_accelerated = is_hardware_accelerated_;
info.has_internal_source = has_internal_source_;
@@ -274,6 +276,10 @@
requested_resolution_alignment_ = requested_resolution_alignment;
}
+ void set_apply_alignment_to_all_simulcast_layers(bool apply) {
+ apply_alignment_to_all_simulcast_layers_ = apply;
+ }
+
void set_has_trusted_rate_controller(bool trusted) {
has_trusted_rate_controller_ = trusted;
}
@@ -310,6 +316,7 @@
std::string implementation_name_ = "unknown";
VideoEncoder::ScalingSettings scaling_settings_;
int requested_resolution_alignment_ = 1;
+ bool apply_alignment_to_all_simulcast_layers_ = false;
bool has_trusted_rate_controller_ = false;
bool is_hardware_accelerated_ = false;
bool has_internal_source_ = false;
@@ -1259,6 +1266,31 @@
EXPECT_EQ(adapter_->GetEncoderInfo().requested_resolution_alignment, 28);
}
+TEST_F(TestSimulcastEncoderAdapterFake,
+ ReportsApplyAlignmentToSimulcastLayers) {
+ SimulcastTestFixtureImpl::DefaultSettings(
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile),
+ kVideoCodecVP8);
+ codec_.numberOfSimulcastStreams = 3;
+
+ // No encoder has apply_alignment_to_all_simulcast_layers set; report false.
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ ASSERT_EQ(3u, helper_->factory()->encoders().size());
+ for (MockVideoEncoder* encoder : helper_->factory()->encoders()) {
+ encoder->set_apply_alignment_to_all_simulcast_layers(false);
+ }
+ EXPECT_FALSE(
+ adapter_->GetEncoderInfo().apply_alignment_to_all_simulcast_layers);
+
+ // One encoder has apply_alignment_to_all_simulcast_layers set; report true.
+ helper_->factory()
+ ->encoders()[1]
+ ->set_apply_alignment_to_all_simulcast_layers(true);
+ EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings));
+ EXPECT_TRUE(
+ adapter_->GetEncoderInfo().apply_alignment_to_all_simulcast_layers);
+}
+
TEST_F(TestSimulcastEncoderAdapterFake, ReportsInternalSource) {
SimulcastTestFixtureImpl::DefaultSettings(
&codec_, static_cast<const int*>(kTestTemporalLayerProfile),
diff --git a/media/engine/webrtc_video_engine.cc b/media/engine/webrtc_video_engine.cc
index fa46e92..0c1c0cc 100644
--- a/media/engine/webrtc_video_engine.cc
+++ b/media/engine/webrtc_video_engine.cc
@@ -35,6 +35,7 @@
#include "rtc_base/experiments/field_trial_parser.h"
#include "rtc_base/experiments/field_trial_units.h"
#include "rtc_base/experiments/min_video_bitrate_experiment.h"
+#include "rtc_base/experiments/normalize_simulcast_size_experiment.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/strings/string_builder.h"
@@ -77,6 +78,20 @@
return webrtc::field_trial::IsEnabled("WebRTC-FlexFEC-03-Advertised");
}
+bool PowerOfTwo(int value) {
+ return (value > 0) && ((value & (value - 1)) == 0);
+}
+
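+// Returns true if all scale factors are integer powers of two (i.e. the
+// default simulcast downscaling of 1, 2, 4, ...). Unset factors (-1) count
+// as 1.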
+bool IsScaleFactorsPowerOfTwo(const webrtc::VideoEncoderConfig& config) {
+ for (const auto& layer : config.simulcast_layers) {
+ double scale = std::max(layer.scale_resolution_down_by, 1.0);
+ if (std::round(scale) != scale || !PowerOfTwo(scale)) {
+ return false;
+ }
+ }
+ return true;
+}
+
void AddDefaultFeedbackParams(VideoCodec* codec) {
// Don't add any feedback params for RED and ULPFEC.
if (codec->name == kRedCodecName || codec->name == kUlpfecCodecName)
@@ -3530,10 +3545,22 @@
encoder_config.simulcast_layers, [](const webrtc::VideoStream& layer) {
return layer.scale_resolution_down_by != -1.;
});
+
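+  // Only normalize the simulcast resolutions if the default power-of-two
+  // scale factors are used or if NormalizeSimulcastSizeExperiment is
+  // explicitly configured; otherwise keep the configured dimensions.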
+ bool default_scale_factors_used = true;
+ if (has_scale_resolution_down_by) {
+ default_scale_factors_used = IsScaleFactorsPowerOfTwo(encoder_config);
+ }
+ const bool norm_size_configured =
+ webrtc::NormalizeSimulcastSizeExperiment::GetBase2Exponent().has_value();
const int normalized_width =
- NormalizeSimulcastSize(width, encoder_config.number_of_streams);
+ (default_scale_factors_used || norm_size_configured)
+ ? NormalizeSimulcastSize(width, encoder_config.number_of_streams)
+ : width;
const int normalized_height =
- NormalizeSimulcastSize(height, encoder_config.number_of_streams);
+ (default_scale_factors_used || norm_size_configured)
+ ? NormalizeSimulcastSize(height, encoder_config.number_of_streams)
+ : height;
+
for (size_t i = 0; i < layers.size(); ++i) {
layers[i].active = encoder_config.simulcast_layers[i].active;
// Update with configured num temporal layers if supported by codec.
diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
index 6e21385..0620a78 100644
--- a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
+++ b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
@@ -101,6 +101,7 @@
encoder_info_ = EncoderInfo();
encoder_info_.implementation_name = "MultiplexEncoderAdapter (";
encoder_info_.requested_resolution_alignment = 1;
+ encoder_info_.apply_alignment_to_all_simulcast_layers = false;
// This needs to be false so that we can do the split in Encode().
encoder_info_.supports_native_handle = false;
@@ -137,6 +138,10 @@
encoder_info_.requested_resolution_alignment,
encoder_impl_info.requested_resolution_alignment);
+ if (encoder_impl_info.apply_alignment_to_all_simulcast_layers) {
+ encoder_info_.apply_alignment_to_all_simulcast_layers = true;
+ }
+
encoder_info_.has_internal_source = false;
encoders_.emplace_back(std::move(encoder));
diff --git a/video/BUILD.gn b/video/BUILD.gn
index bcec102..06ef027 100644
--- a/video/BUILD.gn
+++ b/video/BUILD.gn
@@ -201,6 +201,8 @@
# visibility = [ "../api/video:video_stream_encoder_create" ]
sources = [
+ "alignment_adjuster.cc",
+ "alignment_adjuster.h",
"encoder_bitrate_adjuster.cc",
"encoder_bitrate_adjuster.h",
"encoder_overshoot_detector.cc",
@@ -515,6 +517,7 @@
defines = []
sources = [
+ "alignment_adjuster_unittest.cc",
"buffered_frame_decryptor_unittest.cc",
"call_stats2_unittest.cc",
"call_stats_unittest.cc",
diff --git a/video/alignment_adjuster.cc b/video/alignment_adjuster.cc
new file mode 100644
index 0000000..b08f2f1
--- /dev/null
+++ b/video/alignment_adjuster.cc
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/alignment_adjuster.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "absl/algorithm/container.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+// Round each scale factor to the closest rational of the form alignment/i,
+// where i is a multiple of |requested_alignment|. Each resolution divisible by
+// |alignment| will be divisible by |requested_alignment| after the scale factor
+// is applied.
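+// E.g. with |alignment| = 8 and |requested_alignment| = 2, the candidate scale
+// factors are 8/2, 8/4, 8/6 and 8/8.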
+double RoundToMultiple(int alignment,
+ int requested_alignment,
+ VideoEncoderConfig* config,
+ bool update_config) {
+ double diff = 0.0;
+ for (auto& layer : config->simulcast_layers) {
+ double min_dist = std::numeric_limits<double>::max();
+ double new_scale = 1.0;
+ for (int i = requested_alignment; i <= alignment;
+ i += requested_alignment) {
+ double dist = std::abs(layer.scale_resolution_down_by -
+ alignment / static_cast<double>(i));
+ if (dist <= min_dist) {
+ min_dist = dist;
+ new_scale = alignment / static_cast<double>(i);
+ }
+ }
+ diff += std::abs(layer.scale_resolution_down_by - new_scale);
+ if (update_config) {
+ RTC_LOG(LS_INFO) << "scale_resolution_down_by "
+ << layer.scale_resolution_down_by << " -> " << new_scale;
+ layer.scale_resolution_down_by = new_scale;
+ }
+ }
+ return diff;
+}
+} // namespace
+
+// Input: encoder_info.requested_resolution_alignment (K)
+// Input: encoder_info.apply_alignment_to_all_simulcast_layers (B)
+// Input: vector config->simulcast_layers.scale_resolution_down_by (S[i])
+// Output:
+// If B is false, returns K and does not adjust scaling factors.
+// Otherwise, returns the adjusted alignment (A); the adjusted scaling factors
+// (S'[i]) are written to |config| such that:
+//
+// A / S'[i] are integers divisible by K
+// sum abs(S'[i] - S[i]) -> min
+// A is an integer <= 16
+//
+// The solution chooses the closest S'[i] of the form A / j, where j is a
+// multiple of K.
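+//
+// Example (from alignment_adjuster_unittest.cc): K = 2 and S = {1.0, 1.3}
+// gives A = 8 and S' = {8/8, 8/6} = {1.0, 4/3}.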
+
+int AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(
+ const VideoEncoder::EncoderInfo& encoder_info,
+ VideoEncoderConfig* config) {
+ const int requested_alignment = encoder_info.requested_resolution_alignment;
+ if (!encoder_info.apply_alignment_to_all_simulcast_layers) {
+ return requested_alignment;
+ }
+
+ if (requested_alignment < 1 || config->number_of_streams <= 1 ||
+ config->simulcast_layers.size() <= 1) {
+ return requested_alignment;
+ }
+
+ // Update alignment to also apply to simulcast layers.
+ const bool has_scale_resolution_down_by = absl::c_any_of(
+ config->simulcast_layers, [](const webrtc::VideoStream& layer) {
+ return layer.scale_resolution_down_by >= 1.0;
+ });
+
+ if (!has_scale_resolution_down_by) {
+ // Default resolution downscaling used (scale factors: 1, 2, 4, ...).
+ return requested_alignment * (1 << (config->simulcast_layers.size() - 1));
+ }
+
+ // Get alignment for downscaled layers.
+ // Adjust |scale_resolution_down_by| to a common multiple to limit the
+ // alignment value (avoiding heavily cropped frames and an aspect ratio far
+ // from the original).
+ const int kMaxAlignment = 16;
+
+ for (auto& layer : config->simulcast_layers) {
+ layer.scale_resolution_down_by =
+ std::max(layer.scale_resolution_down_by, 1.0);
+ layer.scale_resolution_down_by =
+ std::min(layer.scale_resolution_down_by, 10000.0);
+ }
+
+ // Decide on common multiple to use.
+ double min_diff = std::numeric_limits<double>::max();
+ int best_alignment = 1;
+ for (int alignment = requested_alignment; alignment <= kMaxAlignment;
+ ++alignment) {
+ double diff = RoundToMultiple(alignment, requested_alignment, config,
+ /*update_config=*/false);
+ if (diff < min_diff) {
+ min_diff = diff;
+ best_alignment = alignment;
+ }
+ }
+ RoundToMultiple(best_alignment, requested_alignment, config,
+ /*update_config=*/true);
+
+ return std::max(best_alignment, requested_alignment);
+}
+} // namespace webrtc
diff --git a/video/alignment_adjuster.h b/video/alignment_adjuster.h
new file mode 100644
index 0000000..53d7927
--- /dev/null
+++ b/video/alignment_adjuster.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ALIGNMENT_ADJUSTER_H_
+#define VIDEO_ALIGNMENT_ADJUSTER_H_
+
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/video_encoder_config.h"
+
+namespace webrtc {
+
+class AlignmentAdjuster {
+ public:
+ // Returns the resolution alignment requested by the encoder (i.e.
+ // |EncoderInfo::requested_resolution_alignment|, which ensures that frames
+ // delivered to the encoder are divisible by this alignment).
+ //
+ // If |EncoderInfo::apply_alignment_to_all_simulcast_layers| is enabled, the
+ // alignment is adjusted to ensure that each simulcast layer is also
+ // divisible by |requested_resolution_alignment|. The configured scale factors
+ // |scale_resolution_down_by| may be adjusted to a common multiple to limit
+ // the alignment value, avoiding heavily cropped frames and an aspect ratio
+ // far from the original.
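+ //
+ // Usage (see video_stream_encoder.cc):
+ //   int alignment =
+ //       AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(
+ //           encoder_->GetEncoderInfo(), &encoder_config_);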
+ static int GetAlignmentAndMaybeAdjustScaleFactors(
+ const VideoEncoder::EncoderInfo& info,
+ VideoEncoderConfig* config);
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ALIGNMENT_ADJUSTER_H_
diff --git a/video/alignment_adjuster_unittest.cc b/video/alignment_adjuster_unittest.cc
new file mode 100644
index 0000000..07c7de5
--- /dev/null
+++ b/video/alignment_adjuster_unittest.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/alignment_adjuster.h"
+
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/encoder_settings.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+VideoEncoder::EncoderInfo GetEncoderInfo(int alignment, bool apply) {
+ VideoEncoder::EncoderInfo info;
+ info.requested_resolution_alignment = alignment;
+ info.apply_alignment_to_all_simulcast_layers = apply;
+ return info;
+}
+} // namespace
+
+class AlignmentAdjusterTest
+ : public ::testing::TestWithParam<::testing::tuple<
+ int,
+ std::tuple<std::vector<double>, std::vector<double>, int>>> {
+ protected:
+ AlignmentAdjusterTest()
+ : kRequestedAlignment(std::get<0>(GetParam())),
+ kScaleFactors(std::get<0>(std::get<1>(GetParam()))),
+ kAdjustedScaleFactors(std::get<1>(std::get<1>(GetParam()))),
+ kAdjustedAlignment(std::get<2>(std::get<1>(GetParam()))) {}
+
+ const int kRequestedAlignment;
+ const std::vector<double> kScaleFactors;
+ const std::vector<double> kAdjustedScaleFactors;
+ const int kAdjustedAlignment;
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ ScaleFactorsAndAlignment,
+ AlignmentAdjusterTest,
+ ::testing::Combine(
+ ::testing::Values(2), // kRequestedAlignment
+ ::testing::Values(
+ std::make_tuple(std::vector<double>{-1.0}, // kScaleFactors
+ std::vector<double>{-1.0}, // kAdjustedScaleFactors
+ 2), // default: {1.0} // kAdjustedAlignment
+ std::make_tuple(std::vector<double>{-1.0, -1.0},
+ std::vector<double>{-1.0, -1.0},
+ 4), // default: {1.0, 2.0}
+ std::make_tuple(std::vector<double>{-1.0, -1.0, -1.0},
+ std::vector<double>{-1.0, -1.0, -1.0},
+ 8), // default: {1.0, 2.0, 4.0}
+ std::make_tuple(std::vector<double>{1.0, 2.0, 4.0},
+ std::vector<double>{1.0, 2.0, 4.0},
+ 8),
+ std::make_tuple(std::vector<double>{9999.0, -1.0, 1.0},
+ std::vector<double>{8.0, 1.0, 1.0},
+ 16), // kMaxAlignment
+ std::make_tuple(std::vector<double>{3.99, 2.01, 1.0},
+ std::vector<double>{4.0, 2.0, 1.0},
+ 8),
+ std::make_tuple(std::vector<double>{2.9, 2.1},
+ std::vector<double>{6.0 / 2.0, 6.0 / 3.0},
+ 12),
+ std::make_tuple(std::vector<double>{4.9, 1.7, 1.2},
+ std::vector<double>{5.0, 5.0 / 3.0, 5.0 / 4.0},
+ 10),
+ std::make_tuple(std::vector<double>{1.0, 1.3},
+ std::vector<double>{4.0 / 4.0, 4.0 / 3.0},
+ 8),
+ std::make_tuple(std::vector<double>{1.75, 3.5},
+ std::vector<double>{7.0 / 4.0, 7.0 / 2.0},
+ 7),
+ std::make_tuple(std::vector<double>{1.5, 2.5},
+ std::vector<double>{1.5, 2.5},
+ 15))));
+
+TEST_P(AlignmentAdjusterTest, AlignmentAppliedToAllLayers) {
+ const bool kApplyAlignmentToAllLayers = true;
+
+ // Fill config with the scaling factor by which to reduce encoding size.
+ const int num_streams = kScaleFactors.size();
+ VideoEncoderConfig config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, num_streams, &config);
+ for (int i = 0; i < num_streams; ++i) {
+ config.simulcast_layers[i].scale_resolution_down_by = kScaleFactors[i];
+ }
+
+ // Verify the returned (adjusted) alignment.
+ VideoEncoder::EncoderInfo info =
+ GetEncoderInfo(kRequestedAlignment, kApplyAlignmentToAllLayers);
+ int alignment =
+ AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(info, &config);
+ EXPECT_EQ(alignment, kAdjustedAlignment);
+
+ // Verify adjusted scale factors.
+ for (int i = 0; i < num_streams; ++i) {
+ EXPECT_EQ(config.simulcast_layers[i].scale_resolution_down_by,
+ kAdjustedScaleFactors[i]);
+ }
+}
+
+TEST_P(AlignmentAdjusterTest, AlignmentNotAppliedToAllLayers) {
+ const bool kApplyAlignmentToAllLayers = false;
+
+ // Fill config with the scaling factor by which to reduce encoding size.
+ const int num_streams = kScaleFactors.size();
+ VideoEncoderConfig config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, num_streams, &config);
+ for (int i = 0; i < num_streams; ++i) {
+ config.simulcast_layers[i].scale_resolution_down_by = kScaleFactors[i];
+ }
+
+ // Verify that the requested alignment is returned and not adjusted.
+ VideoEncoder::EncoderInfo info =
+ GetEncoderInfo(kRequestedAlignment, kApplyAlignmentToAllLayers);
+ int alignment =
+ AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(info, &config);
+ EXPECT_EQ(alignment, kRequestedAlignment);
+
+ // Verify that scale factors are not adjusted.
+ for (int i = 0; i < num_streams; ++i) {
+ EXPECT_EQ(config.simulcast_layers[i].scale_resolution_down_by,
+ kScaleFactors[i]);
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/video/video_stream_encoder.cc b/video/video_stream_encoder.cc
index cb6080b..988816e 100644
--- a/video/video_stream_encoder.cc
+++ b/video/video_stream_encoder.cc
@@ -47,6 +47,7 @@
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/field_trial.h"
#include "video/adaptation/video_stream_encoder_resource_manager.h"
+#include "video/alignment_adjuster.h"
namespace webrtc {
@@ -541,6 +542,36 @@
encoder_switch_requested_ = true;
}
+ bool encoder_reset_required = false;
+ if (pending_encoder_creation_) {
+ // Destroy existing encoder instance before creating a new one. Otherwise
+ // attempt to create another instance will fail if encoder factory
+ // supports only single instance of encoder of given type.
+ encoder_.reset();
+
+ encoder_ = settings_.encoder_factory->CreateVideoEncoder(
+ encoder_config_.video_format);
+ // TODO(nisse): What to do if creating the encoder fails? Crash,
+ // or just discard incoming frames?
+ RTC_CHECK(encoder_);
+
+ if (encoder_selector_) {
+ encoder_selector_->OnCurrentEncoder(encoder_config_.video_format);
+ }
+
+ encoder_->SetFecControllerOverride(fec_controller_override_);
+
+ codec_info_ = settings_.encoder_factory->QueryVideoEncoder(
+ encoder_config_.video_format);
+
+ encoder_reset_required = true;
+ }
+
+ // May adjust the scale_resolution_down_by factors in |encoder_config_| to
+ // limit the alignment value.
+ int alignment = AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors(
+ encoder_->GetEncoderInfo(), &encoder_config_);
+
std::vector<VideoStream> streams =
encoder_config_.video_stream_factory->CreateEncoderStreams(
last_frame_info_->width, last_frame_info_->height, encoder_config_);
@@ -573,31 +604,6 @@
crop_width_ = last_frame_info_->width - highest_stream_width;
crop_height_ = last_frame_info_->height - highest_stream_height;
- bool encoder_reset_required = false;
- if (pending_encoder_creation_) {
- // Destroy existing encoder instance before creating a new one. Otherwise
- // attempt to create another instance will fail if encoder factory
- // supports only single instance of encoder of given type.
- encoder_.reset();
-
- encoder_ = settings_.encoder_factory->CreateVideoEncoder(
- encoder_config_.video_format);
- // TODO(nisse): What to do if creating the encoder fails? Crash,
- // or just discard incoming frames?
- RTC_CHECK(encoder_);
-
- if (encoder_selector_) {
- encoder_selector_->OnCurrentEncoder(encoder_config_.video_format);
- }
-
- encoder_->SetFecControllerOverride(fec_controller_override_);
-
- codec_info_ = settings_.encoder_factory->QueryVideoEncoder(
- encoder_config_.video_format);
-
- encoder_reset_required = true;
- }
-
encoder_bitrate_limits_ =
encoder_->GetEncoderInfo().GetEncoderBitrateLimitsForResolution(
last_frame_info_->width * last_frame_info_->height);
@@ -705,7 +711,6 @@
for (const auto& stream : streams) {
max_framerate = std::max(stream.max_framerate, max_framerate);
}
- int alignment = encoder_->GetEncoderInfo().requested_resolution_alignment;
if (max_framerate != video_source_sink_controller_.frame_rate_upper_limit() ||
alignment != video_source_sink_controller_.resolution_alignment()) {
video_source_sink_controller_.SetFrameRateUpperLimit(max_framerate);
diff --git a/video/video_stream_encoder_unittest.cc b/video/video_stream_encoder_unittest.cc
index 0bab521..c0ff85e 100644
--- a/video/video_stream_encoder_unittest.cc
+++ b/video/video_stream_encoder_unittest.cc
@@ -31,6 +31,7 @@
#include "common_video/h264/h264_common.h"
#include "common_video/include/video_frame_buffer.h"
#include "media/base/video_adapter.h"
+#include "media/engine/webrtc_video_engine.h"
#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
#include "modules/video_coding/utility/quality_scaler.h"
#include "modules/video_coding/utility/simulcast_rate_allocator.h"
@@ -234,7 +235,7 @@
if (last_frame_pixels <= 320 * 240) {
fps_range_matcher = AllOf(Ge(7), Le(10));
- } else if (last_frame_pixels <= 480 * 270) {
+ } else if (last_frame_pixels <= 480 * 360) {
fps_range_matcher = AllOf(Ge(10), Le(15));
} else if (last_frame_pixels <= 640 * 480) {
fps_range_matcher = Ge(15);
@@ -803,6 +804,8 @@
info.resolution_bitrate_limits = resolution_bitrate_limits_;
info.requested_resolution_alignment = requested_resolution_alignment_;
+ info.apply_alignment_to_all_simulcast_layers =
+ apply_alignment_to_all_simulcast_layers_;
return info;
}
@@ -832,6 +835,11 @@
requested_resolution_alignment_ = requested_resolution_alignment;
}
+ void SetApplyAlignmentToAllSimulcastLayers(bool b) {
+ MutexLock lock(&local_mutex_);
+ apply_alignment_to_all_simulcast_layers_ = b;
+ }
+
void SetIsHardwareAccelerated(bool is_hardware_accelerated) {
MutexLock lock(&local_mutex_);
is_hardware_accelerated_ = is_hardware_accelerated;
@@ -918,6 +926,11 @@
return num_set_rates_;
}
+ VideoCodec video_codec() const {
+ MutexLock lock(&local_mutex_);
+ return video_codec_;
+ }
+
private:
int32_t Encode(const VideoFrame& input_image,
const std::vector<VideoFrameType>* frame_types) override {
@@ -971,6 +984,7 @@
EXPECT_EQ(initialized_, EncoderState::kUninitialized);
++num_encoder_initializations_;
+ video_codec_ = *config;
if (config->codecType == kVideoCodecVP8) {
// Simulate setting up temporal layers, in order to validate the life
@@ -1030,6 +1044,8 @@
int last_input_height_ RTC_GUARDED_BY(local_mutex_) = 0;
bool quality_scaling_ RTC_GUARDED_BY(local_mutex_) = true;
int requested_resolution_alignment_ RTC_GUARDED_BY(local_mutex_) = 1;
+ bool apply_alignment_to_all_simulcast_layers_ RTC_GUARDED_BY(local_mutex_) =
+ false;
bool is_hardware_accelerated_ RTC_GUARDED_BY(local_mutex_) = false;
rtc::scoped_refptr<EncodedImageBufferInterface> encoded_image_data_
RTC_GUARDED_BY(local_mutex_);
@@ -1054,6 +1070,7 @@
std::vector<ResolutionBitrateLimits> resolution_bitrate_limits_
RTC_GUARDED_BY(local_mutex_);
int num_set_rates_ RTC_GUARDED_BY(local_mutex_) = 0;
+ VideoCodec video_codec_ RTC_GUARDED_BY(local_mutex_);
};
class TestSink : public VideoStreamEncoder::EncoderSink {
@@ -1098,18 +1115,6 @@
EXPECT_EQ(expected_width, width);
}
- void CheckLastFrameSizeIsMultipleOf(int resolution_alignment) {
- int width = 0;
- int height = 0;
- {
- MutexLock lock(&mutex_);
- width = last_width_;
- height = last_height_;
- }
- EXPECT_EQ(width % resolution_alignment, 0);
- EXPECT_EQ(height % resolution_alignment, 0);
- }
-
void CheckLastFrameRotationMatches(VideoRotation expected_rotation) {
VideoRotation rotation;
{
@@ -1776,30 +1781,88 @@
video_stream_encoder_->Stop();
}
-TEST_F(VideoStreamEncoderTest, SinkWantsResolutionAlignment) {
- constexpr int kRequestedResolutionAlignment = 7;
+class ResolutionAlignmentTest
+ : public VideoStreamEncoderTest,
+ public ::testing::WithParamInterface<
+ ::testing::tuple<int, std::vector<double>>> {
+ public:
+ ResolutionAlignmentTest()
+ : requested_alignment_(::testing::get<0>(GetParam())),
+ scale_factors_(::testing::get<1>(GetParam())) {}
+
+ protected:
+ const int requested_alignment_;
+ const std::vector<double> scale_factors_;
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ AlignmentAndScaleFactors,
+ ResolutionAlignmentTest,
+ ::testing::Combine(
+ ::testing::Values(1, 2, 3, 4, 5, 6, 16, 22), // requested_alignment_
+ ::testing::Values(std::vector<double>{-1.0}, // scale_factors_
+ std::vector<double>{-1.0, -1.0},
+ std::vector<double>{-1.0, -1.0, -1.0},
+ std::vector<double>{4.0, 2.0, 1.0},
+ std::vector<double>{9999.0, -1.0, 1.0},
+ std::vector<double>{3.99, 2.01, 1.0},
+ std::vector<double>{4.9, 1.7, 1.25},
+ std::vector<double>{10.0, 4.0, 3.0},
+ std::vector<double>{1.75, 3.5},
+ std::vector<double>{1.5, 2.5},
+ std::vector<double>{1.3, 1.0})));
+
+TEST_P(ResolutionAlignmentTest, SinkWantsAlignmentApplied) {
+ // Set requested resolution alignment.
video_source_.set_adaptation_enabled(true);
- fake_encoder_.SetRequestedResolutionAlignment(kRequestedResolutionAlignment);
+ fake_encoder_.SetRequestedResolutionAlignment(requested_alignment_);
+ fake_encoder_.SetApplyAlignmentToAllSimulcastLayers(true);
+
+ // Fill config with the scaling factor by which to reduce encoding size.
+ const int num_streams = scale_factors_.size();
+ VideoEncoderConfig config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, num_streams, &config);
+ for (int i = 0; i < num_streams; ++i) {
+ config.simulcast_layers[i].scale_resolution_down_by = scale_factors_[i];
+ }
+ config.video_stream_factory =
+ new rtc::RefCountedObject<cricket::EncoderStreamFactory>(
+ "VP8", /*max qp*/ 56, /*screencast*/ false,
+ /*screenshare enabled*/ false);
+ video_stream_encoder_->ConfigureEncoder(std::move(config), kMaxPayloadLength);
+
video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
- DataRate::BitsPerSec(kTargetBitrateBps),
- DataRate::BitsPerSec(kTargetBitrateBps),
- DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
+ DataRate::BitsPerSec(kSimulcastTargetBitrateBps),
+ DataRate::BitsPerSec(kSimulcastTargetBitrateBps),
+ DataRate::BitsPerSec(kSimulcastTargetBitrateBps), 0, 0, 0);
+ // Wait for all layers before triggering event.
+ sink_.SetNumExpectedLayers(num_streams);
// On the 1st frame, we should have initialized the encoder and
// asked for its resolution requirements.
- video_source_.IncomingCapturedFrame(
- CreateFrame(1, codec_width_, codec_height_));
- WaitForEncodedFrame(1);
- EXPECT_EQ(video_source_.sink_wants().resolution_alignment,
- kRequestedResolutionAlignment);
+ int64_t timestamp_ms = kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_EQ(1, fake_encoder_.GetNumEncoderInitializations());
// On the 2nd frame, we should be receiving a correctly aligned resolution.
// (It's up the to the encoder to potentially drop the previous frame,
// to avoid coding back-to-back keyframes.)
- video_source_.IncomingCapturedFrame(
- CreateFrame(2, codec_width_, codec_height_));
- WaitForEncodedFrame(2);
- sink_.CheckLastFrameSizeIsMultipleOf(kRequestedResolutionAlignment);
+ timestamp_ms += kFrameIntervalMs;
+ video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_GE(fake_encoder_.GetNumEncoderInitializations(), 1);
+
+ VideoCodec codec = fake_encoder_.video_codec();
+ EXPECT_EQ(codec.numberOfSimulcastStreams, num_streams);
+ // Frame size should be a multiple of the requested alignment.
+ for (int i = 0; i < codec.numberOfSimulcastStreams; ++i) {
+ EXPECT_EQ(codec.simulcastStream[i].width % requested_alignment_, 0);
+ EXPECT_EQ(codec.simulcastStream[i].height % requested_alignment_, 0);
+ // Aspect ratio should match.
+ EXPECT_EQ(codec.width * codec.simulcastStream[i].height,
+ codec.height * codec.simulcastStream[i].width);
+ }
video_stream_encoder_->Stop();
}