Fix heap use overrun in FakeEncoder
By removing the unnecessary fixed-size buffer.
BUG=webrtc:10276
Change-Id: I303303d8c4aa356372875abe6db5711cd10bcc71
Reviewed-on: https://webrtc-review.googlesource.com/c/120811
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Reviewed-by: Niels Moller <nisse@webrtc.org>
Commit-Queue: Per Kjellander <perkj@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26509}
diff --git a/media/engine/fake_video_codec_factory.cc b/media/engine/fake_video_codec_factory.cc
index aec3510..d7b6446 100644
--- a/media/engine/fake_video_codec_factory.cc
+++ b/media/engine/fake_video_codec_factory.cc
@@ -34,8 +34,7 @@
// static
std::unique_ptr<VideoEncoder> FakeVideoEncoderFactory::CreateVideoEncoder() {
- return absl::make_unique<test::FakeEncoder>(Clock::GetRealTimeClock(),
- 10000000);
+ return absl::make_unique<test::FakeEncoder>(Clock::GetRealTimeClock());
}
std::vector<SdpVideoFormat> FakeVideoEncoderFactory::GetSupportedFormats()
@@ -51,8 +50,7 @@
std::unique_ptr<VideoEncoder> FakeVideoEncoderFactory::CreateVideoEncoder(
const SdpVideoFormat& format) {
- return absl::make_unique<test::FakeEncoder>(Clock::GetRealTimeClock(),
- 10000000);
+ return absl::make_unique<test::FakeEncoder>(Clock::GetRealTimeClock());
}
FakeVideoDecoderFactory::FakeVideoDecoderFactory() = default;
diff --git a/test/fake_encoder.cc b/test/fake_encoder.cc
index 3e4fca6..c4cbfe1 100644
--- a/test/fake_encoder.cc
+++ b/test/fake_encoder.cc
@@ -47,9 +47,7 @@
}; // namespace
-FakeEncoder::FakeEncoder(Clock* clock) : FakeEncoder(clock, 100000) {}
-
-FakeEncoder::FakeEncoder(Clock* clock, size_t buffer_size)
+FakeEncoder::FakeEncoder(Clock* clock)
: clock_(clock),
callback_(nullptr),
configured_input_framerate_(-1),
@@ -57,11 +55,6 @@
pending_keyframe_(true),
counter_(0),
debt_bytes_(0) {
- // Generate some arbitrary not-all-zero data
- encoded_buffer_.resize(buffer_size);
- for (size_t i = 0; i < encoded_buffer_.size(); ++i) {
- encoded_buffer_[i] = static_cast<uint8_t>(i);
- }
for (bool& used : used_layers_) {
used = false;
}
@@ -133,12 +126,12 @@
specifics.codecType = kVideoCodecGeneric;
std::unique_ptr<uint8_t[]> encoded_buffer(
new uint8_t[frame_info.layers[i].size]);
- memcpy(encoded_buffer.get(), encoded_buffer_.data(),
- frame_info.layers[i].size - 4);
+ // Fill the buffer with arbitrary data. Write something to make ASan happy.
+ memset(encoded_buffer.get(), 9, frame_info.layers[i].size);
// Write a counter to the image to make each frame unique.
WriteCounter(encoded_buffer.get() + frame_info.layers[i].size - 4, counter);
EncodedImage encoded(encoded_buffer.get(), frame_info.layers[i].size,
- encoded_buffer_.size());
+ frame_info.layers[i].size);
encoded.SetTimestamp(input_image.timestamp());
encoded.capture_time_ms_ = input_image.render_time_ms();
encoded._frameType =
diff --git a/test/fake_encoder.h b/test/fake_encoder.h
index e7a9eb1..0760353 100644
--- a/test/fake_encoder.h
+++ b/test/fake_encoder.h
@@ -36,7 +36,6 @@
class FakeEncoder : public VideoEncoder {
public:
explicit FakeEncoder(Clock* clock);
- FakeEncoder(Clock* clock, size_t buffer_size);
virtual ~FakeEncoder() = default;
// Sets max bitrate. Not thread-safe, call before registering the encoder.
@@ -91,8 +90,6 @@
bool pending_keyframe_ RTC_GUARDED_BY(crit_sect_);
uint32_t counter_ RTC_GUARDED_BY(crit_sect_);
rtc::CriticalSection crit_sect_;
-
- std::vector<uint8_t> encoded_buffer_;
bool used_layers_[kMaxSimulcastStreams];
// Current byte debt to be payed over a number of frames.