Update H264EncoderImpl to use EncodedImage::Allocate
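
EncodedImage::Allocate() lets the EncodedImage own its encoded buffer, so the
encoder no longer needs to keep a parallel
std::vector<std::unique_ptr<uint8_t[]>> (encoded_image_buffers_) alive next to
encoded_images_, and RtpFragmentize() drops its encoded_image_buffer
parameter.

A minimal sketch of the pattern change, condensed from the diff below
(unrelated setup elided):

    // Before: the image pointed at externally owned memory, so a separate
    // unique_ptr had to keep each allocation alive.
    encoded_images_[i].set_buffer(new uint8_t[new_capacity], new_capacity);
    encoded_image_buffers_[i].reset(encoded_images_[i].data());

    // After: Allocate() reserves a buffer owned by the EncodedImage itself.
    encoded_images_[i].Allocate(new_capacity);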

Bug: webrtc:9378
Change-Id: I0d60f8a0a1415a6be09dc1c4c2b0535ccdd6fcd1
Reviewed-on: https://webrtc-review.googlesource.com/c/122086
Reviewed-by: Erik Språng <sprang@webrtc.org>
Commit-Queue: Niels Möller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26650}
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
index 7e9464c..5987ecb 100644
--- a/modules/video_coding/codecs/h264/h264_encoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
@@ -94,7 +94,6 @@
 // is updated to point to each fragment, with offsets and lengths set as to
 // exclude the start codes.
 static void RtpFragmentize(EncodedImage* encoded_image,
-                           std::unique_ptr<uint8_t[]>* encoded_image_buffer,
                            const VideoFrameBuffer& frame_buffer,
                            SFrameBSInfo* info,
                            RTPFragmentationHeader* frag_header) {
@@ -126,8 +125,7 @@
           << ", encoded bytes: " << required_capacity << ".";
       new_capacity = required_capacity;
     }
-    encoded_image->set_buffer(new uint8_t[new_capacity], new_capacity);
-    encoded_image_buffer->reset(encoded_image->data());
+    encoded_image->Allocate(new_capacity);
   }
 
   // Iterate layers and NAL units, note each NAL unit as a fragment and copy
@@ -179,7 +177,6 @@
   }
   downscaled_buffers_.reserve(kMaxSimulcastStreams - 1);
   encoded_images_.reserve(kMaxSimulcastStreams);
-  encoded_image_buffers_.reserve(kMaxSimulcastStreams);
   encoders_.reserve(kMaxSimulcastStreams);
   configurations_.reserve(kMaxSimulcastStreams);
 }
@@ -222,7 +219,6 @@
   }
   downscaled_buffers_.resize(number_of_streams - 1);
   encoded_images_.resize(number_of_streams);
-  encoded_image_buffers_.resize(number_of_streams);
   encoders_.resize(number_of_streams);
   pictures_.resize(number_of_streams);
   configurations_.resize(number_of_streams);
@@ -302,8 +298,7 @@
     const size_t new_capacity =
         CalcBufferSize(VideoType::kI420, codec_.simulcastStream[idx].width,
                        codec_.simulcastStream[idx].height);
-    encoded_images_[i].set_buffer(new uint8_t[new_capacity], new_capacity);
-    encoded_image_buffers_[i].reset(encoded_images_[i].data());
+    encoded_images_[i].Allocate(new_capacity);
     encoded_images_[i]._completeFrame = true;
     encoded_images_[i]._encodedWidth = codec_.simulcastStream[idx].width;
     encoded_images_[i]._encodedHeight = codec_.simulcastStream[idx].height;
@@ -328,7 +323,6 @@
   downscaled_buffers_.clear();
   configurations_.clear();
   encoded_images_.clear();
-  encoded_image_buffers_.clear();
   pictures_.clear();
   return WEBRTC_VIDEO_CODEC_OK;
 }
@@ -513,8 +507,7 @@
     // Split encoded image up into fragments. This also updates
     // |encoded_image_|.
     RTPFragmentationHeader frag_header;
-    RtpFragmentize(&encoded_images_[i], &encoded_image_buffers_[i],
-                   *frame_buffer, &info, &frag_header);
+    RtpFragmentize(&encoded_images_[i], *frame_buffer, &info, &frag_header);
 
     // Encoder can skip frames to save bandwidth in which case
     // |encoded_images_[i]._length| == 0.
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.h b/modules/video_coding/codecs/h264/h264_encoder_impl.h
index 044b6eb..75a8758 100644
--- a/modules/video_coding/codecs/h264/h264_encoder_impl.h
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.h
@@ -90,7 +90,6 @@
   std::vector<rtc::scoped_refptr<I420Buffer>> downscaled_buffers_;
   std::vector<LayerConfig> configurations_;
   std::vector<EncodedImage> encoded_images_;
-  std::vector<std::unique_ptr<uint8_t[]>> encoded_image_buffers_;
 
   VideoCodec codec_;
   H264PacketizationMode packetization_mode_;