Allocate extra buffer space in FrameObject in case of H264.

Since FFmpeg uses an optimized bitstream reader that reads in chunks of 32/64
bits, the bitstream buffer has to be enlarged so that the reader does not
read out of bounds.

BUG=webrtc:5514
R=stefan@webrtc.org

Review URL: https://codereview.webrtc.org/2476513004 .

Cr-Commit-Position: refs/heads/master@{#14941}
diff --git a/webrtc/modules/video_coding/frame_object.cc b/webrtc/modules/video_coding/frame_object.cc
index b803179..4eb8aee 100644
--- a/webrtc/modules/video_coding/frame_object.cc
+++ b/webrtc/modules/video_coding/frame_object.cc
@@ -34,6 +34,10 @@
       times_nacked_(times_nacked) {
   VCMPacket* packet = packet_buffer_->GetPacket(first_seq_num);
   if (packet) {
+    // RtpFrameObject members
+    frame_type_ = packet->frameType;
+    codec_type_ = packet->codec;
+
     // TODO(philipel): Remove when encoded image is replaced by FrameObject.
     // VCMEncodedFrame members
     CopyCodecSpecific(&packet->video_header);
@@ -41,16 +45,23 @@
     _payloadType = packet->payloadType;
     _timeStamp = packet->timestamp;
     ntp_time_ms_ = packet->ntp_time_ms_;
-    _buffer = new uint8_t[frame_size]();
-    _size = frame_size;
+
+    // Since FFmpeg uses an optimized bitstream reader that reads in chunks
+    // of 32/64 bits we have to add at least that much padding to the buffer
+    // to make sure the decoder doesn't read out of bounds.
+    // NOTE! EncodedImage::_size is the size of the buffer (think capacity of
+    //       a std::vector) and EncodedImage::_length is the actual size of
+    //       the bitstream (think size of a std::vector).
+    if (codec_type_ == kVideoCodecH264)
+      _size = frame_size + EncodedImage::kBufferPaddingBytesH264;
+    else
+      _size = frame_size;
+
+    _buffer = new uint8_t[_size];
     _length = frame_size;
     _frameType = packet->frameType;
     GetBitstream(_buffer);
 
-    // RtpFrameObject members
-    frame_type_ = packet->frameType;
-    codec_type_ = packet->codec;
-
     // FrameObject members
     timestamp = packet->timestamp;
   }
diff --git a/webrtc/modules/video_coding/video_packet_buffer_unittest.cc b/webrtc/modules/video_coding/video_packet_buffer_unittest.cc
index 81534e3..1fe3169 100644
--- a/webrtc/modules/video_coding/video_packet_buffer_unittest.cc
+++ b/webrtc/modules/video_coding/video_packet_buffer_unittest.cc
@@ -305,7 +305,7 @@
   CheckFrame(seq_num + 3);
 }
 
-TEST_F(TestPacketBuffer, GetBitstreamFromFrame) {
+TEST_F(TestPacketBuffer, GetBitstream) {
   // "many bitstream, such data" with null termination.
   uint8_t many[] = {0x6d, 0x61, 0x6e, 0x79, 0x20};
   uint8_t bitstream[] = {0x62, 0x69, 0x74, 0x73, 0x74, 0x72,
@@ -331,6 +331,35 @@
   EXPECT_EQ(memcmp(result, "many bitstream, such data", sizeof(result)), 0);
 }
 
+TEST_F(TestPacketBuffer, GetBitstreamH264BufferPadding) {
+  uint16_t seq_num = Rand();
+  uint8_t data[] = "some plain old data";
+
+  // EncodedImage::kBufferPaddingBytesH264 is unknown at compile time.
+  uint8_t* result =
+      new uint8_t[sizeof(data) + EncodedImage::kBufferPaddingBytesH264];
+
+  VCMPacket packet;
+  packet.seqNum = seq_num;
+  packet.codec = kVideoCodecH264;
+  packet.insertStartCode = true;
+  packet.video_header.codecHeader.H264.packetization_type = kH264SingleNalu;
+  packet.dataPtr = data;
+  packet.sizeBytes = sizeof(data);
+  packet.isFirstPacket = true;
+  packet.markerBit = true;
+  packet_buffer_->InsertPacket(packet);
+
+  ASSERT_EQ(1UL, frames_from_callback_.size());
+  EXPECT_EQ(frames_from_callback_[seq_num]->EncodedImage()._length,
+            sizeof(data));
+  EXPECT_EQ(frames_from_callback_[seq_num]->EncodedImage()._size,
+            sizeof(data) + EncodedImage::kBufferPaddingBytesH264);
+  EXPECT_TRUE(frames_from_callback_[seq_num]->GetBitstream(result));
+  EXPECT_EQ(memcmp(result, data, sizeof(data)), 0);
+  delete[] result;
+}
+
 TEST_F(TestPacketBuffer, FreeSlotsOnFrameDestruction) {
   const uint16_t seq_num = Rand();