Revert of Delete webrtc::VideoFrame methods buffer and stride. (patchset #14 id:250001 of https://codereview.webrtc.org/1900673002/ )
Reason for revert:
Breaks Chrome FYI bots.
Original issue's description:
> Delete webrtc::VideoFrame methods buffer and stride.
>
> To make the HasOneRef/IsMutable hack work, also had to change the
> video_frame_buffer method to return a const ref to a scoped_refptr,
> to not imply an AddRef.
>
> BUG=webrtc:5682
TBR=perkj@webrtc.org,magjed@webrtc.org,pbos@webrtc.org,pthatcher@webrtc.org,stefan@webrtc.org
# Skipping CQ checks because the original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=webrtc:5682
Review-Url: https://codereview.webrtc.org/1935443002
Cr-Original-Commit-Position: refs/heads/master@{#12558}
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: 5b3c443d301f2c2f18dac5b02652c08b91ea3828
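
For context when reviewing the revert: the reverted CL routed all pixel access through video_frame_buffer() and, to keep the HasOneRef()/IsMutable() ownership check working, returned the buffer as a const ref. A minimal sketch of that refcounting concern, assuming rtc::scoped_refptr semantics (illustrative only, not part of this diff; the two variants never coexist in one class):

  // Variant A (restored by this revert): returning by value copies the
  // smart pointer, which AddRefs the buffer; while a caller holds the
  // temporary, HasOneRef() is false and IsMutable() reports the data as
  // shared.
  rtc::scoped_refptr<VideoFrameBuffer> video_frame_buffer() const {
    return video_frame_buffer_;  // Copy => AddRef.
  }

  // Variant B (the reverted CL): a const ref hands out the existing
  // reference without an AddRef, so a sole owner still observes
  // HasOneRef() == true.
  const rtc::scoped_refptr<VideoFrameBuffer>& video_frame_buffer() const {
    return video_frame_buffer_;
  }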
diff --git a/api/java/jni/androidmediaencoder_jni.cc b/api/java/jni/androidmediaencoder_jni.cc
index 4508bc9..b7f2da4 100644
--- a/api/java/jni/androidmediaencoder_jni.cc
+++ b/api/java/jni/androidmediaencoder_jni.cc
@@ -794,12 +794,9 @@
CHECK_EXCEPTION(jni);
RTC_CHECK(yuv_buffer) << "Indirect buffer??";
RTC_CHECK(!libyuv::ConvertFromI420(
- frame.video_frame_buffer()->DataY(),
- frame.video_frame_buffer()->StrideY(),
- frame.video_frame_buffer()->DataU(),
- frame.video_frame_buffer()->StrideU(),
- frame.video_frame_buffer()->DataV(),
- frame.video_frame_buffer()->StrideV(),
+ frame.buffer(webrtc::kYPlane), frame.stride(webrtc::kYPlane),
+ frame.buffer(webrtc::kUPlane), frame.stride(webrtc::kUPlane),
+ frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane),
yuv_buffer, width_, width_, height_, encoder_fourcc_))
<< "ConvertFromI420 failed";
diff --git a/common_video/i420_video_frame_unittest.cc b/common_video/i420_video_frame_unittest.cc
index c942e4a..bc2bac9 100644
--- a/common_video/i420_video_frame_unittest.cc
+++ b/common_video/i420_video_frame_unittest.cc
@@ -51,9 +51,9 @@
VideoFrame frame;
frame.CreateEmptyFrame(10, 10, 12, 14, 220);
int height = frame.height();
- int stride_y = frame.video_frame_buffer()->StrideY();
- int stride_u = frame.video_frame_buffer()->StrideU();
- int stride_v = frame.video_frame_buffer()->StrideV();
+ int stride_y = frame.stride(kYPlane);
+ int stride_u = frame.stride(kUPlane);
+ int stride_v = frame.stride(kVPlane);
// Verify that allocated size was computed correctly.
EXPECT_EQ(ExpectedSize(stride_y, height, kYPlane),
frame.allocated_size(kYPlane));
@@ -101,12 +101,9 @@
// Frame of larger dimensions.
small_frame.CreateEmptyFrame(width, height,
stride_y, stride_u, stride_v);
- memset(small_frame.video_frame_buffer()->MutableDataY(), 1,
- small_frame.allocated_size(kYPlane));
- memset(small_frame.video_frame_buffer()->MutableDataU(), 2,
- small_frame.allocated_size(kUPlane));
- memset(small_frame.video_frame_buffer()->MutableDataV(), 3,
- small_frame.allocated_size(kVPlane));
+ memset(small_frame.buffer(kYPlane), 1, small_frame.allocated_size(kYPlane));
+ memset(small_frame.buffer(kUPlane), 2, small_frame.allocated_size(kUPlane));
+ memset(small_frame.buffer(kVPlane), 3, small_frame.allocated_size(kVPlane));
big_frame.CopyFrame(small_frame);
EXPECT_TRUE(test::FramesEqual(small_frame, big_frame));
}
@@ -144,12 +141,12 @@
const VideoFrame* const_frame1_ptr = &frame1;
const VideoFrame* const_frame2_ptr = &frame2;
- EXPECT_TRUE(const_frame1_ptr->video_frame_buffer()->DataY() ==
- const_frame2_ptr->video_frame_buffer()->DataY());
- EXPECT_TRUE(const_frame1_ptr->video_frame_buffer()->DataU() ==
- const_frame2_ptr->video_frame_buffer()->DataU());
- EXPECT_TRUE(const_frame1_ptr->video_frame_buffer()->DataV() ==
- const_frame2_ptr->video_frame_buffer()->DataV());
+ EXPECT_TRUE(const_frame1_ptr->buffer(kYPlane) ==
+ const_frame2_ptr->buffer(kYPlane));
+ EXPECT_TRUE(const_frame1_ptr->buffer(kUPlane) ==
+ const_frame2_ptr->buffer(kUPlane));
+ EXPECT_TRUE(const_frame1_ptr->buffer(kVPlane) ==
+ const_frame2_ptr->buffer(kVPlane));
EXPECT_EQ(frame2.timestamp(), frame1.timestamp());
EXPECT_EQ(frame2.ntp_time_ms(), frame1.ntp_time_ms());
@@ -187,12 +184,12 @@
width, height, stride_y, stride_uv, stride_uv,
kVideoRotation_0);
// Expect exactly the same pixel data.
- EXPECT_TRUE(test::EqualPlane(buffer_y, frame2.video_frame_buffer()->DataY(),
- stride_y, 15, 15));
- EXPECT_TRUE(test::EqualPlane(buffer_u, frame2.video_frame_buffer()->DataU(),
- stride_uv, 8, 8));
- EXPECT_TRUE(test::EqualPlane(buffer_v, frame2.video_frame_buffer()->DataV(),
- stride_uv, 8, 8));
+ EXPECT_TRUE(
+ test::EqualPlane(buffer_y, frame2.buffer(kYPlane), stride_y, 15, 15));
+ EXPECT_TRUE(
+ test::EqualPlane(buffer_u, frame2.buffer(kUPlane), stride_uv, 8, 8));
+ EXPECT_TRUE(
+ test::EqualPlane(buffer_v, frame2.buffer(kVPlane), stride_uv, 8, 8));
// Compare size.
EXPECT_LE(kSizeY, frame2.allocated_size(kYPlane));
@@ -203,27 +200,27 @@
TEST(TestVideoFrame, ReuseAllocation) {
VideoFrame frame;
frame.CreateEmptyFrame(640, 320, 640, 320, 320);
- const uint8_t* y = frame.video_frame_buffer()->DataY();
- const uint8_t* u = frame.video_frame_buffer()->DataU();
- const uint8_t* v = frame.video_frame_buffer()->DataV();
+ const uint8_t* y = frame.buffer(kYPlane);
+ const uint8_t* u = frame.buffer(kUPlane);
+ const uint8_t* v = frame.buffer(kVPlane);
frame.CreateEmptyFrame(640, 320, 640, 320, 320);
- EXPECT_EQ(y, frame.video_frame_buffer()->DataY());
- EXPECT_EQ(u, frame.video_frame_buffer()->DataU());
- EXPECT_EQ(v, frame.video_frame_buffer()->DataV());
+ EXPECT_EQ(y, frame.buffer(kYPlane));
+ EXPECT_EQ(u, frame.buffer(kUPlane));
+ EXPECT_EQ(v, frame.buffer(kVPlane));
}
TEST(TestVideoFrame, FailToReuseAllocation) {
VideoFrame frame1;
frame1.CreateEmptyFrame(640, 320, 640, 320, 320);
- const uint8_t* y = frame1.video_frame_buffer()->DataY();
- const uint8_t* u = frame1.video_frame_buffer()->DataU();
- const uint8_t* v = frame1.video_frame_buffer()->DataV();
+ const uint8_t* y = frame1.buffer(kYPlane);
+ const uint8_t* u = frame1.buffer(kUPlane);
+ const uint8_t* v = frame1.buffer(kVPlane);
// Make a shallow copy of |frame1|.
VideoFrame frame2(frame1.video_frame_buffer(), 0, 0, kVideoRotation_0);
frame1.CreateEmptyFrame(640, 320, 640, 320, 320);
- EXPECT_NE(y, frame1.video_frame_buffer()->DataY());
- EXPECT_NE(u, frame1.video_frame_buffer()->DataU());
- EXPECT_NE(v, frame1.video_frame_buffer()->DataV());
+ EXPECT_NE(y, frame1.buffer(kYPlane));
+ EXPECT_NE(u, frame1.buffer(kUPlane));
+ EXPECT_NE(v, frame1.buffer(kVPlane));
}
TEST(TestVideoFrame, TextureInitialValues) {
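
The two allocation tests above capture the sharing semantics that the buffer() accessors expose. A condensed sketch using only calls that appear in this file:

  VideoFrame frame1;
  frame1.CreateEmptyFrame(640, 320, 640, 320, 320);
  // Sole owner: re-creating with identical dimensions reuses the planes,
  // so buffer(kYPlane) returns the same pointer (ReuseAllocation).

  // A shallow copy shares the underlying VideoFrameBuffer...
  VideoFrame frame2(frame1.video_frame_buffer(), 0, 0, kVideoRotation_0);
  frame1.CreateEmptyFrame(640, 320, 640, 320, 320);
  // ...so the buffer is no longer uniquely owned and CreateEmptyFrame()
  // must allocate fresh planes for frame1 (FailToReuseAllocation).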
diff --git a/common_video/libyuv/libyuv_unittest.cc b/common_video/libyuv/libyuv_unittest.cc
index 0e43e2e..9f92b8b 100644
--- a/common_video/libyuv/libyuv_unittest.cc
+++ b/common_video/libyuv/libyuv_unittest.cc
@@ -21,6 +21,61 @@
namespace webrtc {
+int PrintBuffer(const uint8_t* buffer, int width, int height, int stride) {
+ if (buffer == NULL)
+ return -1;
+ int k;
+ const uint8_t* tmp_buffer = buffer;
+ for (int i = 0; i < height; i++) {
+ k = 0;
+ for (int j = 0; j < width; j++) {
+ printf("%d ", tmp_buffer[k++]);
+ }
+ tmp_buffer += stride;
+ printf(" \n");
+ }
+ printf(" \n");
+ return 0;
+}
+
+int PrintFrame(const VideoFrame* frame, const char* str) {
+ if (frame == NULL)
+ return -1;
+ printf("%s %dx%d \n", str, frame->width(), frame->height());
+
+ int ret = 0;
+ for (int plane_num = 0; plane_num < kNumOfPlanes; ++plane_num) {
+ PlaneType plane_type = static_cast<PlaneType>(plane_num);
+ int width = (plane_num ? (frame->width() + 1) / 2 : frame->width());
+ int height = (plane_num ? (frame->height() + 1) / 2 : frame->height());
+ ret += PrintBuffer(frame->buffer(plane_type), width, height,
+ frame->stride(plane_type));
+ }
+ return ret;
+}
+
+// Create an image in a YUV frame. Every plane starts at its given offset
+// value and is filled with increasing values.
+void CreateImage(VideoFrame* frame, int plane_offset[kNumOfPlanes]) {
+ if (frame == NULL)
+ return;
+ for (int plane_num = 0; plane_num < kNumOfPlanes; ++plane_num) {
+ int width = (plane_num != kYPlane ? (frame->width() + 1) / 2 :
+ frame->width());
+ int height = (plane_num != kYPlane ? (frame->height() + 1) / 2 :
+ frame->height());
+ PlaneType plane_type = static_cast<PlaneType>(plane_num);
+ uint8_t *data = frame->buffer(plane_type);
+ for (int i = 0; i < height; i++) {
+ for (int j = 0; j < width; j++) {
+ data[j] = static_cast<uint8_t>(i + plane_offset[plane_num] + j);
+ }
+ data += frame->stride(plane_type);
+ }
+ }
+}
+
class TestLibYuv : public ::testing::Test {
protected:
TestLibYuv();
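
A usage sketch for the restored helpers above (the 4x4 dimensions are arbitrary):

  VideoFrame frame;
  frame.CreateEmptyFrame(4, 4, 4, 2, 2);  // width, height, three strides.
  int plane_offset[kNumOfPlanes] = {10, 100, 200};  // Y, U, V start values.
  CreateImage(&frame, plane_offset);   // Pixel (i, j) becomes i + offset + j.
  PrintFrame(&frame, "test pattern");  // Dumps all three planes to stdout.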
diff --git a/common_video/libyuv/scaler.cc b/common_video/libyuv/scaler.cc
index 6e683c0..c6adbf9 100644
--- a/common_video/libyuv/scaler.cc
+++ b/common_video/libyuv/scaler.cc
@@ -47,7 +47,6 @@
return 0;
}
-// TODO(nisse): Should work with VideoFrameBuffer instead.
int Scaler::Scale(const VideoFrame& src_frame, VideoFrame* dst_frame) {
assert(dst_frame);
if (src_frame.IsZeroSize())
@@ -70,35 +69,31 @@
const int src_offset_x = ((src_width_ - cropped_src_width) / 2) & ~1;
const int src_offset_y = ((src_height_ - cropped_src_height) / 2) & ~1;
- const uint8_t* y_ptr =
- src_frame.video_frame_buffer()->DataY() +
- src_offset_y * src_frame.video_frame_buffer()->StrideY() +
- src_offset_x;
- const uint8_t* u_ptr =
- src_frame.video_frame_buffer()->DataU() +
- src_offset_y / 2 * src_frame.video_frame_buffer()->StrideU() +
- src_offset_x / 2;
- const uint8_t* v_ptr =
- src_frame.video_frame_buffer()->DataV() +
- src_offset_y / 2 * src_frame.video_frame_buffer()->StrideV() +
- src_offset_x / 2;
+ const uint8_t* y_ptr = src_frame.buffer(kYPlane) +
+ src_offset_y * src_frame.stride(kYPlane) +
+ src_offset_x;
+ const uint8_t* u_ptr = src_frame.buffer(kUPlane) +
+ src_offset_y / 2 * src_frame.stride(kUPlane) +
+ src_offset_x / 2;
+ const uint8_t* v_ptr = src_frame.buffer(kVPlane) +
+ src_offset_y / 2 * src_frame.stride(kVPlane) +
+ src_offset_x / 2;
- return libyuv::I420Scale(
- y_ptr,
- src_frame.video_frame_buffer()->StrideY(),
- u_ptr,
- src_frame.video_frame_buffer()->StrideU(),
- v_ptr,
- src_frame.video_frame_buffer()->StrideV(),
- cropped_src_width, cropped_src_height,
- dst_frame->video_frame_buffer()->MutableDataY(),
- dst_frame->video_frame_buffer()->StrideY(),
- dst_frame->video_frame_buffer()->MutableDataU(),
- dst_frame->video_frame_buffer()->StrideU(),
- dst_frame->video_frame_buffer()->MutableDataV(),
- dst_frame->video_frame_buffer()->StrideV(),
- dst_width_, dst_height_,
- libyuv::FilterMode(method_));
+ return libyuv::I420Scale(y_ptr,
+ src_frame.stride(kYPlane),
+ u_ptr,
+ src_frame.stride(kUPlane),
+ v_ptr,
+ src_frame.stride(kVPlane),
+ cropped_src_width, cropped_src_height,
+ dst_frame->buffer(kYPlane),
+ dst_frame->stride(kYPlane),
+ dst_frame->buffer(kUPlane),
+ dst_frame->stride(kUPlane),
+ dst_frame->buffer(kVPlane),
+ dst_frame->stride(kVPlane),
+ dst_width_, dst_height_,
+ libyuv::FilterMode(method_));
}
bool Scaler::SupportedVideoType(VideoType src_video_type,
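
The crop offsets in Scale() are forced even with & ~1 because I420 chroma is subsampled 2x2: an odd luma offset would fall between chroma samples. A worked example with hypothetical dimensions:

  const int src_width = 352, cropped_src_width = 301;
  const int src_offset_x = ((src_width - cropped_src_width) / 2) & ~1;
  // (352 - 301) / 2 = 25; 25 & ~1 = 24. The even luma offset makes
  // src_offset_x / 2 = 12 an exact index into the half-resolution U and V
  // planes.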
diff --git a/common_video/libyuv/webrtc_libyuv.cc b/common_video/libyuv/webrtc_libyuv.cc
index 7f5e330..48f5c20 100644
--- a/common_video/libyuv/webrtc_libyuv.cc
+++ b/common_video/libyuv/webrtc_libyuv.cc
@@ -102,42 +102,23 @@
return buffer_size;
}
-static int PrintPlane(const uint8_t* buf,
- int width,
- int height,
- int stride,
- FILE* file) {
- for (int i = 0; i < height; i++, buf += stride) {
- if (fwrite(buf, 1, width, file) != static_cast<unsigned int>(width))
- return -1;
- }
- return 0;
-}
-
-// TODO(nisse): Belongs with the test code?
int PrintVideoFrame(const VideoFrame& frame, FILE* file) {
if (file == NULL)
return -1;
if (frame.IsZeroSize())
return -1;
- int width = frame.video_frame_buffer()->width();
- int height = frame.video_frame_buffer()->height();
- int chroma_width = (width + 1) / 2;
- int chroma_height = (height + 1) / 2;
-
- if (PrintPlane(frame.video_frame_buffer()->DataY(), width, height,
- frame.video_frame_buffer()->StrideY(), file) < 0) {
- return -1;
- }
- if (PrintPlane(frame.video_frame_buffer()->DataU(),
- chroma_width, chroma_height,
- frame.video_frame_buffer()->StrideU(), file) < 0) {
- return -1;
- }
- if (PrintPlane(frame.video_frame_buffer()->DataV(),
- chroma_width, chroma_height,
- frame.video_frame_buffer()->StrideV(), file) < 0) {
- return -1;
+ for (int plane_num = 0; plane_num < kNumOfPlanes; ++plane_num) {
+ int width = (plane_num ? (frame.width() + 1) / 2 : frame.width());
+ int height = (plane_num ? (frame.height() + 1) / 2 : frame.height());
+ PlaneType plane_type = static_cast<PlaneType>(plane_num);
+ const uint8_t* plane_buffer = frame.buffer(plane_type);
+ for (int y = 0; y < height; y++) {
+ if (fwrite(plane_buffer, 1, width, file) !=
+ static_cast<unsigned int>(width)) {
+ return -1;
+ }
+ plane_buffer += frame.stride(plane_type);
+ }
}
return 0;
}
@@ -152,23 +133,22 @@
return -1;
}
- int width = input_frame.video_frame_buffer()->width();
- int height = input_frame.video_frame_buffer()->height();
- int chroma_width = (width + 1) / 2;
- int chroma_height = (height + 1) / 2;
+ int pos = 0;
+ uint8_t* buffer_ptr = buffer;
- libyuv::I420Copy(input_frame.video_frame_buffer()->DataY(),
- input_frame.video_frame_buffer()->StrideY(),
- input_frame.video_frame_buffer()->DataU(),
- input_frame.video_frame_buffer()->StrideU(),
- input_frame.video_frame_buffer()->DataV(),
- input_frame.video_frame_buffer()->StrideV(),
- buffer, width,
- buffer + width*height, chroma_width,
- buffer + width*height + chroma_width*chroma_height,
- chroma_width,
- width, height);
-
+ for (int plane = 0; plane < kNumOfPlanes; ++plane) {
+ int width = (plane ? (input_frame.width() + 1) / 2 :
+ input_frame.width());
+ int height = (plane ? (input_frame.height() + 1) / 2 :
+ input_frame.height());
+ const uint8_t* plane_ptr = input_frame.buffer(
+ static_cast<PlaneType>(plane));
+ for (int y = 0; y < height; y++) {
+ memcpy(&buffer_ptr[pos], plane_ptr, width);
+ pos += width;
+ plane_ptr += input_frame.stride(static_cast<PlaneType>(plane));
+ }
+ }
return static_cast<int>(length);
}
@@ -248,7 +228,6 @@
return libyuv::FOURCC_ANY;
}
-// TODO(nisse): Delete this wrapper, let callers use libyuv directly.
int ConvertToI420(VideoType src_video_type,
const uint8_t* src_frame,
int crop_x,
@@ -266,35 +245,33 @@
dst_width = dst_frame->height();
dst_height = dst_frame->width();
}
- return libyuv::ConvertToI420(
- src_frame, sample_size,
- dst_frame->video_frame_buffer()->MutableDataY(),
- dst_frame->video_frame_buffer()->StrideY(),
- dst_frame->video_frame_buffer()->MutableDataU(),
- dst_frame->video_frame_buffer()->StrideU(),
- dst_frame->video_frame_buffer()->MutableDataV(),
- dst_frame->video_frame_buffer()->StrideV(),
- crop_x, crop_y,
- src_width, src_height,
- dst_width, dst_height,
- ConvertRotationMode(rotation),
- ConvertVideoType(src_video_type));
+ return libyuv::ConvertToI420(src_frame, sample_size,
+ dst_frame->buffer(kYPlane),
+ dst_frame->stride(kYPlane),
+ dst_frame->buffer(kUPlane),
+ dst_frame->stride(kUPlane),
+ dst_frame->buffer(kVPlane),
+ dst_frame->stride(kVPlane),
+ crop_x, crop_y,
+ src_width, src_height,
+ dst_width, dst_height,
+ ConvertRotationMode(rotation),
+ ConvertVideoType(src_video_type));
}
int ConvertFromI420(const VideoFrame& src_frame,
VideoType dst_video_type,
int dst_sample_size,
uint8_t* dst_frame) {
- return libyuv::ConvertFromI420(
- src_frame.video_frame_buffer()->DataY(),
- src_frame.video_frame_buffer()->StrideY(),
- src_frame.video_frame_buffer()->DataU(),
- src_frame.video_frame_buffer()->StrideU(),
- src_frame.video_frame_buffer()->DataV(),
- src_frame.video_frame_buffer()->StrideV(),
- dst_frame, dst_sample_size,
- src_frame.width(), src_frame.height(),
- ConvertVideoType(dst_video_type));
+ return libyuv::ConvertFromI420(src_frame.buffer(kYPlane),
+ src_frame.stride(kYPlane),
+ src_frame.buffer(kUPlane),
+ src_frame.stride(kUPlane),
+ src_frame.buffer(kVPlane),
+ src_frame.stride(kVPlane),
+ dst_frame, dst_sample_size,
+ src_frame.width(), src_frame.height(),
+ ConvertVideoType(dst_video_type));
}
// TODO(mikhal): Create a designated VideoFrame for non I420.
@@ -303,16 +280,15 @@
int dst_sample_size,
uint8_t* dst_frame) {
// YV12 = Y, V, U
- return libyuv::ConvertFromI420(
- src_frame.video_frame_buffer()->DataY(),
- src_frame.video_frame_buffer()->StrideY(),
- src_frame.video_frame_buffer()->DataV(),
- src_frame.video_frame_buffer()->StrideV(),
- src_frame.video_frame_buffer()->DataU(),
- src_frame.video_frame_buffer()->StrideU(),
- dst_frame, dst_sample_size,
- src_frame.width(), src_frame.height(),
- ConvertVideoType(dst_video_type));
+ return libyuv::ConvertFromI420(src_frame.buffer(kYPlane),
+ src_frame.stride(kYPlane),
+ src_frame.buffer(kVPlane),
+ src_frame.stride(kVPlane),
+ src_frame.buffer(kUPlane),
+ src_frame.stride(kUPlane),
+ dst_frame, dst_sample_size,
+ src_frame.width(), src_frame.height(),
+ ConvertVideoType(dst_video_type));
}
// Compute PSNR for an I420 frame (all planes)
@@ -325,18 +301,18 @@
else if (ref_frame->width() < 0 || ref_frame->height() < 0)
return -1;
- double psnr = libyuv::I420Psnr(ref_frame->video_frame_buffer()->DataY(),
- ref_frame->video_frame_buffer()->StrideY(),
- ref_frame->video_frame_buffer()->DataU(),
- ref_frame->video_frame_buffer()->StrideU(),
- ref_frame->video_frame_buffer()->DataV(),
- ref_frame->video_frame_buffer()->StrideV(),
- test_frame->video_frame_buffer()->DataY(),
- test_frame->video_frame_buffer()->StrideY(),
- test_frame->video_frame_buffer()->DataU(),
- test_frame->video_frame_buffer()->StrideU(),
- test_frame->video_frame_buffer()->DataV(),
- test_frame->video_frame_buffer()->StrideV(),
+ double psnr = libyuv::I420Psnr(ref_frame->buffer(kYPlane),
+ ref_frame->stride(kYPlane),
+ ref_frame->buffer(kUPlane),
+ ref_frame->stride(kUPlane),
+ ref_frame->buffer(kVPlane),
+ ref_frame->stride(kVPlane),
+ test_frame->buffer(kYPlane),
+ test_frame->stride(kYPlane),
+ test_frame->buffer(kUPlane),
+ test_frame->stride(kUPlane),
+ test_frame->buffer(kVPlane),
+ test_frame->stride(kVPlane),
test_frame->width(), test_frame->height());
// LibYuv sets the max psnr value to 128, we restrict it here.
// In case of 0 mse in one frame, 128 can skew the results significantly.
@@ -353,18 +329,18 @@
else if (ref_frame->width() < 0 || ref_frame->height() < 0)
return -1;
- return libyuv::I420Ssim(ref_frame->video_frame_buffer()->DataY(),
- ref_frame->video_frame_buffer()->StrideY(),
- ref_frame->video_frame_buffer()->DataU(),
- ref_frame->video_frame_buffer()->StrideU(),
- ref_frame->video_frame_buffer()->DataV(),
- ref_frame->video_frame_buffer()->StrideV(),
- test_frame->video_frame_buffer()->DataY(),
- test_frame->video_frame_buffer()->StrideY(),
- test_frame->video_frame_buffer()->DataU(),
- test_frame->video_frame_buffer()->StrideU(),
- test_frame->video_frame_buffer()->DataV(),
- test_frame->video_frame_buffer()->StrideV(),
+ return libyuv::I420Ssim(ref_frame->buffer(kYPlane),
+ ref_frame->stride(kYPlane),
+ ref_frame->buffer(kUPlane),
+ ref_frame->stride(kUPlane),
+ ref_frame->buffer(kVPlane),
+ ref_frame->stride(kVPlane),
+ test_frame->buffer(kYPlane),
+ test_frame->stride(kYPlane),
+ test_frame->buffer(kUPlane),
+ test_frame->stride(kUPlane),
+ test_frame->buffer(kVPlane),
+ test_frame->stride(kVPlane),
test_frame->width(), test_frame->height());
}
} // namespace webrtc
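
Both rewritten loops (PrintVideoFrame and the extraction loop) walk the planes generically, rounding chroma dimensions up for odd sizes. A sketch of the packed buffer size they produce, matching the (w + 1) / 2 rounding above:

  size_t PackedI420Size(int width, int height) {
    const int cw = (width + 1) / 2;   // Chroma width, rounded up.
    const int ch = (height + 1) / 2;  // Chroma height, rounded up.
    return static_cast<size_t>(width) * height +
           2 * static_cast<size_t>(cw) * ch;
  }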
diff --git a/common_video/video_frame.cc b/common_video/video_frame.cc
index cf6b9c8..fa23cc1 100644
--- a/common_video/video_frame.cc
+++ b/common_video/video_frame.cc
@@ -69,10 +69,8 @@
if (video_frame_buffer_ && video_frame_buffer_->IsMutable() &&
!video_frame_buffer_->native_handle() &&
width == video_frame_buffer_->width() &&
- height == video_frame_buffer_->height() &&
- stride_y == video_frame_buffer_->StrideY() &&
- stride_u == video_frame_buffer_->StrideU() &&
- stride_v == video_frame_buffer_->StrideV()) {
+ height == video_frame_buffer_->height() && stride_y == stride(kYPlane) &&
+ stride_u == stride(kUPlane) && stride_v == stride(kVPlane)) {
return;
}
@@ -95,9 +93,9 @@
const int expected_size_u = half_height * stride_u;
const int expected_size_v = half_height * stride_v;
CreateEmptyFrame(width, height, stride_y, stride_u, stride_v);
- memcpy(video_frame_buffer_->MutableDataY(), buffer_y, expected_size_y);
- memcpy(video_frame_buffer_->MutableDataU(), buffer_u, expected_size_u);
- memcpy(video_frame_buffer_->MutableDataV(), buffer_v, expected_size_v);
+ memcpy(buffer(kYPlane), buffer_y, expected_size_y);
+ memcpy(buffer(kUPlane), buffer_u, expected_size_u);
+ memcpy(buffer(kVPlane), buffer_v, expected_size_v);
rotation_ = rotation;
}
@@ -132,26 +130,22 @@
rotation_ = videoFrame.rotation_;
}
-// TODO(nisse): Delete. Besides test code, only one use, in
-// webrtcvideoengine2.cc:CreateBlackFrame.
+uint8_t* VideoFrame::buffer(PlaneType type) {
+ return video_frame_buffer_ ? video_frame_buffer_->MutableData(type)
+ : nullptr;
+}
+
+const uint8_t* VideoFrame::buffer(PlaneType type) const {
+ return video_frame_buffer_ ? video_frame_buffer_->data(type) : nullptr;
+}
+
int VideoFrame::allocated_size(PlaneType type) const {
const int plane_height = (type == kYPlane) ? height() : (height() + 1) / 2;
- int stride;
- switch (type) {
- case kYPlane:
- stride = video_frame_buffer_->StrideY();
- break;
- case kUPlane:
- stride = video_frame_buffer_->StrideU();
- break;
- case kVPlane:
- stride = video_frame_buffer_->StrideV();
- break;
- default:
- RTC_NOTREACHED();
- return 0;
- }
- return plane_height * stride;
+ return plane_height * stride(type);
+}
+
+int VideoFrame::stride(PlaneType type) const {
+ return video_frame_buffer_ ? video_frame_buffer_->stride(type) : 0;
}
int VideoFrame::width() const {
@@ -166,8 +160,7 @@
return !video_frame_buffer_;
}
-const rtc::scoped_refptr<VideoFrameBuffer>& VideoFrame::video_frame_buffer()
- const {
+rtc::scoped_refptr<VideoFrameBuffer> VideoFrame::video_frame_buffer() const {
return video_frame_buffer_;
}
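
Note that the restored accessors are null-tolerant, which matters for default-constructed frames. Illustrative expectations (not a test in this CL):

  VideoFrame frame;                          // No buffer attached yet.
  assert(frame.IsZeroSize());
  assert(frame.buffer(kYPlane) == nullptr);  // Null buffer => null data.
  assert(frame.stride(kYPlane) == 0);        // Null buffer => zero stride.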
diff --git a/media/base/videoframe.h b/media/base/videoframe.h
index 596706f..9e0fbfd 100644
--- a/media/base/videoframe.h
+++ b/media/base/videoframe.h
@@ -54,8 +54,6 @@
// Returns the underlying video frame buffer. This function is ok to call
// multiple times, but the returned object will refer to the same memory.
- // TODO(nisse): Change to return a const ref to a scoped_refptr, for
- // consistency with webrtc::VideoFrame.
virtual rtc::scoped_refptr<webrtc::VideoFrameBuffer> video_frame_buffer()
const = 0;
diff --git a/media/engine/webrtcvideoengine2.cc b/media/engine/webrtcvideoengine2.cc
index d2e6b2d..383f29a 100644
--- a/media/engine/webrtcvideoengine2.cc
+++ b/media/engine/webrtcvideoengine2.cc
@@ -1560,22 +1560,19 @@
DestroyVideoEncoder(&allocated_encoder_);
}
-static webrtc::VideoFrame CreateBlackFrame(int width,
- int height,
- int64_t render_time_ms_,
- webrtc::VideoRotation rotation) {
- webrtc::VideoFrame frame;
- frame.CreateEmptyFrame(width, height, width, (width + 1) / 2,
- (width + 1) / 2);
- memset(frame.video_frame_buffer()->MutableDataY(), 16,
- frame.allocated_size(webrtc::kYPlane));
- memset(frame.video_frame_buffer()->MutableDataU(), 128,
- frame.allocated_size(webrtc::kUPlane));
- memset(frame.video_frame_buffer()->MutableDataV(), 128,
- frame.allocated_size(webrtc::kVPlane));
- frame.set_rotation(rotation);
- frame.set_render_time_ms(render_time_ms_);
- return frame;
+static void CreateBlackFrame(webrtc::VideoFrame* video_frame,
+ int width,
+ int height,
+ webrtc::VideoRotation rotation) {
+ video_frame->CreateEmptyFrame(width, height, width, (width + 1) / 2,
+ (width + 1) / 2);
+ memset(video_frame->buffer(webrtc::kYPlane), 16,
+ video_frame->allocated_size(webrtc::kYPlane));
+ memset(video_frame->buffer(webrtc::kUPlane), 128,
+ video_frame->allocated_size(webrtc::kUPlane));
+ memset(video_frame->buffer(webrtc::kVPlane), 128,
+ video_frame->allocated_size(webrtc::kVPlane));
+ video_frame->set_rotation(rotation);
}
void WebRtcVideoChannel2::WebRtcVideoSendStream::OnFrame(
@@ -1633,17 +1630,19 @@
if (source == NULL) {
if (stream_ != NULL) {
LOG(LS_VERBOSE) << "Disabling capturer, sending black frame.";
+ webrtc::VideoFrame black_frame;
+
+ CreateBlackFrame(&black_frame, last_dimensions_.width,
+ last_dimensions_.height, last_rotation_);
+
// Force this black frame not to be dropped due to timestamp order
// check. As IncomingCapturedFrame will drop the frame if this frame's
// timestamp is less than or equal to last frame's timestamp, it is
// necessary to give this black frame a larger timestamp than the
// previous one.
last_frame_timestamp_ms_ += 1;
- stream_->Input()->IncomingCapturedFrame(
- CreateBlackFrame(last_dimensions_.width, last_dimensions_.height,
- last_frame_timestamp_ms_, last_rotation_));
-
-
+ black_frame.set_render_time_ms(last_frame_timestamp_ms_);
+ stream_->Input()->IncomingCapturedFrame(black_frame);
}
}
}
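
The constants in CreateBlackFrame encode black in limited-range YUV: luma 16 is black and 128 is the neutral chroma value. A hypothetical variant producing mid-gray instead would change only the Y plane:

  memset(video_frame->buffer(webrtc::kYPlane), 128,  // 128 = mid-gray luma.
         video_frame->allocated_size(webrtc::kYPlane));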
diff --git a/media/engine/webrtcvideoengine2_unittest.cc b/media/engine/webrtcvideoengine2_unittest.cc
index e89899e..0a04fc7 100644
--- a/media/engine/webrtcvideoengine2_unittest.cc
+++ b/media/engine/webrtcvideoengine2_unittest.cc
@@ -69,11 +69,11 @@
int height) {
video_frame->CreateEmptyFrame(
width, height, width, (width + 1) / 2, (width + 1) / 2);
- memset(video_frame->video_frame_buffer()->MutableDataY(), 16,
+ memset(video_frame->buffer(webrtc::kYPlane), 16,
video_frame->allocated_size(webrtc::kYPlane));
- memset(video_frame->video_frame_buffer()->MutableDataU(), 128,
+ memset(video_frame->buffer(webrtc::kUPlane), 128,
video_frame->allocated_size(webrtc::kUPlane));
- memset(video_frame->video_frame_buffer()->MutableDataV(), 128,
+ memset(video_frame->buffer(webrtc::kVPlane), 128,
video_frame->allocated_size(webrtc::kVPlane));
}
diff --git a/modules/video_capture/test/video_capture_unittest.cc b/modules/video_capture/test/video_capture_unittest.cc
index 7309f1b..e75ad03 100644
--- a/modules/video_capture/test/video_capture_unittest.cc
+++ b/modules/video_capture/test/video_capture_unittest.cc
@@ -23,7 +23,6 @@
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/sleep.h"
#include "webrtc/system_wrappers/include/tick_util.h"
-#include "webrtc/test/frame_utils.h"
#include "webrtc/video_frame.h"
using webrtc::CriticalSectionWrapper;
@@ -61,6 +60,32 @@
static const int kTestWidth = 352;
static const int kTestFramerate = 30;
+// Compares the content of two video frames.
+static bool CompareFrames(const webrtc::VideoFrame& frame1,
+ const webrtc::VideoFrame& frame2) {
+ bool result =
+ (frame1.stride(webrtc::kYPlane) == frame2.stride(webrtc::kYPlane)) &&
+ (frame1.stride(webrtc::kUPlane) == frame2.stride(webrtc::kUPlane)) &&
+ (frame1.stride(webrtc::kVPlane) == frame2.stride(webrtc::kVPlane)) &&
+ (frame1.width() == frame2.width()) &&
+ (frame1.height() == frame2.height());
+
+ if (!result)
+ return false;
+ for (int plane = 0; plane < webrtc::kNumOfPlanes; ++plane) {
+ webrtc::PlaneType plane_type = static_cast<webrtc::PlaneType>(plane);
+ int allocated_size1 = frame1.allocated_size(plane_type);
+ int allocated_size2 = frame2.allocated_size(plane_type);
+ if (allocated_size1 != allocated_size2)
+ return false;
+ const uint8_t* plane_buffer1 = frame1.buffer(plane_type);
+ const uint8_t* plane_buffer2 = frame2.buffer(plane_type);
+ if (memcmp(plane_buffer1, plane_buffer2, allocated_size1))
+ return false;
+ }
+ return true;
+}
+
class TestVideoCaptureCallback : public VideoCaptureDataCallback {
public:
TestVideoCaptureCallback()
@@ -107,7 +132,7 @@
incoming_frames_++;
last_render_time_ms_ = videoFrame.render_time_ms();
- last_frame_ = videoFrame.video_frame_buffer();
+ last_frame_.CopyFrame(videoFrame);
}
virtual void OnCaptureDelayChanged(const int32_t id,
@@ -143,8 +168,7 @@
bool CompareLastFrame(const webrtc::VideoFrame& frame) {
CriticalSectionScoped cs(capture_cs_.get());
- return webrtc::test::FrameBufsEqual(last_frame_,
- frame.video_frame_buffer());
+ return CompareFrames(last_frame_, frame);
}
void SetExpectedCaptureRotation(webrtc::VideoRotation rotation) {
@@ -159,7 +183,7 @@
int64_t last_render_time_ms_;
int incoming_frames_;
int timing_warnings_;
- rtc::scoped_refptr<webrtc::VideoFrameBuffer> last_frame_;
+ webrtc::VideoFrame last_frame_;
webrtc::VideoRotation rotate_frame_;
};
@@ -424,11 +448,10 @@
test_frame_.CreateEmptyFrame(kTestWidth, kTestHeight, kTestWidth,
((kTestWidth + 1) / 2), (kTestWidth + 1) / 2);
SleepMs(1); // Wait 1ms so that two tests can't have the same timestamp.
- memset(test_frame_.video_frame_buffer()->MutableDataY(), 127,
- kTestWidth * kTestHeight);
- memset(test_frame_.video_frame_buffer()->MutableDataU(), 127,
+ memset(test_frame_.buffer(webrtc::kYPlane), 127, kTestWidth * kTestHeight);
+ memset(test_frame_.buffer(webrtc::kUPlane), 127,
((kTestWidth + 1) / 2) * ((kTestHeight + 1) / 2));
- memset(test_frame_.video_frame_buffer()->MutableDataV(), 127,
+ memset(test_frame_.buffer(webrtc::kVPlane), 127,
((kTestWidth + 1) / 2) * ((kTestHeight + 1) / 2));
capture_module_->RegisterCaptureDataCallback(capture_callback_);
diff --git a/modules/video_capture/video_capture.gypi b/modules/video_capture/video_capture.gypi
index a2b2f58..c80f2bf 100644
--- a/modules/video_capture/video_capture.gypi
+++ b/modules/video_capture/video_capture.gypi
@@ -172,7 +172,6 @@
'video_capture_module_internal_impl',
'webrtc_utility',
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
- '<(webrtc_root)/test/test.gyp:video_test_common',
'<(DEPTH)/testing/gtest.gyp:gtest',
],
'sources': [
diff --git a/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
index f560a37..e98666d 100644
--- a/modules/video_coding/codecs/h264/h264_decoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
@@ -129,12 +129,10 @@
video_frame->set_video_frame_buffer(
decoder->pool_.CreateBuffer(width, height));
// DCHECK that we have a contiguous buffer as is required.
- RTC_DCHECK_EQ(video_frame->video_frame_buffer()->DataU(),
- video_frame->video_frame_buffer()->DataY() +
- video_frame->allocated_size(kYPlane));
- RTC_DCHECK_EQ(video_frame->video_frame_buffer()->DataV(),
- video_frame->video_frame_buffer()->DataU() +
- video_frame->allocated_size(kUPlane));
+ RTC_DCHECK_EQ(video_frame->buffer(kUPlane),
+ video_frame->buffer(kYPlane) + video_frame->allocated_size(kYPlane));
+ RTC_DCHECK_EQ(video_frame->buffer(kVPlane),
+ video_frame->buffer(kUPlane) + video_frame->allocated_size(kUPlane));
int total_size = video_frame->allocated_size(kYPlane) +
video_frame->allocated_size(kUPlane) +
video_frame->allocated_size(kVPlane);
@@ -143,18 +141,12 @@
av_frame->reordered_opaque = context->reordered_opaque;
// Set |av_frame| members as required by FFmpeg.
- av_frame->data[kYPlaneIndex] =
- video_frame->video_frame_buffer()->MutableDataY();
- av_frame->linesize[kYPlaneIndex] =
- video_frame->video_frame_buffer()->StrideY();
- av_frame->data[kUPlaneIndex] =
- video_frame->video_frame_buffer()->MutableDataU();
- av_frame->linesize[kUPlaneIndex] =
- video_frame->video_frame_buffer()->StrideU();
- av_frame->data[kVPlaneIndex] =
- video_frame->video_frame_buffer()->MutableDataV();
- av_frame->linesize[kVPlaneIndex] =
- video_frame->video_frame_buffer()->StrideV();
+ av_frame->data[kYPlaneIndex] = video_frame->buffer(kYPlane);
+ av_frame->linesize[kYPlaneIndex] = video_frame->stride(kYPlane);
+ av_frame->data[kUPlaneIndex] = video_frame->buffer(kUPlane);
+ av_frame->linesize[kUPlaneIndex] = video_frame->stride(kUPlane);
+ av_frame->data[kVPlaneIndex] = video_frame->buffer(kVPlane);
+ av_frame->linesize[kVPlaneIndex] = video_frame->stride(kVPlane);
RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data);
av_frame->buf[0] = av_buffer_create(av_frame->data[kYPlaneIndex],
@@ -347,12 +339,9 @@
VideoFrame* video_frame = static_cast<VideoFrame*>(
av_buffer_get_opaque(av_frame_->buf[0]));
RTC_DCHECK(video_frame);
- RTC_CHECK_EQ(av_frame_->data[kYPlane],
- video_frame->video_frame_buffer()->DataY());
- RTC_CHECK_EQ(av_frame_->data[kUPlane],
- video_frame->video_frame_buffer()->DataU());
- RTC_CHECK_EQ(av_frame_->data[kVPlane],
- video_frame->video_frame_buffer()->DataV());
+ RTC_CHECK_EQ(av_frame_->data[kYPlane], video_frame->buffer(kYPlane));
+ RTC_CHECK_EQ(av_frame_->data[kUPlane], video_frame->buffer(kUPlane));
+ RTC_CHECK_EQ(av_frame_->data[kVPlane], video_frame->buffer(kVPlane));
video_frame->set_timestamp(input_image._timeStamp);
// The decoded image may be larger than what is supposed to be visible, see
@@ -363,9 +352,9 @@
video_frame->set_video_frame_buffer(
new rtc::RefCountedObject<WrappedI420Buffer>(
av_frame_->width, av_frame_->height,
- buf->DataY(), buf->StrideY(),
- buf->DataU(), buf->StrideU(),
- buf->DataV(), buf->StrideV(),
+ buf->data(kYPlane), buf->stride(kYPlane),
+ buf->data(kUPlane), buf->stride(kUPlane),
+ buf->data(kVPlane), buf->stride(kVPlane),
rtc::KeepRefUntilDone(buf)));
}
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
index 4a08b88..18eccb2 100644
--- a/modules/video_coding/codecs/h264/h264_encoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
@@ -368,12 +368,12 @@
picture.iPicHeight = frame.height();
picture.iColorFormat = EVideoFormatType::videoFormatI420;
picture.uiTimeStamp = frame.ntp_time_ms();
- picture.iStride[0] = frame.video_frame_buffer()->StrideY();
- picture.iStride[1] = frame.video_frame_buffer()->StrideU();
- picture.iStride[2] = frame.video_frame_buffer()->StrideV();
- picture.pData[0] = const_cast<uint8_t*>(frame.video_frame_buffer()->DataY());
- picture.pData[1] = const_cast<uint8_t*>(frame.video_frame_buffer()->DataU());
- picture.pData[2] = const_cast<uint8_t*>(frame.video_frame_buffer()->DataV());
+ picture.iStride[0] = frame.stride(kYPlane);
+ picture.iStride[1] = frame.stride(kUPlane);
+ picture.iStride[2] = frame.stride(kVPlane);
+ picture.pData[0] = const_cast<uint8_t*>(frame.buffer(kYPlane));
+ picture.pData[1] = const_cast<uint8_t*>(frame.buffer(kUPlane));
+ picture.pData[2] = const_cast<uint8_t*>(frame.buffer(kVPlane));
// EncodeFrame output.
SFrameBSInfo info;
diff --git a/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc b/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
index 5f6a231..7d04f23 100644
--- a/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
+++ b/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
@@ -168,14 +168,10 @@
int dst_stride_uv = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 1);
// Convert I420 to NV12.
int ret = libyuv::I420ToNV12(
- frame.video_frame_buffer()->DataY(),
- frame.video_frame_buffer()->StrideY(),
- frame.video_frame_buffer()->DataU(),
- frame.video_frame_buffer()->StrideU(),
- frame.video_frame_buffer()->DataV(),
- frame.video_frame_buffer()->StrideV(),
- dst_y, dst_stride_y, dst_uv, dst_stride_uv,
- frame.width(), frame.height());
+ frame.buffer(webrtc::kYPlane), frame.stride(webrtc::kYPlane),
+ frame.buffer(webrtc::kUPlane), frame.stride(webrtc::kUPlane),
+ frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane), dst_y,
+ dst_stride_y, dst_uv, dst_stride_uv, frame.width(), frame.height());
CVPixelBufferUnlockBaseAddress(pixel_buffer, 0);
if (ret) {
LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
diff --git a/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc b/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
index be55133..cbd4ec1 100644
--- a/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
+++ b/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
@@ -301,21 +301,14 @@
// Aligning stride values based on width.
dst_frame.CreateEmptyFrame(dst_width, dst_height, dst_width,
(dst_width + 1) / 2, (dst_width + 1) / 2);
- libyuv::I420Scale(input_image.video_frame_buffer()->DataY(),
- input_image.video_frame_buffer()->StrideY(),
- input_image.video_frame_buffer()->DataU(),
- input_image.video_frame_buffer()->StrideU(),
- input_image.video_frame_buffer()->DataV(),
- input_image.video_frame_buffer()->StrideV(),
- src_width, src_height,
- dst_frame.video_frame_buffer()->MutableDataY(),
- dst_frame.video_frame_buffer()->StrideY(),
- dst_frame.video_frame_buffer()->MutableDataU(),
- dst_frame.video_frame_buffer()->StrideU(),
- dst_frame.video_frame_buffer()->MutableDataV(),
- dst_frame.video_frame_buffer()->StrideV(),
- dst_width, dst_height,
- libyuv::kFilterBilinear);
+ libyuv::I420Scale(
+ input_image.buffer(kYPlane), input_image.stride(kYPlane),
+ input_image.buffer(kUPlane), input_image.stride(kUPlane),
+ input_image.buffer(kVPlane), input_image.stride(kVPlane), src_width,
+ src_height, dst_frame.buffer(kYPlane), dst_frame.stride(kYPlane),
+ dst_frame.buffer(kUPlane), dst_frame.stride(kUPlane),
+ dst_frame.buffer(kVPlane), dst_frame.stride(kVPlane), dst_width,
+ dst_height, libyuv::kFilterBilinear);
dst_frame.set_timestamp(input_image.timestamp());
dst_frame.set_render_time_ms(input_image.render_time_ms());
streaminfos_[stream_idx].encoder->Encode(dst_frame, codec_specific_info,
diff --git a/modules/video_coding/codecs/vp8/simulcast_unittest.h b/modules/video_coding/codecs/vp8/simulcast_unittest.h
index a840a61..2b2aa5d 100644
--- a/modules/video_coding/codecs/vp8/simulcast_unittest.h
+++ b/modules/video_coding/codecs/vp8/simulcast_unittest.h
@@ -119,13 +119,13 @@
Vp8TestDecodedImageCallback() : decoded_frames_(0) {}
int32_t Decoded(VideoFrame& decoded_image) override {
for (int i = 0; i < decoded_image.width(); ++i) {
- EXPECT_NEAR(kColorY, decoded_image.video_frame_buffer()->DataY()[i], 1);
+ EXPECT_NEAR(kColorY, decoded_image.buffer(kYPlane)[i], 1);
}
// TODO(mikhal): Verify the difference between U,V and the original.
for (int i = 0; i < ((decoded_image.width() + 1) / 2); ++i) {
- EXPECT_NEAR(kColorU, decoded_image.video_frame_buffer()->DataU()[i], 4);
- EXPECT_NEAR(kColorV, decoded_image.video_frame_buffer()->DataV()[i], 4);
+ EXPECT_NEAR(kColorU, decoded_image.buffer(kUPlane)[i], 4);
+ EXPECT_NEAR(kColorV, decoded_image.buffer(kVPlane)[i], 4);
}
decoded_frames_++;
return 0;
@@ -222,40 +222,26 @@
TestVp8Simulcast(VP8Encoder* encoder, VP8Decoder* decoder)
: encoder_(encoder), decoder_(decoder) {}
- static void SetPlane(uint8_t* data,
- uint8_t value,
- int width,
- int height,
- int stride) {
- for (int i = 0; i < height; i++, data += stride) {
+ // Fills in a VideoFrame from |plane_colors|.
+ static void CreateImage(VideoFrame* frame, int plane_colors[kNumOfPlanes]) {
+ for (int plane_num = 0; plane_num < kNumOfPlanes; ++plane_num) {
+ int width =
+ (plane_num != kYPlane ? (frame->width() + 1) / 2 : frame->width());
+ int height =
+ (plane_num != kYPlane ? (frame->height() + 1) / 2 : frame->height());
+ PlaneType plane_type = static_cast<PlaneType>(plane_num);
+ uint8_t* data = frame->buffer(plane_type);
// Setting allocated area to zero - setting only image size to
// requested values - will make it easier to distinguish between image
// size and frame size (accounting for stride).
- memset(data, value, width);
- memset(data + width, 0, stride - width);
+ memset(frame->buffer(plane_type), 0, frame->allocated_size(plane_type));
+ for (int i = 0; i < height; i++) {
+ memset(data, plane_colors[plane_num], width);
+ data += frame->stride(plane_type);
+ }
}
}
- // Fills in an VideoFrameBuffer from |plane_colors|.
- static void CreateImage(const rtc::scoped_refptr<VideoFrameBuffer>& buffer,
- int plane_colors[kNumOfPlanes]) {
- int width = buffer->width();
- int height = buffer->height();
- int chroma_width = (width + 1) / 2;
- int chroma_height = (height + 1) / 2;
-
- SetPlane(buffer->MutableDataY(), plane_colors[0],
- width, height, buffer->StrideY());
-
- SetPlane(buffer->MutableDataU(), plane_colors[1],
- chroma_width, chroma_height,
- buffer->StrideU());
-
- SetPlane(buffer->MutableDataV(), plane_colors[2],
- chroma_width, chroma_height,
- buffer->StrideV());
- }
-
static void DefaultSettings(VideoCodec* settings,
const int* temporal_layer_profile) {
assert(settings);
@@ -319,11 +305,11 @@
int half_width = (kDefaultWidth + 1) / 2;
input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, kDefaultWidth,
half_width, half_width);
- memset(input_frame_.video_frame_buffer()->MutableDataY(), 0,
+ memset(input_frame_.buffer(kYPlane), 0,
input_frame_.allocated_size(kYPlane));
- memset(input_frame_.video_frame_buffer()->MutableDataU(), 0,
+ memset(input_frame_.buffer(kUPlane), 0,
input_frame_.allocated_size(kUPlane));
- memset(input_frame_.video_frame_buffer()->MutableDataV(), 0,
+ memset(input_frame_.buffer(kVPlane), 0,
input_frame_.allocated_size(kVPlane));
}
@@ -569,11 +555,11 @@
int half_width = (settings_.width + 1) / 2;
input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
settings_.width, half_width, half_width);
- memset(input_frame_.video_frame_buffer()->MutableDataY(), 0,
+ memset(input_frame_.buffer(kYPlane), 0,
input_frame_.allocated_size(kYPlane));
- memset(input_frame_.video_frame_buffer()->MutableDataU(), 0,
+ memset(input_frame_.buffer(kUPlane), 0,
input_frame_.allocated_size(kUPlane));
- memset(input_frame_.video_frame_buffer()->MutableDataV(), 0,
+ memset(input_frame_.buffer(kVPlane), 0,
input_frame_.allocated_size(kVPlane));
// The for loop above did not set the bitrate of the highest layer.
@@ -610,11 +596,11 @@
half_width = (settings_.width + 1) / 2;
input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
settings_.width, half_width, half_width);
- memset(input_frame_.video_frame_buffer()->MutableDataY(), 0,
+ memset(input_frame_.buffer(kYPlane), 0,
input_frame_.allocated_size(kYPlane));
- memset(input_frame_.video_frame_buffer()->MutableDataU(), 0,
+ memset(input_frame_.buffer(kUPlane), 0,
input_frame_.allocated_size(kUPlane));
- memset(input_frame_.video_frame_buffer()->MutableDataV(), 0,
+ memset(input_frame_.buffer(kVPlane), 0,
input_frame_.allocated_size(kVPlane));
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
}
@@ -705,7 +691,7 @@
plane_offset[kYPlane] = kColorY;
plane_offset[kUPlane] = kColorU;
plane_offset[kVPlane] = kColorV;
- CreateImage(input_frame_.video_frame_buffer(), plane_offset);
+ CreateImage(&input_frame_, plane_offset);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
int picture_id = -1;
@@ -721,7 +707,7 @@
plane_offset[kYPlane] += 1;
plane_offset[kUPlane] += 1;
plane_offset[kVPlane] += 1;
- CreateImage(input_frame_.video_frame_buffer(), plane_offset);
+ CreateImage(&input_frame_, plane_offset);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
@@ -729,7 +715,7 @@
plane_offset[kYPlane] += 1;
plane_offset[kUPlane] += 1;
plane_offset[kVPlane] += 1;
- CreateImage(input_frame_.video_frame_buffer(), plane_offset);
+ CreateImage(&input_frame_, plane_offset);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
@@ -738,7 +724,7 @@
plane_offset[kYPlane] += 1;
plane_offset[kUPlane] += 1;
plane_offset[kVPlane] += 1;
- CreateImage(input_frame_.video_frame_buffer(), plane_offset);
+ CreateImage(&input_frame_, plane_offset);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
@@ -753,7 +739,7 @@
plane_offset[kYPlane] = kColorY;
plane_offset[kUPlane] = kColorU;
plane_offset[kVPlane] = kColorV;
- CreateImage(input_frame_.video_frame_buffer(), plane_offset);
+ CreateImage(&input_frame_, plane_offset);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, &codec_specific, NULL));
@@ -912,7 +898,7 @@
plane_offset[kYPlane] = kColorY;
plane_offset[kUPlane] = kColorU;
plane_offset[kVPlane] = kColorV;
- CreateImage(input_frame_.video_frame_buffer(), plane_offset);
+ CreateImage(&input_frame_, plane_offset);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
@@ -920,7 +906,7 @@
plane_offset[kYPlane] += 1;
plane_offset[kUPlane] += 1;
plane_offset[kVPlane] += 1;
- CreateImage(input_frame_.video_frame_buffer(), plane_offset);
+ CreateImage(&input_frame_, plane_offset);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
diff --git a/modules/video_coding/codecs/vp8/vp8_impl.cc b/modules/video_coding/codecs/vp8/vp8_impl.cc
index 546fd73..3eb47ee 100644
--- a/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -752,18 +752,15 @@
// Image in vpx_image_t format.
// Input image is const. VP8's raw image is not defined as const.
raw_images_[0].planes[VPX_PLANE_Y] =
- const_cast<uint8_t*>(input_image.video_frame_buffer()->DataY());
+ const_cast<uint8_t*>(input_image.buffer(kYPlane));
raw_images_[0].planes[VPX_PLANE_U] =
- const_cast<uint8_t*>(input_image.video_frame_buffer()->DataU());
+ const_cast<uint8_t*>(input_image.buffer(kUPlane));
raw_images_[0].planes[VPX_PLANE_V] =
- const_cast<uint8_t*>(input_image.video_frame_buffer()->DataV());
+ const_cast<uint8_t*>(input_image.buffer(kVPlane));
- raw_images_[0].stride[VPX_PLANE_Y] =
- input_image.video_frame_buffer()->StrideY();
- raw_images_[0].stride[VPX_PLANE_U] =
- input_image.video_frame_buffer()->StrideU();
- raw_images_[0].stride[VPX_PLANE_V] =
- input_image.video_frame_buffer()->StrideV();
+ raw_images_[0].stride[VPX_PLANE_Y] = input_image.stride(kYPlane);
+ raw_images_[0].stride[VPX_PLANE_U] = input_image.stride(kUPlane);
+ raw_images_[0].stride[VPX_PLANE_V] = input_image.stride(kVPlane);
for (size_t i = 1; i < encoders_.size(); ++i) {
// Scale the image down a number of times by downsampling factor
@@ -1360,12 +1357,9 @@
libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
- decoded_image.video_frame_buffer()->MutableDataY(),
- decoded_image.video_frame_buffer()->StrideY(),
- decoded_image.video_frame_buffer()->MutableDataU(),
- decoded_image.video_frame_buffer()->StrideU(),
- decoded_image.video_frame_buffer()->MutableDataV(),
- decoded_image.video_frame_buffer()->StrideV(),
+ decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane),
+ decoded_image.buffer(kUPlane), decoded_image.stride(kUPlane),
+ decoded_image.buffer(kVPlane), decoded_image.stride(kVPlane),
img->d_w, img->d_h);
decoded_image.set_ntp_time_ms(ntp_time_ms);
int ret = decode_complete_callback_->Decoded(decoded_image);
diff --git a/modules/video_coding/codecs/vp9/vp9_impl.cc b/modules/video_coding/codecs/vp9/vp9_impl.cc
index 0fd556c..9d06b6b 100644
--- a/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -500,15 +500,12 @@
// Image in vpx_image_t format.
// Input image is const. VPX's raw image is not defined as const.
- raw_->planes[VPX_PLANE_Y] =
- const_cast<uint8_t*>(input_image.video_frame_buffer()->DataY());
- raw_->planes[VPX_PLANE_U] =
- const_cast<uint8_t*>(input_image.video_frame_buffer()->DataU());
- raw_->planes[VPX_PLANE_V] =
- const_cast<uint8_t*>(input_image.video_frame_buffer()->DataV());
- raw_->stride[VPX_PLANE_Y] = input_image.video_frame_buffer()->StrideY();
- raw_->stride[VPX_PLANE_U] = input_image.video_frame_buffer()->StrideU();
- raw_->stride[VPX_PLANE_V] = input_image.video_frame_buffer()->StrideV();
+ raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(input_image.buffer(kYPlane));
+ raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(input_image.buffer(kUPlane));
+ raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(input_image.buffer(kVPlane));
+ raw_->stride[VPX_PLANE_Y] = input_image.stride(kYPlane);
+ raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane);
+ raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane);
vpx_enc_frame_flags_t flags = 0;
bool send_keyframe = (frame_type == kVideoFrameKey);
diff --git a/modules/video_processing/content_analysis.cc b/modules/video_processing/content_analysis.cc
index e8b2af4..54c04da 100644
--- a/modules/video_processing/content_analysis.cc
+++ b/modules/video_processing/content_analysis.cc
@@ -60,7 +60,7 @@
return NULL;
}
// Only interested in the Y plane.
- orig_frame_ = inputFrame.video_frame_buffer()->DataY();
+ orig_frame_ = inputFrame.buffer(kYPlane);
// Compute spatial metrics: 3 spatial prediction errors.
(this->*ComputeSpatialMetrics)();
diff --git a/modules/video_processing/test/video_processing_unittest.cc b/modules/video_processing/test/video_processing_unittest.cc
index 37b2c79..f2667e7 100644
--- a/modules/video_processing/test/video_processing_unittest.cc
+++ b/modules/video_processing/test/video_processing_unittest.cc
@@ -17,7 +17,6 @@
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/system_wrappers/include/tick_util.h"
-#include "webrtc/test/frame_utils.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
@@ -72,12 +71,9 @@
video_frame_.CreateEmptyFrame(width_, height_, width_,
half_width_, half_width_);
// Clear video frame so DrMemory/Valgrind will allow reads of the buffer.
- memset(video_frame_.video_frame_buffer()->MutableDataY(), 0,
- video_frame_.allocated_size(kYPlane));
- memset(video_frame_.video_frame_buffer()->MutableDataU(), 0,
- video_frame_.allocated_size(kUPlane));
- memset(video_frame_.video_frame_buffer()->MutableDataV(), 0,
- video_frame_.allocated_size(kVPlane));
+ memset(video_frame_.buffer(kYPlane), 0, video_frame_.allocated_size(kYPlane));
+ memset(video_frame_.buffer(kUPlane), 0, video_frame_.allocated_size(kUPlane));
+ memset(video_frame_.buffer(kVPlane), 0, video_frame_.allocated_size(kVPlane));
const std::string video_file =
webrtc::test::ResourcePath("foreman_cif", "yuv");
source_file_ = fopen(video_file.c_str(), "rb");
diff --git a/modules/video_processing/video_denoiser.cc b/modules/video_processing/video_denoiser.cc
index 61a11fe..c1cab81 100644
--- a/modules/video_processing/video_denoiser.cc
+++ b/modules/video_processing/video_denoiser.cc
@@ -81,19 +81,17 @@
height_ = frame.height();
mb_cols_ = width_ >> 4;
mb_rows_ = height_ >> 4;
- stride_y_ = frame.video_frame_buffer()->StrideY();
- stride_u_ = frame.video_frame_buffer()->StrideU();
- stride_v_ = frame.video_frame_buffer()->StrideV();
+ stride_y_ = frame.stride(kYPlane);
+ stride_u_ = frame.stride(kUPlane);
+ stride_v_ = frame.stride(kVPlane);
// Allocate an empty buffer for denoised_frame_prev.
denoised_frame_prev->CreateEmptyFrame(width_, height_, stride_y_, stride_u_,
stride_v_);
// Allocate and initialize denoised_frame with key frame.
- denoised_frame->CreateFrame(
- frame.video_frame_buffer()->DataY(),
- frame.video_frame_buffer()->DataU(),
- frame.video_frame_buffer()->DataV(),
- width_, height_, stride_y_, stride_u_, stride_v_, kVideoRotation_0);
+ denoised_frame->CreateFrame(frame.buffer(kYPlane), frame.buffer(kUPlane),
+ frame.buffer(kVPlane), width_, height_, stride_y_,
+ stride_u_, stride_v_, kVideoRotation_0);
// Set time parameters to the output frame.
denoised_frame->set_timestamp(frame.timestamp());
denoised_frame->set_render_time_ms(frame.render_time_ms());
@@ -238,14 +236,13 @@
}
// Set buffer pointers.
- const uint8_t* y_src = frame.video_frame_buffer()->DataY();
- const uint8_t* u_src = frame.video_frame_buffer()->DataU();
- const uint8_t* v_src = frame.video_frame_buffer()->DataV();
- uint8_t* y_dst = denoised_frame->video_frame_buffer()->MutableDataY();
- uint8_t* u_dst = denoised_frame->video_frame_buffer()->MutableDataU();
- uint8_t* v_dst = denoised_frame->video_frame_buffer()->MutableDataV();
- uint8_t* y_dst_prev =
- denoised_frame_prev->video_frame_buffer()->MutableDataY();
+ const uint8_t* y_src = frame.buffer(kYPlane);
+ const uint8_t* u_src = frame.buffer(kUPlane);
+ const uint8_t* v_src = frame.buffer(kVPlane);
+ uint8_t* y_dst = denoised_frame->buffer(kYPlane);
+ uint8_t* u_dst = denoised_frame->buffer(kUPlane);
+ uint8_t* v_dst = denoised_frame->buffer(kVPlane);
+ uint8_t* y_dst_prev = denoised_frame_prev->buffer(kYPlane);
memset(x_density_.get(), 0, mb_cols_);
memset(y_density_.get(), 0, mb_rows_);
memset(moving_object_.get(), 1, mb_cols_ * mb_rows_);
diff --git a/test/frame_generator.cc b/test/frame_generator.cc
index efc072f..3287aba 100644
--- a/test/frame_generator.cc
+++ b/test/frame_generator.cc
@@ -39,12 +39,9 @@
uint8_t u = fabs(sin(angle_)) * 0xFF;
uint8_t v = fabs(cos(angle_)) * 0xFF;
- memset(frame_.video_frame_buffer()->MutableDataY(), 0x80,
- frame_.allocated_size(kYPlane));
- memset(frame_.video_frame_buffer()->MutableDataU(), u,
- frame_.allocated_size(kUPlane));
- memset(frame_.video_frame_buffer()->MutableDataV(), v,
- frame_.allocated_size(kVPlane));
+ memset(frame_.buffer(kYPlane), 0x80, frame_.allocated_size(kYPlane));
+ memset(frame_.buffer(kUPlane), u, frame_.allocated_size(kUPlane));
+ memset(frame_.buffer(kVPlane), v, frame_.allocated_size(kVPlane));
return &frame_;
}
@@ -203,24 +200,24 @@
int pixels_scrolled_y =
static_cast<int>(scroll_margin_y * scroll_factor + 0.5);
- int offset_y = (current_source_frame_->video_frame_buffer()->StrideY() *
+ int offset_y = (current_source_frame_->stride(PlaneType::kYPlane) *
pixels_scrolled_y) +
pixels_scrolled_x;
- int offset_u = (current_source_frame_->video_frame_buffer()->StrideU() *
+ int offset_u = (current_source_frame_->stride(PlaneType::kUPlane) *
(pixels_scrolled_y / 2)) +
(pixels_scrolled_x / 2);
- int offset_v = (current_source_frame_->video_frame_buffer()->StrideV() *
+ int offset_v = (current_source_frame_->stride(PlaneType::kVPlane) *
(pixels_scrolled_y / 2)) +
(pixels_scrolled_x / 2);
current_frame_.CreateFrame(
- &current_source_frame_->video_frame_buffer()->DataY()[offset_y],
- &current_source_frame_->video_frame_buffer()->DataU()[offset_u],
- &current_source_frame_->video_frame_buffer()->DataV()[offset_v],
+ &current_source_frame_->buffer(PlaneType::kYPlane)[offset_y],
+ &current_source_frame_->buffer(PlaneType::kUPlane)[offset_u],
+ &current_source_frame_->buffer(PlaneType::kVPlane)[offset_v],
kTargetWidth, kTargetHeight,
- current_source_frame_->video_frame_buffer()->StrideY(),
- current_source_frame_->video_frame_buffer()->StrideU(),
- current_source_frame_->video_frame_buffer()->StrideV(),
+ current_source_frame_->stride(PlaneType::kYPlane),
+ current_source_frame_->stride(PlaneType::kUPlane),
+ current_source_frame_->stride(PlaneType::kVPlane),
kVideoRotation_0);
}
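
The scroll offsets above index into the source planes: scrolling (x, y) pixels skips y rows of stride bytes plus x bytes in the luma plane, and half of each in the subsampled chroma planes. A worked example with assumed strides:

  // Hypothetical: stride_y = 1280, stride_u = stride_v = 640, scrolled
  // 10 px right and 6 px down.
  int offset_y = 1280 * 6 + 10;           // 7690 bytes into the Y plane.
  int offset_u = 640 * (6 / 2) + 10 / 2;  // 1925 bytes into the U plane.
  int offset_v = 640 * (6 / 2) + 10 / 2;  // 1925 bytes into the V plane.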
diff --git a/test/frame_generator_unittest.cc b/test/frame_generator_unittest.cc
index 3d897f8..6376e2c 100644
--- a/test/frame_generator_unittest.cc
+++ b/test/frame_generator_unittest.cc
@@ -58,17 +58,17 @@
void CheckFrameAndMutate(VideoFrame* frame, uint8_t y, uint8_t u, uint8_t v) {
// Check that the frame is valid, has the correct color, and the timestamps are clean.
ASSERT_NE(nullptr, frame);
- const uint8_t* buffer;
+ uint8_t* buffer;
ASSERT_EQ(y_size, frame->allocated_size(PlaneType::kYPlane));
- buffer = frame->video_frame_buffer()->DataY();
+ buffer = frame->buffer(PlaneType::kYPlane);
for (int i = 0; i < y_size; ++i)
ASSERT_EQ(y, buffer[i]);
ASSERT_EQ(uv_size, frame->allocated_size(PlaneType::kUPlane));
- buffer = frame->video_frame_buffer()->DataU();
+ buffer = frame->buffer(PlaneType::kUPlane);
for (int i = 0; i < uv_size; ++i)
ASSERT_EQ(u, buffer[i]);
ASSERT_EQ(uv_size, frame->allocated_size(PlaneType::kVPlane));
- buffer = frame->video_frame_buffer()->DataV();
+ buffer = frame->buffer(PlaneType::kVPlane);
for (int i = 0; i < uv_size; ++i)
ASSERT_EQ(v, buffer[i]);
EXPECT_EQ(0, frame->ntp_time_ms());
diff --git a/video/video_capture_input_unittest.cc b/video/video_capture_input_unittest.cc
index 2da722b..8ec89c7 100644
--- a/video/video_capture_input_unittest.cc
+++ b/video/video_capture_input_unittest.cc
@@ -54,7 +54,8 @@
EXPECT_TRUE(input_->GetVideoFrame(&frame));
ASSERT_TRUE(frame.video_frame_buffer());
if (!frame.video_frame_buffer()->native_handle()) {
- output_frame_ybuffers_.push_back(frame.video_frame_buffer()->DataY());
+ output_frame_ybuffers_.push_back(
+ static_cast<const VideoFrame*>(&frame)->buffer(kYPlane));
}
output_frames_.push_back(
std::unique_ptr<VideoFrame>(new VideoFrame(frame)));
@@ -180,7 +181,8 @@
std::vector<const uint8_t*> ybuffer_pointers;
for (int i = 0; i < kNumFrame; ++i) {
input_frames_.push_back(CreateVideoFrame(static_cast<uint8_t>(i + 1)));
- ybuffer_pointers.push_back(input_frames_[i]->video_frame_buffer()->DataY());
+ const VideoFrame* const_input_frame = input_frames_[i].get();
+ ybuffer_pointers.push_back(const_input_frame->buffer(kYPlane));
AddInputFrame(input_frames_[i].get());
WaitOutputFrame();
}
diff --git a/video/video_encoder_unittest.cc b/video/video_encoder_unittest.cc
index f2c3ea6..0f28f89 100644
--- a/video/video_encoder_unittest.cc
+++ b/video/video_encoder_unittest.cc
@@ -107,11 +107,11 @@
void VideoEncoderSoftwareFallbackWrapperTest::EncodeFrame() {
frame_.CreateEmptyFrame(kWidth, kHeight, kWidth, (kWidth + 1) / 2,
(kWidth + 1) / 2);
- memset(frame_.video_frame_buffer()->MutableDataY(), 16,
+ memset(frame_.buffer(webrtc::kYPlane), 16,
frame_.allocated_size(webrtc::kYPlane));
- memset(frame_.video_frame_buffer()->MutableDataU(), 128,
+ memset(frame_.buffer(webrtc::kUPlane), 128,
frame_.allocated_size(webrtc::kUPlane));
- memset(frame_.video_frame_buffer()->MutableDataV(), 128,
+ memset(frame_.buffer(webrtc::kVPlane), 128,
frame_.allocated_size(webrtc::kVPlane));
std::vector<FrameType> types(1, kVideoFrameKey);
diff --git a/video_frame.h b/video_frame.h
index 4dc3411..409abf3 100644
--- a/video_frame.h
+++ b/video_frame.h
@@ -65,9 +65,17 @@
// reference to the video buffer also retained by |videoFrame|.
void ShallowCopy(const VideoFrame& videoFrame);
+ // Get pointer to buffer per plane.
+ uint8_t* buffer(PlaneType type);
+ // Overloading with const.
+ const uint8_t* buffer(PlaneType type) const;
+
// Get allocated size per plane.
int allocated_size(PlaneType type) const;
+ // Get allocated stride per plane.
+ int stride(PlaneType type) const;
+
// Get frame width.
int width() const;
@@ -116,10 +124,7 @@
// Return the underlying buffer. Never nullptr for a properly
// initialized VideoFrame.
- // Creating a new reference breaks the HasOneRef and IsMutable
- // logic. So return a const ref to our reference.
- const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& video_frame_buffer()
- const;
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> video_frame_buffer() const;
// Set the underlying buffer.
void set_video_frame_buffer(