iOS HW H264 support.
First step towards supporting H264 on iOS. More tuning/experimentation
required in future CLs. Tested using AppRTCDemo on iPhone6 + iPad Mini.
Future work to get it working on OS X, the simulator (currently renders a
black screen) and with the Android AppRTCDemo. Currently protected with a
compile-time guard.
BUG=4081
R=andrew@webrtc.org, haysc@webrtc.org, holmer@google.com, jiayl@webrtc.org, kjellander@webrtc.org, pbos@webrtc.org, phoglund@webrtc.org, stefan@webrtc.org
Review URL: https://codereview.webrtc.org/1187573004.
Cr-Commit-Position: refs/heads/master@{#9515}
diff --git a/talk/app/webrtc/objc/RTCPeerConnectionFactory.mm b/talk/app/webrtc/objc/RTCPeerConnectionFactory.mm
index 50ea47d..b7b8966 100644
--- a/talk/app/webrtc/objc/RTCPeerConnectionFactory.mm
+++ b/talk/app/webrtc/objc/RTCPeerConnectionFactory.mm
@@ -54,7 +54,6 @@
#include "webrtc/base/logging.h"
#include "webrtc/base/ssladapter.h"
-
@implementation RTCPeerConnectionFactory {
rtc::scoped_ptr<rtc::Thread> _signalingThread;
rtc::scoped_ptr<rtc::Thread> _workerThread;
@@ -80,8 +79,9 @@
_workerThread.reset(new rtc::Thread());
result = _workerThread->Start();
NSAssert(result, @"Failed to start worker thread.");
+
_nativeFactory = webrtc::CreatePeerConnectionFactory(
- _signalingThread.get(), _workerThread.get(), NULL, NULL, NULL);
+ _signalingThread.get(), _workerThread.get(), nullptr, nullptr, nullptr);
NSAssert(_nativeFactory, @"Failed to initialize PeerConnectionFactory!");
// Uncomment to get sensitive logs emitted (to stderr or logcat).
// rtc::LogMessage::LogToDebug(rtc::LS_SENSITIVE);
diff --git a/talk/examples/objc/AppRTCDemo/ARDAppClient.m b/talk/examples/objc/AppRTCDemo/ARDAppClient.m
index 0f3c423..ac99ca2 100644
--- a/talk/examples/objc/AppRTCDemo/ARDAppClient.m
+++ b/talk/examples/objc/AppRTCDemo/ARDAppClient.m
@@ -42,6 +42,7 @@
#import "ARDCEODTURNClient.h"
#import "ARDJoinResponse.h"
#import "ARDMessageResponse.h"
+#import "ARDSDPUtils.h"
#import "ARDSignalingMessage.h"
#import "ARDUtilities.h"
#import "ARDWebSocketChannel.h"
@@ -344,10 +345,15 @@
[_delegate appClient:self didError:sdpError];
return;
}
+ // Prefer H264 if available.
+ RTCSessionDescription *sdpPreferringH264 =
+ [ARDSDPUtils descriptionForDescription:sdp
+ preferredVideoCodec:@"H264"];
[_peerConnection setLocalDescriptionWithDelegate:self
- sessionDescription:sdp];
+ sessionDescription:sdpPreferringH264];
ARDSessionDescriptionMessage *message =
- [[ARDSessionDescriptionMessage alloc] initWithDescription:sdp];
+ [[ARDSessionDescriptionMessage alloc]
+ initWithDescription:sdpPreferringH264];
[self sendSignalingMessage:message];
});
}
@@ -441,8 +447,12 @@
ARDSessionDescriptionMessage *sdpMessage =
(ARDSessionDescriptionMessage *)message;
RTCSessionDescription *description = sdpMessage.sessionDescription;
+ // Prefer H264 if available.
+ RTCSessionDescription *sdpPreferringH264 =
+ [ARDSDPUtils descriptionForDescription:description
+ preferredVideoCodec:@"H264"];
[_peerConnection setRemoteDescriptionWithDelegate:self
- sessionDescription:description];
+ sessionDescription:sdpPreferringH264];
break;
}
case kARDSignalingMessageTypeCandidate: {
diff --git a/talk/examples/objc/AppRTCDemo/ARDSDPUtils.h b/talk/examples/objc/AppRTCDemo/ARDSDPUtils.h
new file mode 100644
index 0000000..2f14e6d
--- /dev/null
+++ b/talk/examples/objc/AppRTCDemo/ARDSDPUtils.h
@@ -0,0 +1,41 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <Foundation/Foundation.h>
+
+@class RTCSessionDescription;
+
+@interface ARDSDPUtils : NSObject
+
+// Updates the original SDP description to instead prefer the specified video
+// codec. We do this by placing the specified codec at the beginning of the
+// codec list if it exists in the sdp.
++ (RTCSessionDescription *)
+ descriptionForDescription:(RTCSessionDescription *)description
+ preferredVideoCodec:(NSString *)codec;
+
+@end
diff --git a/talk/examples/objc/AppRTCDemo/ARDSDPUtils.m b/talk/examples/objc/AppRTCDemo/ARDSDPUtils.m
new file mode 100644
index 0000000..157d6fc
--- /dev/null
+++ b/talk/examples/objc/AppRTCDemo/ARDSDPUtils.m
@@ -0,0 +1,108 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import "ARDSDPUtils.h"
+
+#import "RTCSessionDescription.h"
+
+@implementation ARDSDPUtils
+
++ (RTCSessionDescription *)
+ descriptionForDescription:(RTCSessionDescription *)description
+ preferredVideoCodec:(NSString *)codec {
+ NSString *sdpString = description.description;
+ NSString *lineSeparator = @"\n";
+ NSString *mLineSeparator = @" ";
+ // Copied from PeerConnectionClient.java.
+ // TODO(tkchin): Move this to a shared C++ file.
+ NSMutableArray *lines =
+ [NSMutableArray arrayWithArray:
+ [sdpString componentsSeparatedByString:lineSeparator]];
+ int mLineIndex = -1;
+ NSString *codecRtpMap = nil;
+ // a=rtpmap:<payload type> <encoding name>/<clock rate>
+ // [/<encoding parameters>]
+ NSString *pattern =
+ [NSString stringWithFormat:@"^a=rtpmap:(\\d+) %@(/\\d+)+[\r]?$", codec];
+ NSRegularExpression *regex =
+ [NSRegularExpression regularExpressionWithPattern:pattern
+ options:0
+ error:nil];
+ for (NSInteger i = 0; (i < lines.count) && (mLineIndex == -1 || !codecRtpMap);
+ ++i) {
+ NSString *line = lines[i];
+ if ([line hasPrefix:@"m=video"]) {
+ mLineIndex = i;
+ continue;
+ }
+ NSTextCheckingResult *codecMatches =
+ [regex firstMatchInString:line
+ options:0
+ range:NSMakeRange(0, line.length)];
+ if (codecMatches) {
+ codecRtpMap =
+ [line substringWithRange:[codecMatches rangeAtIndex:1]];
+ continue;
+ }
+ }
+ if (mLineIndex == -1) {
+ NSLog(@"No m=video line, so can't prefer %@", codec);
+ return description;
+ }
+ if (!codecRtpMap) {
+ NSLog(@"No rtpmap for %@", codec);
+ return description;
+ }
+ NSArray *origMLineParts =
+ [lines[mLineIndex] componentsSeparatedByString:mLineSeparator];
+ if (origMLineParts.count > 3) {
+ NSMutableArray *newMLineParts =
+ [NSMutableArray arrayWithCapacity:origMLineParts.count];
+ NSInteger origPartIndex = 0;
+ // Format is: m=<media> <port> <proto> <fmt> ...
+ [newMLineParts addObject:origMLineParts[origPartIndex++]];
+ [newMLineParts addObject:origMLineParts[origPartIndex++]];
+ [newMLineParts addObject:origMLineParts[origPartIndex++]];
+ [newMLineParts addObject:codecRtpMap];
+ for (; origPartIndex < origMLineParts.count; ++origPartIndex) {
+ if (![codecRtpMap isEqualToString:origMLineParts[origPartIndex]]) {
+ [newMLineParts addObject:origMLineParts[origPartIndex]];
+ }
+ }
+ NSString *newMLine =
+ [newMLineParts componentsJoinedByString:mLineSeparator];
+ [lines replaceObjectAtIndex:mLineIndex
+ withObject:newMLine];
+ } else {
+ NSLog(@"Wrong SDP media description format: %@", lines[mLineIndex]);
+ }
+ NSString *mangledSdpString = [lines componentsJoinedByString:lineSeparator];
+ return [[RTCSessionDescription alloc] initWithType:description.type
+ sdp:mangledSdpString];
+}
+
+@end
diff --git a/talk/examples/objc/AppRTCDemo/tests/ARDAppClientTest.mm b/talk/examples/objc/AppRTCDemo/tests/ARDAppClientTest.mm
index 396f64f..47df526 100644
--- a/talk/examples/objc/AppRTCDemo/tests/ARDAppClientTest.mm
+++ b/talk/examples/objc/AppRTCDemo/tests/ARDAppClientTest.mm
@@ -31,8 +31,10 @@
#import "ARDAppClient+Internal.h"
#import "ARDJoinResponse+Internal.h"
#import "ARDMessageResponse+Internal.h"
+#import "ARDSDPUtils.h"
#import "RTCMediaConstraints.h"
#import "RTCPeerConnectionFactory.h"
+#import "RTCSessionDescription.h"
#include "webrtc/base/gunit.h"
#include "webrtc/base/ssladapter.h"
@@ -304,6 +306,27 @@
@end
+@interface ARDSDPUtilsTest : ARDTestCase
+- (void)testPreferVideoCodec;
+@end
+
+@implementation ARDSDPUtilsTest
+
+- (void)testPreferVideoCodec {
+ NSString *sdp = @("m=video 9 RTP/SAVPF 100 116 117 96 120\n"
+ "a=rtpmap:120 H264/90000\n");
+ NSString *expectedSdp = @("m=video 9 RTP/SAVPF 120 100 116 117 96\n"
+ "a=rtpmap:120 H264/90000\n");
+ RTCSessionDescription* desc =
+ [[RTCSessionDescription alloc] initWithType:@"offer" sdp:sdp];
+ RTCSessionDescription *h264Desc =
+ [ARDSDPUtils descriptionForDescription:desc
+ preferredVideoCodec:@"H264"];
+ EXPECT_TRUE([h264Desc.description isEqualToString:expectedSdp]);
+}
+
+@end
+
class SignalingTest : public ::testing::Test {
protected:
static void SetUpTestCase() {
@@ -320,3 +343,12 @@
[test testSession];
}
}
+
+TEST_F(SignalingTest, SDPTest) {
+ @autoreleasepool {
+ ARDSDPUtilsTest *test = [[ARDSDPUtilsTest alloc] init];
+ [test testPreferVideoCodec];
+ }
+}
+
+
diff --git a/talk/libjingle_examples.gyp b/talk/libjingle_examples.gyp
index 810a9ec..8a08481 100755
--- a/talk/libjingle_examples.gyp
+++ b/talk/libjingle_examples.gyp
@@ -173,6 +173,8 @@
'examples/objc/AppRTCDemo/ARDMessageResponse.m',
'examples/objc/AppRTCDemo/ARDMessageResponse+Internal.h',
'examples/objc/AppRTCDemo/ARDRoomServerClient.h',
+ 'examples/objc/AppRTCDemo/ARDSDPUtils.h',
+ 'examples/objc/AppRTCDemo/ARDSDPUtils.m',
'examples/objc/AppRTCDemo/ARDSignalingChannel.h',
'examples/objc/AppRTCDemo/ARDSignalingMessage.h',
'examples/objc/AppRTCDemo/ARDSignalingMessage.m',
diff --git a/talk/media/base/constants.cc b/talk/media/base/constants.cc
index 562dad4..0d0a33c 100644
--- a/talk/media/base/constants.cc
+++ b/talk/media/base/constants.cc
@@ -128,9 +128,11 @@
const char kVp8CodecName[] = "VP8";
const char kVp9CodecName[] = "VP9";
+const char kH264CodecName[] = "H264";
const int kDefaultVp8PlType = 100;
const int kDefaultVp9PlType = 101;
+const int kDefaultH264PlType = 107;
const int kDefaultRedPlType = 116;
const int kDefaultUlpfecType = 117;
const int kDefaultRtxVp8PlType = 96;
diff --git a/talk/media/base/constants.h b/talk/media/base/constants.h
index 84216fb..d92cb22 100644
--- a/talk/media/base/constants.h
+++ b/talk/media/base/constants.h
@@ -158,9 +158,11 @@
extern const char kVp8CodecName[];
extern const char kVp9CodecName[];
+extern const char kH264CodecName[];
extern const int kDefaultVp8PlType;
extern const int kDefaultVp9PlType;
+extern const int kDefaultH264PlType;
extern const int kDefaultRedPlType;
extern const int kDefaultUlpfecType;
extern const int kDefaultRtxVp8PlType;
diff --git a/talk/media/webrtc/webrtcvideoengine2.cc b/talk/media/webrtc/webrtcvideoengine2.cc
index d68d619..0a2152e 100644
--- a/talk/media/webrtc/webrtcvideoengine2.cc
+++ b/talk/media/webrtc/webrtcvideoengine2.cc
@@ -43,6 +43,7 @@
#include "webrtc/base/logging.h"
#include "webrtc/base/stringutils.h"
#include "webrtc/call.h"
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h"
#include "webrtc/system_wrappers/interface/field_trial.h"
#include "webrtc/system_wrappers/interface/trace_event.h"
@@ -157,6 +158,10 @@
webrtc::field_trial::FindFullName("WebRTC-SupportVP9");
return group_name == "Enabled" || group_name == "EnabledByFlag";
}
+ if (CodecNamesEq(codec_name, kH264CodecName)) {
+ return webrtc::H264Encoder::IsSupported() &&
+ webrtc::H264Decoder::IsSupported();
+ }
return false;
}
@@ -316,8 +321,6 @@
static const int kDefaultRtcpReceiverReportSsrc = 1;
-const char kH264CodecName[] = "H264";
-
const int kMinBandwidthBps = 30000;
const int kStartBandwidthBps = 300000;
const int kMaxBandwidthBps = 2000000;
@@ -331,6 +334,10 @@
}
codecs.push_back(MakeVideoCodecWithDefaultFeedbackParams(kDefaultVp8PlType,
kVp8CodecName));
+ if (CodecIsInternallySupported(kH264CodecName)) {
+ codecs.push_back(MakeVideoCodecWithDefaultFeedbackParams(kDefaultH264PlType,
+ kH264CodecName));
+ }
codecs.push_back(
VideoCodec::CreateRtxCodec(kDefaultRtxVp8PlType, kDefaultVp8PlType));
codecs.push_back(VideoCodec(kDefaultRedPlType, kRedCodecName));
@@ -1876,6 +1883,9 @@
} else if (type == webrtc::kVideoCodecVP9) {
return AllocatedEncoder(
webrtc::VideoEncoder::Create(webrtc::VideoEncoder::kVp9), type, false);
+ } else if (type == webrtc::kVideoCodecH264) {
+ return AllocatedEncoder(
+ webrtc::VideoEncoder::Create(webrtc::VideoEncoder::kH264), type, false);
}
// This shouldn't happen, we should not be trying to create something we don't
@@ -2284,6 +2294,11 @@
webrtc::VideoDecoder::Create(webrtc::VideoDecoder::kVp9), type, false);
}
+ if (type == webrtc::kVideoCodecH264) {
+ return AllocatedDecoder(
+ webrtc::VideoDecoder::Create(webrtc::VideoDecoder::kH264), type, false);
+ }
+
// This shouldn't happen, we should not be trying to create something we don't
// support.
DCHECK(false);
diff --git a/webrtc/BUILD.gn b/webrtc/BUILD.gn
index 3b54b56..1035b79 100644
--- a/webrtc/BUILD.gn
+++ b/webrtc/BUILD.gn
@@ -40,6 +40,9 @@
"WEBRTC_IOS",
]
}
+ if (is_ios && rtc_use_objc_h264) {
+ defines += [ "WEBRTC_OBJC_H264" ]
+ }
if (is_linux) {
defines += [ "WEBRTC_LINUX" ]
}
diff --git a/webrtc/build/common.gypi b/webrtc/build/common.gypi
index 0ab88c6..9335d35 100644
--- a/webrtc/build/common.gypi
+++ b/webrtc/build/common.gypi
@@ -124,6 +124,10 @@
# Determines whether NEON code will be built.
'build_with_neon%': 0,
+ # Enable this to use HW H.264 encoder/decoder on iOS/Mac PeerConnections.
+ # Enabling this may break interop with Android clients that support H264.
+ 'use_objc_h264%': 0,
+
'conditions': [
['build_with_chromium==1', {
# Exclude pulse audio on Chromium since its prerequisites don't require
@@ -333,6 +337,11 @@
'WEBRTC_IOS',
],
}],
+ ['OS=="ios" and use_objc_h264==1', {
+ 'defines': [
+ 'WEBRTC_OBJC_H264',
+ ],
+ }],
['OS=="linux"', {
'defines': [
'WEBRTC_LINUX',
diff --git a/webrtc/build/webrtc.gni b/webrtc/build/webrtc.gni
index fa05996..20fadab 100644
--- a/webrtc/build/webrtc.gni
+++ b/webrtc/build/webrtc.gni
@@ -109,6 +109,10 @@
rtc_build_with_neon = (current_cpu == "arm" &&
(arm_use_neon == 1 || arm_optionally_use_neon == 1)) ||
current_cpu == "arm64"
+
+ # Enable this to use HW H.264 encoder/decoder on iOS PeerConnections.
+ # Enabling this may break interop with Android clients that support H264.
+ rtc_use_objc_h264 = false
}
# Make it possible to provide custom locations for some libraries (move these
diff --git a/webrtc/modules/modules.gyp b/webrtc/modules/modules.gyp
index fc0673a..e44cfcc 100644
--- a/webrtc/modules/modules.gyp
+++ b/webrtc/modules/modules.gyp
@@ -20,6 +20,7 @@
'remote_bitrate_estimator/remote_bitrate_estimator.gypi',
'rtp_rtcp/rtp_rtcp.gypi',
'utility/utility.gypi',
+ 'video_coding/codecs/h264/h264.gypi',
'video_coding/codecs/i420/main/source/i420.gypi',
'video_coding/video_coding.gypi',
'video_capture/video_capture.gypi',
@@ -352,6 +353,9 @@
],
}],
['OS=="ios"', {
+ 'sources': [
+ 'video_coding/codecs/h264/h264_video_toolbox_nalu_unittest.cc',
+ ],
'mac_bundle_resources': [
'<(DEPTH)/resources/audio_coding/speech_mono_16kHz.pcm',
'<(DEPTH)/resources/audio_coding/testfile32kHz.pcm',
diff --git a/webrtc/modules/video_coding/BUILD.gn b/webrtc/modules/video_coding/BUILD.gn
index 88f9bba..e05ab85 100644
--- a/webrtc/modules/video_coding/BUILD.gn
+++ b/webrtc/modules/video_coding/BUILD.gn
@@ -81,6 +81,7 @@
deps = [
":video_coding_utility",
+ ":webrtc_h264",
":webrtc_i420",
":webrtc_vp8",
":webrtc_vp9",
@@ -115,6 +116,29 @@
]
}
+source_set("webrtc_h264") {
+ sources = [
+ "codecs/h264/h264.cc",
+ "codecs/h264/include/h264.h",
+ ]
+
+ configs += [ "../..:common_config" ]
+ public_configs = [ "../..:common_inherited_config" ]
+
+ if (is_clang) {
+ # Suppress warnings from Chrome's Clang plugins.
+ # See http://code.google.com/p/webrtc/issues/detail?id=163 for details.
+ configs -= [ "//build/config/clang:find_bad_constructs" ]
+ }
+
+ deps = [
+ "../../system_wrappers",
+ ]
+}
+
+# TODO(tkchin): Source set for webrtc_h264_video_toolbox. Currently not
+# possible to add, see https://crbug.com/297668.
+
source_set("webrtc_i420") {
sources = [
"codecs/i420/main/interface/i420.h",
diff --git a/webrtc/modules/video_coding/codecs/h264/h264.cc b/webrtc/modules/video_coding/codecs/h264/h264.cc
new file mode 100644
index 0000000..d4123a2
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
+
+#if defined(WEBRTC_IOS)
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h"
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h"
+#endif
+
+#include "webrtc/base/checks.h"
+
+namespace webrtc {
+
+// We need this file to be C++ only so it will compile properly for all
+// platforms. In order to write ObjC specific implementations we use private
+// externs. This function is defined in h264.mm.
+#if defined(WEBRTC_IOS)
+extern bool IsH264CodecSupportedObjC();
+#endif
+
+bool IsH264CodecSupported() {
+#if defined(WEBRTC_IOS)
+ return IsH264CodecSupportedObjC();
+#else
+ return false;
+#endif
+}
+
+H264Encoder* H264Encoder::Create() {
+ DCHECK(H264Encoder::IsSupported());
+#if defined(WEBRTC_IOS) && defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+ return new H264VideoToolboxEncoder();
+#else
+ RTC_NOTREACHED();
+ return nullptr;
+#endif
+}
+
+bool H264Encoder::IsSupported() {
+ return IsH264CodecSupported();
+}
+
+H264Decoder* H264Decoder::Create() {
+ DCHECK(H264Decoder::IsSupported());
+#if defined(WEBRTC_IOS) && defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+ return new H264VideoToolboxDecoder();
+#else
+ RTC_NOTREACHED();
+ return nullptr;
+#endif
+}
+
+bool H264Decoder::IsSupported() {
+ return IsH264CodecSupported();
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/h264/h264.gypi b/webrtc/modules/video_coding/codecs/h264/h264.gypi
new file mode 100644
index 0000000..a20865c
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264.gypi
@@ -0,0 +1,63 @@
+# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+ 'includes': [
+ '../../../../build/common.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'webrtc_h264',
+ 'type': 'static_library',
+ 'conditions': [
+ ['OS=="ios"', {
+ 'dependencies': [
+ 'webrtc_h264_video_toolbox',
+ ],
+ 'sources': [
+ 'h264_objc.mm',
+ ],
+ }],
+ ],
+ 'sources': [
+ 'h264.cc',
+ 'include/h264.h',
+ ],
+ }, # webrtc_h264
+ ],
+ 'conditions': [
+ ['OS=="ios"', {
+ 'targets': [
+ {
+ 'target_name': 'webrtc_h264_video_toolbox',
+ 'type': 'static_library',
+ 'dependencies': [
+ '<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
+ ],
+ 'link_settings': {
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': [
+ '-framework CoreMedia',
+ '-framework CoreVideo',
+ '-framework VideoToolbox',
+ ],
+ },
+ },
+ 'sources': [
+ 'h264_video_toolbox_decoder.cc',
+ 'h264_video_toolbox_decoder.h',
+ 'h264_video_toolbox_encoder.cc',
+ 'h264_video_toolbox_encoder.h',
+ 'h264_video_toolbox_nalu.cc',
+ 'h264_video_toolbox_nalu.h',
+ ],
+ }, # webrtc_h264_video_toolbox
+ ], # targets
+ }], # OS=="ios"
+ ], # conditions
+}
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_objc.mm b/webrtc/modules/video_coding/codecs/h264/h264_objc.mm
new file mode 100644
index 0000000..b9e0fc0
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_objc.mm
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
+
+#if defined(WEBRTC_IOS)
+#import <UIKit/UIKit.h>
+#endif
+
+namespace webrtc {
+
+bool IsH264CodecSupportedObjC() {
+#if defined(WEBRTC_OBJC_H264) && \
+ defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED) && \
+ defined(WEBRTC_IOS)
+ // Supported on iOS8+.
+ return [[[UIDevice currentDevice] systemVersion] doubleValue] >= 8.0;
+#else
+ // TODO(tkchin): Support OS/X once we stop mixing libstdc++ and libc++ on
+ // OSX 10.9.
+ return false;
+#endif
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
new file mode 100644
index 0000000..e905fd0
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h"
+
+#if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+
+#include "libyuv/convert.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/common_video/interface/video_frame_buffer.h"
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h"
+#include "webrtc/video_frame.h"
+
+namespace internal {
+
+// Convenience function for creating a dictionary.
+inline CFDictionaryRef CreateCFDictionary(CFTypeRef* keys,
+ CFTypeRef* values,
+ size_t size) {
+ return CFDictionaryCreate(nullptr, keys, values, size,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+}
+
+// Struct that we pass to the decoder per frame to decode. We receive it again
+// in the decoder callback.
+struct FrameDecodeParams {
+ FrameDecodeParams(webrtc::DecodedImageCallback* cb, int64_t ts)
+ : callback(cb), timestamp(ts) {}
+ webrtc::DecodedImageCallback* callback;
+ int64_t timestamp;
+};
+
+// On decode we receive a CVPixelBuffer, which we need to convert to a frame
+// buffer for use in the rest of WebRTC. Unfortunately this involves a frame
+// copy.
+// TODO(tkchin): Stuff CVPixelBuffer into a TextureBuffer and pass that along
+// instead once the pipeline supports it.
+rtc::scoped_refptr<webrtc::VideoFrameBuffer> VideoFrameBufferForPixelBuffer(
+ CVPixelBufferRef pixel_buffer) {
+ DCHECK(pixel_buffer);
+ DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
+ kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
+ size_t width = CVPixelBufferGetWidthOfPlane(pixel_buffer, 0);
+ size_t height = CVPixelBufferGetHeightOfPlane(pixel_buffer, 0);
+ // TODO(tkchin): Use a frame buffer pool.
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer =
+ new rtc::RefCountedObject<webrtc::I420Buffer>(width, height);
+ CVPixelBufferLockBaseAddress(pixel_buffer, kCVPixelBufferLock_ReadOnly);
+ const uint8* src_y = reinterpret_cast<const uint8*>(
+ CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 0));
+ int src_y_stride = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 0);
+ const uint8* src_uv = reinterpret_cast<const uint8*>(
+ CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 1));
+ int src_uv_stride = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 1);
+ int ret = libyuv::NV12ToI420(
+ src_y, src_y_stride, src_uv, src_uv_stride,
+ buffer->data(webrtc::kYPlane), buffer->stride(webrtc::kYPlane),
+ buffer->data(webrtc::kUPlane), buffer->stride(webrtc::kUPlane),
+ buffer->data(webrtc::kVPlane), buffer->stride(webrtc::kVPlane),
+ width, height);
+ CVPixelBufferUnlockBaseAddress(pixel_buffer, kCVPixelBufferLock_ReadOnly);
+ if (ret) {
+ LOG(LS_ERROR) << "Error converting NV12 to I420: " << ret;
+ return nullptr;
+ }
+ return buffer;
+}
+
+// This is the callback function that VideoToolbox calls when decode is
+// complete.
+void VTDecompressionOutputCallback(void* decoder,
+ void* params,
+ OSStatus status,
+ VTDecodeInfoFlags info_flags,
+ CVImageBufferRef image_buffer,
+ CMTime timestamp,
+ CMTime duration) {
+ rtc::scoped_ptr<FrameDecodeParams> decode_params(
+ reinterpret_cast<FrameDecodeParams*>(params));
+ if (status != noErr) {
+ LOG(LS_ERROR) << "Failed to decode frame. Status: " << status;
+ return;
+ }
+ // TODO(tkchin): Handle CVO properly.
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer =
+ VideoFrameBufferForPixelBuffer(image_buffer);
+ webrtc::VideoFrame decoded_frame(buffer, decode_params->timestamp, 0,
+ webrtc::kVideoRotation_0);
+ decode_params->callback->Decoded(decoded_frame);
+}
+
+} // namespace internal
+
+namespace webrtc {
+
+H264VideoToolboxDecoder::H264VideoToolboxDecoder()
+ : callback_(nullptr),
+ video_format_(nullptr),
+ decompression_session_(nullptr) {
+}
+
+H264VideoToolboxDecoder::~H264VideoToolboxDecoder() {
+ DestroyDecompressionSession();
+ SetVideoFormat(nullptr);
+}
+
+int H264VideoToolboxDecoder::InitDecode(const VideoCodec* video_codec,
+ int number_of_cores) {
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int H264VideoToolboxDecoder::Decode(
+ const EncodedImage& input_image,
+ bool missing_frames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codec_specific_info,
+ int64_t render_time_ms) {
+ DCHECK(input_image._buffer);
+
+ CMSampleBufferRef sample_buffer = nullptr;
+ if (!H264AnnexBBufferToCMSampleBuffer(input_image._buffer,
+ input_image._length,
+ video_format_,
+ &sample_buffer)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ DCHECK(sample_buffer);
+ // Check if the video format has changed, and reinitialize decoder if needed.
+ CMVideoFormatDescriptionRef description =
+ CMSampleBufferGetFormatDescription(sample_buffer);
+ if (!CMFormatDescriptionEqual(description, video_format_)) {
+ SetVideoFormat(description);
+ ResetDecompressionSession();
+ }
+ VTDecodeFrameFlags decode_flags =
+ kVTDecodeFrame_EnableAsynchronousDecompression;
+ rtc::scoped_ptr<internal::FrameDecodeParams> frame_decode_params;
+ frame_decode_params.reset(
+ new internal::FrameDecodeParams(callback_, input_image._timeStamp));
+ OSStatus status = VTDecompressionSessionDecodeFrame(
+ decompression_session_, sample_buffer, decode_flags,
+ frame_decode_params.release(), nullptr);
+ CFRelease(sample_buffer);
+ if (status != noErr) {
+ LOG(LS_ERROR) << "Failed to decode frame with code: " << status;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int H264VideoToolboxDecoder::RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) {
+ DCHECK(!callback_);
+ callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int H264VideoToolboxDecoder::Release() {
+ callback_ = nullptr;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int H264VideoToolboxDecoder::Reset() {
+ ResetDecompressionSession();
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int H264VideoToolboxDecoder::ResetDecompressionSession() {
+ DestroyDecompressionSession();
+
+ // Need to wait for the first SPS to initialize decoder.
+ if (!video_format_) {
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ // Set keys for OpenGL and IOSurface compatibilty, which makes the encoder
+ // create pixel buffers with GPU backed memory. The intent here is to pass
+ // the pixel buffers directly so we avoid a texture upload later during
+ // rendering. This currently is moot because we are converting back to an
+ // I420 frame after decode, but eventually we will be able to plumb
+ // CVPixelBuffers directly to the renderer.
+ // TODO(tkchin): Maybe only set OpenGL/IOSurface keys if we know that that
+ // we can pass CVPixelBuffers as native handles in decoder output.
+ static size_t const attributes_size = 3;
+ CFTypeRef keys[attributes_size] = {
+#if defined(WEBRTC_IOS)
+ kCVPixelBufferOpenGLESCompatibilityKey,
+#elif defined(WEBRTC_MAC)
+ kCVPixelBufferOpenGLCompatibilityKey,
+#endif
+ kCVPixelBufferIOSurfacePropertiesKey,
+ kCVPixelBufferPixelFormatTypeKey
+ };
+ CFDictionaryRef io_surface_value =
+ internal::CreateCFDictionary(nullptr, nullptr, 0);
+ int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
+ CFNumberRef pixel_format =
+ CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
+ CFTypeRef values[attributes_size] = {
+ kCFBooleanTrue,
+ io_surface_value,
+ pixel_format
+ };
+ CFDictionaryRef attributes =
+ internal::CreateCFDictionary(keys, values, attributes_size);
+ if (io_surface_value) {
+ CFRelease(io_surface_value);
+ io_surface_value = nullptr;
+ }
+ if (pixel_format) {
+ CFRelease(pixel_format);
+ pixel_format = nullptr;
+ }
+ VTDecompressionOutputCallbackRecord record = {
+ internal::VTDecompressionOutputCallback, this,
+ };
+ OSStatus status =
+ VTDecompressionSessionCreate(nullptr, video_format_, nullptr, attributes,
+ &record, &decompression_session_);
+ CFRelease(attributes);
+ if (status != noErr) {
+ DestroyDecompressionSession();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ ConfigureDecompressionSession();
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void H264VideoToolboxDecoder::ConfigureDecompressionSession() {
+ DCHECK(decompression_session_);
+#if defined(WEBRTC_IOS)
+ VTSessionSetProperty(decompression_session_,
+ kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue);
+#endif
+}
+
+void H264VideoToolboxDecoder::DestroyDecompressionSession() {
+ if (decompression_session_) {
+ VTDecompressionSessionInvalidate(decompression_session_);
+ decompression_session_ = nullptr;
+ }
+}
+
+void H264VideoToolboxDecoder::SetVideoFormat(
+ CMVideoFormatDescriptionRef video_format) {
+ if (video_format_ == video_format) {
+ return;
+ }
+ if (video_format_) {
+ CFRelease(video_format_);
+ }
+ video_format_ = video_format;
+ if (video_format_) {
+ CFRetain(video_format_);
+ }
+}
+
+} // namespace webrtc
+
+#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h
new file mode 100644
index 0000000..f54ddb9
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_DECODER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_DECODER_H_
+
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
+
+#if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+
+#include <VideoToolbox/VideoToolbox.h>
+
+// This file provides a H264 decoder implementation using the VideoToolbox
+// APIs. Since documentation is almost non-existent, this is largely based on
+// the information in the VideoToolbox header files, a talk from WWDC 2014 and
+// experimentation.
+
+namespace webrtc {
+
+// H264 decoder backed by a VTDecompressionSession.
+class H264VideoToolboxDecoder : public H264Decoder {
+ public:
+  H264VideoToolboxDecoder();
+
+  ~H264VideoToolboxDecoder() override;
+
+  int InitDecode(const VideoCodec* video_codec, int number_of_cores) override;
+
+  int Decode(const EncodedImage& input_image,
+             bool missing_frames,
+             const RTPFragmentationHeader* fragmentation,
+             const CodecSpecificInfo* codec_specific_info,
+             int64_t render_time_ms) override;
+
+  int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
+
+  int Release() override;
+
+  int Reset() override;
+
+ private:
+  // (Re)creates the VTDecompressionSession and configures it.
+  int ResetDecompressionSession();
+  void ConfigureDecompressionSession();
+  void DestroyDecompressionSession();
+  // Retains |video_format| and releases any previously stored format.
+  void SetVideoFormat(CMVideoFormatDescriptionRef video_format);
+
+  DecodedImageCallback* callback_;
+  CMVideoFormatDescriptionRef video_format_;
+  VTDecompressionSessionRef decompression_session_;
+};  // H264VideoToolboxDecoder
+
+} // namespace webrtc
+
+#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_DECODER_H_
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
new file mode 100644
index 0000000..3dfd6cf
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h"
+
+#if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+
+#include <string>
+#include <vector>
+
+#include "libyuv/convert_from.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h"
+
+namespace internal {
+
+// Convenience function for creating a dictionary.
+// Uses the standard CFType callbacks so keys/values are retained by the
+// dictionary. Caller owns the returned reference and must CFRelease it.
+inline CFDictionaryRef CreateCFDictionary(CFTypeRef* keys,
+                                          CFTypeRef* values,
+                                          size_t size) {
+  return CFDictionaryCreate(kCFAllocatorDefault, keys, values, size,
+                            &kCFTypeDictionaryKeyCallBacks,
+                            &kCFTypeDictionaryValueCallBacks);
+}
+
+// Copies characters from a CFStringRef into a std::string.
+// Returns an empty string if the UTF8 conversion fails.
+std::string CFStringToString(const CFStringRef cf_string) {
+  DCHECK(cf_string);
+  std::string std_string;
+  // Get the size needed for UTF8 plus terminating character.
+  size_t buffer_size =
+      CFStringGetMaximumSizeForEncoding(CFStringGetLength(cf_string),
+                                        kCFStringEncodingUTF8) +
+      1;
+  rtc::scoped_ptr<char[]> buffer(new char[buffer_size]);
+  if (CFStringGetCString(cf_string, buffer.get(), buffer_size,
+                         kCFStringEncodingUTF8)) {
+    // Copy over the characters.
+    std_string.assign(buffer.get());
+  }
+  return std_string;
+}
+
+// Convenience function for setting a VT property.
+// Boxes |value| in a CFNumber, applies it to |session| and logs any failure.
+void SetVTSessionProperty(VTSessionRef session,
+                          CFStringRef key,
+                          int32_t value) {
+  CFNumberRef cf_num =
+      CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &value);
+  OSStatus status = VTSessionSetProperty(session, key, cf_num);
+  CFRelease(cf_num);
+  if (status != noErr) {
+    LOG(LS_ERROR) << "VTSessionSetProperty failed to set: "
+                  << CFStringToString(key) << " to " << value << ": "
+                  << status;
+  }
+}
+
+// Convenience function for setting a VT property.
+// Maps |value| to kCFBooleanTrue/kCFBooleanFalse and logs any failure.
+void SetVTSessionProperty(VTSessionRef session, CFStringRef key, bool value) {
+  CFBooleanRef cf_bool = (value) ? kCFBooleanTrue : kCFBooleanFalse;
+  OSStatus status = VTSessionSetProperty(session, key, cf_bool);
+  if (status != noErr) {
+    std::string key_string = CFStringToString(key);
+    LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+                  << " to " << value << ": " << status;
+  }
+}
+
+// Convenience function for setting a VT property.
+// |value| is passed through directly (CFStringRef is already a CFType).
+void SetVTSessionProperty(VTSessionRef session,
+                          CFStringRef key,
+                          CFStringRef value) {
+  OSStatus status = VTSessionSetProperty(session, key, value);
+  if (status != noErr) {
+    std::string key_string = CFStringToString(key);
+    std::string val_string = CFStringToString(value);
+    LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+                  << " to " << val_string << ": " << status;
+  }
+}
+
+// Struct that we pass to the encoder per frame to encode. We receive it again
+// in the encoder callback.
+struct FrameEncodeParams {
+  FrameEncodeParams(webrtc::EncodedImageCallback* cb,
+                    const webrtc::CodecSpecificInfo* csi,
+                    int32_t w,
+                    int32_t h,
+                    int64_t rtms,
+                    uint32_t ts)
+      : callback(cb),
+        width(w),
+        height(h),
+        render_time_ms(rtms),
+        timestamp(ts) {
+    // Copy the codec-specific info if provided; otherwise default to a
+    // plain H264 entry so the callback always has a valid codecType.
+    if (csi) {
+      codec_specific_info = *csi;
+    } else {
+      codec_specific_info.codecType = webrtc::kVideoCodecH264;
+    }
+  }
+  // Callback to deliver the encoded frame to; not owned.
+  webrtc::EncodedImageCallback* callback;
+  webrtc::CodecSpecificInfo codec_specific_info;
+  // Frame dimensions captured at Encode() time.
+  int32_t width;
+  int32_t height;
+  int64_t render_time_ms;
+  // RTP timestamp of the input frame.
+  uint32_t timestamp;
+};
+
+// We receive I420Frames as input, but we need to feed CVPixelBuffers into the
+// encoder. This performs the copy and format conversion.
+// Returns false if the buffer could not be locked or the conversion failed.
+// TODO(tkchin): See if encoder will accept i420 frames and compare performance.
+bool CopyVideoFrameToPixelBuffer(const webrtc::VideoFrame& frame,
+                                 CVPixelBufferRef pixel_buffer) {
+  DCHECK(pixel_buffer);
+  // The destination must be NV12 with dimensions matching the source frame.
+  DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
+         kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
+  DCHECK(CVPixelBufferGetHeightOfPlane(pixel_buffer, 0) ==
+         static_cast<size_t>(frame.height()));
+  DCHECK(CVPixelBufferGetWidthOfPlane(pixel_buffer, 0) ==
+         static_cast<size_t>(frame.width()));
+
+  // Base addresses are only valid between lock/unlock.
+  CVReturn cvRet = CVPixelBufferLockBaseAddress(pixel_buffer, 0);
+  if (cvRet != kCVReturnSuccess) {
+    LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
+    return false;
+  }
+  uint8* dst_y = reinterpret_cast<uint8*>(
+      CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 0));
+  int dst_stride_y = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 0);
+  uint8* dst_uv = reinterpret_cast<uint8*>(
+      CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 1));
+  int dst_stride_uv = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 1);
+  // Convert I420 to NV12.
+  int ret = libyuv::I420ToNV12(
+      frame.buffer(webrtc::kYPlane), frame.stride(webrtc::kYPlane),
+      frame.buffer(webrtc::kUPlane), frame.stride(webrtc::kUPlane),
+      frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane),
+      dst_y, dst_stride_y, dst_uv, dst_stride_uv,
+      frame.width(), frame.height());
+  CVPixelBufferUnlockBaseAddress(pixel_buffer, 0);
+  if (ret) {
+    LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
+    return false;
+  }
+  return true;
+}
+
+// This is the callback function that VideoToolbox calls when encode is
+// complete. |params| is the FrameEncodeParams allocated in Encode(); the
+// scoped_ptr below takes ownership so it is freed on every exit path.
+void VTCompressionOutputCallback(void* encoder,
+                                 void* params,
+                                 OSStatus status,
+                                 VTEncodeInfoFlags info_flags,
+                                 CMSampleBufferRef sample_buffer) {
+  rtc::scoped_ptr<FrameEncodeParams> encode_params(
+      reinterpret_cast<FrameEncodeParams*>(params));
+  if (status != noErr) {
+    LOG(LS_ERROR) << "H264 encoding failed.";
+    return;
+  }
+  if (info_flags & kVTEncodeInfo_FrameDropped) {
+    LOG(LS_INFO) << "H264 encode dropped frame.";
+  }
+
+  // A sample is a keyframe (sync sample) when the NotSync attachment is
+  // absent from its attachment dictionary.
+  bool is_keyframe = false;
+  CFArrayRef attachments =
+      CMSampleBufferGetSampleAttachmentsArray(sample_buffer, 0);
+  if (attachments != nullptr && CFArrayGetCount(attachments)) {
+    CFDictionaryRef attachment =
+        static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(attachments, 0));
+    is_keyframe =
+        !CFDictionaryContainsKey(attachment, kCMSampleAttachmentKey_NotSync);
+  }
+
+  // Convert the sample buffer into a buffer suitable for RTP packetization.
+  // TODO(tkchin): Allocate buffers through a pool.
+  rtc::scoped_ptr<rtc::Buffer> buffer(new rtc::Buffer());
+  rtc::scoped_ptr<webrtc::RTPFragmentationHeader> header;
+  if (!H264CMSampleBufferToAnnexBBuffer(sample_buffer,
+                                        is_keyframe,
+                                        buffer.get(),
+                                        header.accept())) {
+    return;
+  }
+  webrtc::EncodedImage frame(buffer->data(), buffer->size(), buffer->size());
+  frame._encodedWidth = encode_params->width;
+  frame._encodedHeight = encode_params->height;
+  frame._completeFrame = true;
+  frame._frameType = is_keyframe ? webrtc::kKeyFrame : webrtc::kDeltaFrame;
+  frame.capture_time_ms_ = encode_params->render_time_ms;
+  frame._timeStamp = encode_params->timestamp;
+
+  int result = encode_params->callback->Encoded(
+      frame, &(encode_params->codec_specific_info), header.get());
+  if (result != 0) {
+    LOG(LS_ERROR) << "Encoded callback failed: " << result;
+  }
+}
+
+} // namespace internal
+
+namespace webrtc {
+
+// Starts with no registered callback and no compression session; the session
+// is created by InitEncode() via ResetCompressionSession().
+H264VideoToolboxEncoder::H264VideoToolboxEncoder()
+    : callback_(nullptr), compression_session_(nullptr) {
+}
+
+// Invalidates and releases the compression session, if one is active.
+H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
+  DestroyCompressionSession();
+}
+
+// Captures codec settings and (re)creates the compression session.
+// |number_of_cores| and |max_payload_size| are currently unused.
+int H264VideoToolboxEncoder::InitEncode(const VideoCodec* codec_settings,
+                                        int number_of_cores,
+                                        size_t max_payload_size) {
+  DCHECK(codec_settings);
+  DCHECK_EQ(codec_settings->codecType, kVideoCodecH264);
+  // TODO(tkchin): We may need to enforce width/height dimension restrictions
+  // to match what the encoder supports.
+  width_ = codec_settings->width;
+  height_ = codec_settings->height;
+  // We can only set average bitrate on the HW encoder.
+  // startBitrate is in kbps; bitrate_ is stored in bits per second.
+  bitrate_ = codec_settings->startBitrate * 1000;
+
+  // TODO(tkchin): Try setting payload size via
+  // kVTCompressionPropertyKey_MaxH264SliceBytes.
+
+  return ResetCompressionSession();
+}
+
+// Converts |input_image| to an NV12 pixel buffer and submits it to the
+// compression session. Encoding is asynchronous; results are delivered via
+// internal::VTCompressionOutputCallback.
+int H264VideoToolboxEncoder::Encode(
+    const VideoFrame& input_image,
+    const CodecSpecificInfo* codec_specific_info,
+    const std::vector<VideoFrameType>* frame_types) {
+  if (input_image.IsZeroSize()) {
+    // It's possible to get zero sizes as a signal to produce keyframes (this
+    // happens for internal sources). But this shouldn't happen in
+    // webrtcvideoengine2.
+    RTC_NOTREACHED();
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+  if (!callback_ || !compression_session_) {
+    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+  }
+
+  // Get a pixel buffer from the pool and copy frame data over.
+  CVPixelBufferPoolRef pixel_buffer_pool =
+      VTCompressionSessionGetPixelBufferPool(compression_session_);
+  CVPixelBufferRef pixel_buffer = nullptr;
+  CVReturn ret = CVPixelBufferPoolCreatePixelBuffer(nullptr, pixel_buffer_pool,
+                                                    &pixel_buffer);
+  if (ret != kCVReturnSuccess) {
+    LOG(LS_ERROR) << "Failed to create pixel buffer: " << ret;
+    // We probably want to drop frames here, since failure probably means
+    // that the pool is empty.
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+  DCHECK(pixel_buffer);
+  if (!internal::CopyVideoFrameToPixelBuffer(input_image, pixel_buffer)) {
+    LOG(LS_ERROR) << "Failed to copy frame data.";
+    CVBufferRelease(pixel_buffer);
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+
+  // Check if we need a keyframe.
+  bool is_keyframe_required = false;
+  if (frame_types) {
+    for (auto frame_type : *frame_types) {
+      if (frame_type == kKeyFrame) {
+        is_keyframe_required = true;
+        break;
+      }
+    }
+  }
+
+  CMTime presentation_time_stamp =
+      CMTimeMake(input_image.render_time_ms(), 1000);
+  CFDictionaryRef frame_properties = nullptr;
+  if (is_keyframe_required) {
+    CFTypeRef keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
+    CFTypeRef values[] = { kCFBooleanTrue };
+    frame_properties = internal::CreateCFDictionary(keys, values, 1);
+  }
+  // Ownership of encode_params is passed to the session; it is reclaimed and
+  // freed in VTCompressionOutputCallback.
+  // NOTE(review): if VTCompressionSessionEncodeFrame fails, the callback may
+  // never fire and the released pointer would leak -- confirm and consider
+  // checking the returned status.
+  rtc::scoped_ptr<internal::FrameEncodeParams> encode_params;
+  encode_params.reset(new internal::FrameEncodeParams(
+      callback_, codec_specific_info, width_, height_,
+      input_image.render_time_ms(), input_image.timestamp()));
+  VTCompressionSessionEncodeFrame(
+      compression_session_, pixel_buffer, presentation_time_stamp,
+      kCMTimeInvalid, frame_properties, encode_params.release(), nullptr);
+  if (frame_properties) {
+    CFRelease(frame_properties);
+  }
+  if (pixel_buffer) {
+    CVBufferRelease(pixel_buffer);
+  }
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Stores the callback that receives encoded frames; not owned.
+int H264VideoToolboxEncoder::RegisterEncodeCompleteCallback(
+    EncodedImageCallback* callback) {
+  callback_ = callback;
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// No-op: channel statistics are not used by this encoder.
+int H264VideoToolboxEncoder::SetChannelParameters(uint32_t packet_loss,
+                                                  int64_t rtt) {
+  // Encoder doesn't know anything about packet loss or rtt so just return.
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Updates the target average bitrate (input in kbps, stored as bps).
+// |frame_rate| is unused; no corresponding session property is set here.
+int H264VideoToolboxEncoder::SetRates(uint32_t new_bitrate_kbit,
+                                      uint32_t frame_rate) {
+  bitrate_ = new_bitrate_kbit * 1000;
+  if (compression_session_) {
+    internal::SetVTSessionProperty(compression_session_,
+                                   kVTCompressionPropertyKey_AverageBitRate,
+                                   bitrate_);
+  }
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Drops the callback and recreates the session so in-flight frames cannot
+// reach the cleared callback.
+int H264VideoToolboxEncoder::Release() {
+  callback_ = nullptr;
+  // Need to reset so that the session is invalidated and won't use the
+  // callback anymore.
+  return ResetCompressionSession();
+}
+
+// Destroys any existing session and creates a fresh VTCompressionSession
+// configured for the current width/height/bitrate.
+int H264VideoToolboxEncoder::ResetCompressionSession() {
+  DestroyCompressionSession();
+
+  // Set source image buffer attributes. These attributes will be present on
+  // buffers retrieved from the encoder's pixel buffer pool.
+  const size_t attributes_size = 3;
+  CFTypeRef keys[attributes_size] = {
+#if defined(WEBRTC_IOS)
+    kCVPixelBufferOpenGLESCompatibilityKey,
+#elif defined(WEBRTC_MAC)
+    kCVPixelBufferOpenGLCompatibilityKey,
+#endif
+    kCVPixelBufferIOSurfacePropertiesKey,
+    kCVPixelBufferPixelFormatTypeKey
+  };
+  // Empty dictionary value requests default IOSurface backing.
+  CFDictionaryRef io_surface_value =
+      internal::CreateCFDictionary(nullptr, nullptr, 0);
+  int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
+  // NOTE(review): kCFNumberLongType reads sizeof(long) bytes from an int64_t
+  // source; on 32-bit builds that is only the low 4 bytes -- works for this
+  // value on little-endian, but confirm or use kCFNumberSInt64Type.
+  CFNumberRef pixel_format =
+      CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
+  CFTypeRef values[attributes_size] = {
+    kCFBooleanTrue,
+    io_surface_value,
+    pixel_format
+  };
+  CFDictionaryRef source_attributes =
+      internal::CreateCFDictionary(keys, values, attributes_size);
+  // The dictionary retains its values, so drop our references.
+  if (io_surface_value) {
+    CFRelease(io_surface_value);
+    io_surface_value = nullptr;
+  }
+  if (pixel_format) {
+    CFRelease(pixel_format);
+    pixel_format = nullptr;
+  }
+  OSStatus status = VTCompressionSessionCreate(
+      nullptr,  // use default allocator
+      width_,
+      height_,
+      kCMVideoCodecType_H264,
+      nullptr,  // use default encoder
+      source_attributes,
+      nullptr,  // use default compressed data allocator
+      internal::VTCompressionOutputCallback,
+      this,
+      &compression_session_);
+  if (source_attributes) {
+    CFRelease(source_attributes);
+    source_attributes = nullptr;
+  }
+  if (status != noErr) {
+    LOG(LS_ERROR) << "Failed to create compression session: " << status;
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+  ConfigureCompressionSession();
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Applies encoder settings: real-time mode, baseline profile, the current
+// target bitrate and no frame reordering.
+void H264VideoToolboxEncoder::ConfigureCompressionSession() {
+  DCHECK(compression_session_);
+  internal::SetVTSessionProperty(compression_session_,
+                                 kVTCompressionPropertyKey_RealTime, true);
+  internal::SetVTSessionProperty(compression_session_,
+                                 kVTCompressionPropertyKey_ProfileLevel,
+                                 kVTProfileLevel_H264_Baseline_AutoLevel);
+  internal::SetVTSessionProperty(
+      compression_session_, kVTCompressionPropertyKey_AverageBitRate, bitrate_);
+  internal::SetVTSessionProperty(compression_session_,
+                                 kVTCompressionPropertyKey_AllowFrameReordering,
+                                 false);
+  // TODO(tkchin): Look at entropy mode and colorspace matrices.
+  // TODO(tkchin): Investigate to see if there's any way to make this work.
+  // May need it to interop with Android. Currently this call just fails.
+  // On inspecting encoder output on iOS8, this value is set to 6.
+  // internal::SetVTSessionProperty(compression_session_,
+  //     kVTCompressionPropertyKey_MaxFrameDelayCount,
+  //     1);
+  // TODO(tkchin): See if enforcing keyframe frequency is beneficial in any
+  // way.
+  // internal::SetVTSessionProperty(
+  //     compression_session_,
+  //     kVTCompressionPropertyKey_MaxKeyFrameInterval, 240);
+  // internal::SetVTSessionProperty(
+  //     compression_session_,
+  //     kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, 240);
+}
+
+// Invalidates the session and releases the reference obtained from
+// VTCompressionSessionCreate.
+void H264VideoToolboxEncoder::DestroyCompressionSession() {
+  if (compression_session_) {
+    VTCompressionSessionInvalidate(compression_session_);
+    CFRelease(compression_session_);
+    compression_session_ = nullptr;
+  }
+}
+
+} // namespace webrtc
+
+#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
new file mode 100644
index 0000000..28cd63e
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_ENCODER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_ENCODER_H_
+
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
+
+#if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+
+#include <VideoToolbox/VideoToolbox.h>
+#include <vector>
+
+// This file provides a H264 encoder implementation using the VideoToolbox
+// APIs. Since documentation is almost non-existent, this is largely based on
+// the information in the VideoToolbox header files, a talk from WWDC 2014 and
+// experimentation.
+
+namespace webrtc {
+
+// H264 encoder backed by a VTCompressionSession.
+class H264VideoToolboxEncoder : public H264Encoder {
+ public:
+  H264VideoToolboxEncoder();
+
+  ~H264VideoToolboxEncoder() override;
+
+  int InitEncode(const VideoCodec* codec_settings,
+                 int number_of_cores,
+                 size_t max_payload_size) override;
+
+  int Encode(const VideoFrame& input_image,
+             const CodecSpecificInfo* codec_specific_info,
+             const std::vector<VideoFrameType>* frame_types) override;
+
+  int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
+
+  int SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;
+
+  int SetRates(uint32_t new_bitrate_kbit, uint32_t frame_rate) override;
+
+  int Release() override;
+
+ private:
+  // (Re)creates the VTCompressionSession and configures it.
+  int ResetCompressionSession();
+  void ConfigureCompressionSession();
+  void DestroyCompressionSession();
+
+  webrtc::EncodedImageCallback* callback_;  // Not owned.
+  VTCompressionSessionRef compression_session_;
+  int32_t bitrate_;  // Bitrate in bits per second.
+  int32_t width_;
+  int32_t height_;
+};  // H264VideoToolboxEncoder
+
+} // namespace webrtc
+
+#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_ENCODER_H_
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
new file mode 100644
index 0000000..7d595a8
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h"
+
+#if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <vector>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+
+namespace webrtc {
+
+// Annex B start code prefix inserted before each NALU / parameter set.
+const char kAnnexBHeaderBytes[4] = {0, 0, 0, 1};
+// Size of the avcc length prefix (a big-endian uint32 per NALU).
+const size_t kAvccHeaderByteSize = sizeof(uint32_t);
+
+// Rewrites the avcc (length-prefixed) NALUs in |avcc_sample_buffer| as
+// Annex B (start-code-prefixed) data in |annexb_buffer|, prepending SPS/PPS
+// on keyframes. On success |*out_header| receives a newly allocated
+// RTPFragmentationHeader describing each NALU; caller takes ownership.
+bool H264CMSampleBufferToAnnexBBuffer(
+    CMSampleBufferRef avcc_sample_buffer,
+    bool is_keyframe,
+    rtc::Buffer* annexb_buffer,
+    webrtc::RTPFragmentationHeader** out_header) {
+  DCHECK(avcc_sample_buffer);
+  DCHECK(out_header);
+  *out_header = nullptr;
+
+  // Get format description from the sample buffer.
+  CMVideoFormatDescriptionRef description =
+      CMSampleBufferGetFormatDescription(avcc_sample_buffer);
+  if (description == nullptr) {
+    LOG(LS_ERROR) << "Failed to get sample buffer's description.";
+    return false;
+  }
+
+  // Get parameter set information.
+  int nalu_header_size = 0;
+  size_t param_set_count = 0;
+  OSStatus status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
+      description, 0, nullptr, nullptr, &param_set_count, &nalu_header_size);
+  if (status != noErr) {
+    LOG(LS_ERROR) << "Failed to get parameter set.";
+    return false;
+  }
+  // TODO(tkchin): handle other potential sizes.
+  DCHECK_EQ(nalu_header_size, 4);
+  DCHECK_EQ(param_set_count, 2u);
+
+  // Truncate any previous data in the buffer without changing its capacity.
+  annexb_buffer->SetSize(0);
+
+  size_t nalu_offset = 0;
+  std::vector<size_t> frag_offsets;
+  std::vector<size_t> frag_lengths;
+
+  // Place all parameter sets at the front of buffer.
+  if (is_keyframe) {
+    size_t param_set_size = 0;
+    const uint8_t* param_set = nullptr;
+    for (size_t i = 0; i < param_set_count; ++i) {
+      status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
+          description, i, &param_set, &param_set_size, nullptr, nullptr);
+      if (status != noErr) {
+        LOG(LS_ERROR) << "Failed to get parameter set.";
+        return false;
+      }
+      // Update buffer.
+      annexb_buffer->AppendData(kAnnexBHeaderBytes, sizeof(kAnnexBHeaderBytes));
+      annexb_buffer->AppendData(reinterpret_cast<const char*>(param_set),
+                                param_set_size);
+      // Update fragmentation.
+      frag_offsets.push_back(nalu_offset + sizeof(kAnnexBHeaderBytes));
+      frag_lengths.push_back(param_set_size);
+      nalu_offset += sizeof(kAnnexBHeaderBytes) + param_set_size;
+    }
+  }
+
+  // Get block buffer from the sample buffer.
+  CMBlockBufferRef block_buffer =
+      CMSampleBufferGetDataBuffer(avcc_sample_buffer);
+  if (block_buffer == nullptr) {
+    LOG(LS_ERROR) << "Failed to get sample buffer's block buffer.";
+    return false;
+  }
+  CMBlockBufferRef contiguous_buffer = nullptr;
+  // Make sure block buffer is contiguous.
+  if (!CMBlockBufferIsRangeContiguous(block_buffer, 0, 0)) {
+    status = CMBlockBufferCreateContiguous(
+        nullptr, block_buffer, nullptr, nullptr, 0, 0, 0, &contiguous_buffer);
+    if (status != noErr) {
+      LOG(LS_ERROR) << "Failed to flatten non-contiguous block buffer: "
+                    << status;
+      return false;
+    }
+  } else {
+    contiguous_buffer = block_buffer;
+    // Retain to make cleanup easier.
+    CFRetain(contiguous_buffer);
+    block_buffer = nullptr;
+  }
+
+  // Now copy the actual data.
+  char* data_ptr = nullptr;
+  size_t block_buffer_size = CMBlockBufferGetDataLength(contiguous_buffer);
+  status = CMBlockBufferGetDataPointer(contiguous_buffer, 0, nullptr, nullptr,
+                                       &data_ptr);
+  if (status != noErr) {
+    LOG(LS_ERROR) << "Failed to get block buffer data.";
+    CFRelease(contiguous_buffer);
+    return false;
+  }
+  size_t bytes_remaining = block_buffer_size;
+  while (bytes_remaining > 0) {
+    // The size type here must match |nalu_header_size|, we expect 4 bytes.
+    // Read the length of the next packet of data. Must convert from big endian
+    // to host endian.
+    DCHECK_GE(bytes_remaining, (size_t)nalu_header_size);
+    uint32_t* uint32_data_ptr = reinterpret_cast<uint32_t*>(data_ptr);
+    uint32_t packet_size = CFSwapInt32BigToHost(*uint32_data_ptr);
+    // Update buffer.
+    annexb_buffer->AppendData(kAnnexBHeaderBytes, sizeof(kAnnexBHeaderBytes));
+    annexb_buffer->AppendData(data_ptr + nalu_header_size, packet_size);
+    // Update fragmentation.
+    frag_offsets.push_back(nalu_offset + sizeof(kAnnexBHeaderBytes));
+    frag_lengths.push_back(packet_size);
+    nalu_offset += sizeof(kAnnexBHeaderBytes) + packet_size;
+
+    size_t bytes_written = packet_size + nalu_header_size;
+    bytes_remaining -= bytes_written;
+    data_ptr += bytes_written;
+  }
+  DCHECK_EQ(bytes_remaining, (size_t)0);
+
+  rtc::scoped_ptr<webrtc::RTPFragmentationHeader> header;
+  header.reset(new webrtc::RTPFragmentationHeader());
+  header->VerifyAndAllocateFragmentationHeader(frag_offsets.size());
+  DCHECK_EQ(frag_lengths.size(), frag_offsets.size());
+  for (size_t i = 0; i < frag_offsets.size(); ++i) {
+    header->fragmentationOffset[i] = frag_offsets[i];
+    header->fragmentationLength[i] = frag_lengths[i];
+    header->fragmentationPlType[i] = 0;
+    header->fragmentationTimeDiff[i] = 0;
+  }
+  *out_header = header.release();
+  CFRelease(contiguous_buffer);
+  return true;
+}
+
+// Converts an Annex B buffer received from RTP into an avcc CMSampleBuffer
+// suitable for the VideoToolbox decoder. If the buffer starts with an SPS, a
+// new format description is parsed from it; otherwise |video_format| must be
+// provided. Caller is responsible for releasing the created sample buffer.
+bool H264AnnexBBufferToCMSampleBuffer(
+    const uint8_t* annexb_buffer,
+    size_t annexb_buffer_size,
+    CMVideoFormatDescriptionRef video_format,
+    CMSampleBufferRef* out_sample_buffer) {
+  DCHECK(annexb_buffer);
+  DCHECK(out_sample_buffer);
+  *out_sample_buffer = nullptr;
+
+  // We need at least a 4-byte start code plus one NALU header byte before we
+  // can inspect the first NALU type below.
+  if (annexb_buffer_size < sizeof(kAnnexBHeaderBytes) + 1) {
+    LOG(LS_ERROR) << "Annex B buffer too small.";
+    return false;
+  }
+
+  // The buffer we receive via RTP has 00 00 00 01 start code artificially
+  // embedded by the RTP depacketizer. Extract NALU information.
+  // TODO(tkchin): handle potential case where sps and pps are delivered
+  // separately.
+  uint8_t first_nalu_type = annexb_buffer[4] & 0x1f;
+  bool is_first_nalu_type_sps = first_nalu_type == 0x7;
+
+  AnnexBBufferReader reader(annexb_buffer, annexb_buffer_size);
+  CMVideoFormatDescriptionRef description = nullptr;
+  OSStatus status = noErr;
+  if (is_first_nalu_type_sps) {
+    // Parse the SPS and PPS into a CMVideoFormatDescription.
+    const uint8_t* param_set_ptrs[2] = {};
+    size_t param_set_sizes[2] = {};
+    if (!reader.ReadNalu(&param_set_ptrs[0], &param_set_sizes[0])) {
+      LOG(LS_ERROR) << "Failed to read SPS";
+      return false;
+    }
+    if (!reader.ReadNalu(&param_set_ptrs[1], &param_set_sizes[1])) {
+      LOG(LS_ERROR) << "Failed to read PPS";
+      return false;
+    }
+    status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
+        kCFAllocatorDefault, 2, param_set_ptrs, param_set_sizes, 4,
+        &description);
+    if (status != noErr) {
+      LOG(LS_ERROR) << "Failed to create video format description.";
+      return false;
+    }
+  } else {
+    DCHECK(video_format);
+    description = video_format;
+    // We don't need to retain, but it makes logic easier since we are creating
+    // in the other block.
+    CFRetain(description);
+  }
+
+  // Allocate memory as a block buffer.
+  // TODO(tkchin): figure out how to use a pool.
+  CMBlockBufferRef block_buffer = nullptr;
+  status = CMBlockBufferCreateWithMemoryBlock(
+      nullptr, nullptr, reader.BytesRemaining(), nullptr, nullptr, 0,
+      reader.BytesRemaining(), kCMBlockBufferAssureMemoryNowFlag,
+      &block_buffer);
+  if (status != kCMBlockBufferNoErr) {
+    LOG(LS_ERROR) << "Failed to create block buffer.";
+    CFRelease(description);
+    return false;
+  }
+
+  // Make sure block buffer is contiguous.
+  CMBlockBufferRef contiguous_buffer = nullptr;
+  if (!CMBlockBufferIsRangeContiguous(block_buffer, 0, 0)) {
+    status = CMBlockBufferCreateContiguous(
+        nullptr, block_buffer, nullptr, nullptr, 0, 0, 0, &contiguous_buffer);
+    if (status != noErr) {
+      LOG(LS_ERROR) << "Failed to flatten non-contiguous block buffer: "
+                    << status;
+      CFRelease(description);
+      CFRelease(block_buffer);
+      return false;
+    }
+  } else {
+    contiguous_buffer = block_buffer;
+    block_buffer = nullptr;
+  }
+
+  // Get a raw pointer into allocated memory.
+  size_t block_buffer_size = 0;
+  char* data_ptr = nullptr;
+  status = CMBlockBufferGetDataPointer(contiguous_buffer, 0, nullptr,
+                                       &block_buffer_size, &data_ptr);
+  if (status != kCMBlockBufferNoErr) {
+    LOG(LS_ERROR) << "Failed to get block buffer data pointer.";
+    CFRelease(description);
+    CFRelease(contiguous_buffer);
+    return false;
+  }
+  DCHECK(block_buffer_size == reader.BytesRemaining());
+
+  // Write Avcc NALUs into block buffer memory.
+  AvccBufferWriter writer(reinterpret_cast<uint8_t*>(data_ptr),
+                          block_buffer_size);
+  while (reader.BytesRemaining() > 0) {
+    const uint8_t* nalu_data_ptr = nullptr;
+    size_t nalu_data_size = 0;
+    if (reader.ReadNalu(&nalu_data_ptr, &nalu_data_size)) {
+      writer.WriteNalu(nalu_data_ptr, nalu_data_size);
+    }
+  }
+
+  // Create sample buffer.
+  status = CMSampleBufferCreate(nullptr, contiguous_buffer, true, nullptr,
+                                nullptr, description, 1, 0, nullptr, 0, nullptr,
+                                out_sample_buffer);
+  if (status != noErr) {
+    LOG(LS_ERROR) << "Failed to create sample buffer.";
+    CFRelease(description);
+    CFRelease(contiguous_buffer);
+    return false;
+  }
+  CFRelease(description);
+  CFRelease(contiguous_buffer);
+  return true;
+}
+
+// Positions the reader at the first start code in |annexb_buffer| and
+// pre-computes the position of the following one.
+AnnexBBufferReader::AnnexBBufferReader(const uint8_t* annexb_buffer,
+                                       size_t length)
+    : start_(annexb_buffer), offset_(0), next_offset_(0), length_(length) {
+  DCHECK(annexb_buffer);
+  offset_ = FindNextNaluHeader(start_, length_, 0);
+  next_offset_ =
+      FindNextNaluHeader(start_, length_, offset_ + sizeof(kAnnexBHeaderBytes));
+}
+
+// Returns a pointer/length for the payload of the current NALU (the bytes
+// between its start code and the next one) and advances to the next NALU.
+// Returns false when no NALU data remains.
+bool AnnexBBufferReader::ReadNalu(const uint8_t** out_nalu,
+                                  size_t* out_length) {
+  DCHECK(out_nalu);
+  DCHECK(out_length);
+  *out_nalu = nullptr;
+  *out_length = 0;
+
+  size_t data_offset = offset_ + sizeof(kAnnexBHeaderBytes);
+  if (data_offset > length_) {
+    return false;
+  }
+  *out_nalu = start_ + data_offset;
+  *out_length = next_offset_ - data_offset;
+  offset_ = next_offset_;
+  next_offset_ =
+      FindNextNaluHeader(start_, length_, offset_ + sizeof(kAnnexBHeaderBytes));
+  return true;
+}
+
+// Number of unread bytes, counted from the current start code position.
+size_t AnnexBBufferReader::BytesRemaining() const {
+  return length_ - offset_;
+}
+
+// Returns the offset of the next 00 00 00 01 start code at or after |offset|,
+// or |length| if none is found.
+size_t AnnexBBufferReader::FindNextNaluHeader(const uint8_t* start,
+                                              size_t length,
+                                              size_t offset) const {
+  DCHECK(start);
+  if (offset + sizeof(kAnnexBHeaderBytes) > length) {
+    return length;
+  }
+  // NALUs are separated by an 00 00 00 01 header. Scan the byte stream
+  // starting from the offset for the next such sequence.
+  const uint8_t* current = start + offset;
+  // The loop reads sizeof(kAnnexBHeaderBytes) at a time, so stop when there
+  // aren't enough bytes remaining.
+  const uint8_t* const end = start + length - sizeof(kAnnexBHeaderBytes);
+  while (current < end) {
+    // If the 4th byte is > 1 it can't be the terminating 01 of a start code
+    // beginning at any of the next 4 positions, so skip ahead 4 bytes.
+    if (current[3] > 1) {
+      current += 4;
+    } else if (current[3] == 1 && current[2] == 0 && current[1] == 0 &&
+               current[0] == 0) {
+      return current - start;
+    } else {
+      ++current;
+    }
+  }
+  return length;
+}
+
+// Wraps a caller-owned output buffer of |length| bytes; does not take
+// ownership.
+AvccBufferWriter::AvccBufferWriter(uint8_t* const avcc_buffer, size_t length)
+    : start_(avcc_buffer), offset_(0), length_(length) {
+  DCHECK(avcc_buffer);
+}
+
+// Appends one NALU in avcc form: a big-endian 4-byte length prefix followed
+// by the payload. Returns false (writing nothing) if it doesn't fit.
+bool AvccBufferWriter::WriteNalu(const uint8_t* data, size_t data_size) {
+  // Check if we can write this length of data.
+  if (data_size + kAvccHeaderByteSize > BytesRemaining()) {
+    return false;
+  }
+  // Write length header, which needs to be big endian.
+  uint32_t big_endian_length = CFSwapInt32HostToBig(data_size);
+  memcpy(start_ + offset_, &big_endian_length, sizeof(big_endian_length));
+  offset_ += sizeof(big_endian_length);
+  // Write data.
+  memcpy(start_ + offset_, data, data_size);
+  offset_ += data_size;
+  return true;
+}
+
+// Free space left in the output buffer, in bytes.
+size_t AvccBufferWriter::BytesRemaining() const {
+  return length_ - offset_;
+}
+
+} // namespace webrtc
+
+#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h
new file mode 100644
index 0000000..230dea9
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
+
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
+
+#if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+
+#include <CoreMedia/CoreMedia.h>
+
+#include "webrtc/base/buffer.h"
+#include "webrtc/modules/interface/module_common_types.h"
+
+namespace webrtc {
+
+// Converts a sample buffer emitted from the VideoToolbox encoder into a buffer
+// suitable for RTP. The sample buffer is in avcc format whereas the rtp buffer
+// needs to be in Annex B format. Data is written directly to |annexb_buffer|
+// and a new RTPFragmentationHeader is returned in |out_header|.
+bool H264CMSampleBufferToAnnexBBuffer(
+ CMSampleBufferRef avcc_sample_buffer,
+ bool is_keyframe,
+ rtc::Buffer* annexb_buffer,
+ webrtc::RTPFragmentationHeader** out_header);
+
+// Converts a buffer received from RTP into a sample buffer suitable for the
+// VideoToolbox decoder. The RTP buffer is in annex b format whereas the sample
+// buffer is in avcc format.
+// For a keyframe buffer |video_format| is ignored, since the format will be
+// read from the buffer itself. Otherwise |video_format| must be provided.
+// Caller is responsible for releasing the created sample buffer.
+bool H264AnnexBBufferToCMSampleBuffer(
+ const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size,
+ CMVideoFormatDescriptionRef video_format,
+ CMSampleBufferRef* out_sample_buffer);
+
+// Helper class for reading NALUs from an RTP Annex B buffer.
+class AnnexBBufferReader final {
+ public:
+  AnnexBBufferReader(const uint8_t* annexb_buffer, size_t length);
+  ~AnnexBBufferReader() {}
+  AnnexBBufferReader(const AnnexBBufferReader& other) = delete;
+  void operator=(const AnnexBBufferReader& other) = delete;
+
+  // Returns a pointer to the beginning of the next NALU slice without the
+  // header bytes and its length. Returns false if no more slices remain.
+  bool ReadNalu(const uint8_t** out_nalu, size_t* out_length);
+
+  // Returns the number of unread NALU bytes, including the size of the header.
+  // If the buffer has no remaining NALUs this will return zero.
+  size_t BytesRemaining() const;
+
+ private:
+  // Returns the offset of the next NALU header, or |length| if none remains.
+  size_t FindNextNaluHeader(const uint8_t* start,
+                            size_t length,
+                            size_t offset) const;
+
+  const uint8_t* const start_;  // Annex B input buffer; not owned.
+  size_t offset_;               // Offset of the next unread NALU header.
+  size_t next_offset_;          // Offset of the following header; presumably cached by ReadNalu() — confirm.
+  const size_t length_;         // Total size of the buffer in bytes.
+};
+
+// Helper class for writing NALUs using avcc format into a buffer.
+class AvccBufferWriter final {
+ public:
+  AvccBufferWriter(uint8_t* const avcc_buffer, size_t length);
+  ~AvccBufferWriter() {}
+  AvccBufferWriter(const AvccBufferWriter& other) = delete;
+  void operator=(const AvccBufferWriter& other) = delete;
+
+  // Writes the data slice into the buffer, prefixed with its 4-byte
+  // big-endian length. Returns false if there isn't enough space left.
+  bool WriteNalu(const uint8_t* data, size_t data_size);
+
+  // Returns the number of unused bytes left in the buffer.
+  size_t BytesRemaining() const;
+
+ private:
+  uint8_t* const start_;  // Avcc output buffer; not owned.
+  size_t offset_;         // Number of bytes written so far.
+  const size_t length_;   // Total capacity of the buffer in bytes.
+};
+
+} // namespace webrtc
+
+#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu_unittest.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu_unittest.cc
new file mode 100644
index 0000000..36946f1
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu_unittest.cc
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "webrtc/base/arraysize.h"
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h"
+
+namespace webrtc {
+
+static const uint8_t NALU_TEST_DATA_0[] = {0xAA, 0xBB, 0xCC};  // 3-byte NALU.
+static const uint8_t NALU_TEST_DATA_1[] = {0xDE, 0xAD, 0xBE, 0xEF};  // 4-byte.
+
+TEST(AnnexBBufferReaderTest, TestReadEmptyInput) {
+  const uint8_t annex_b_test_data[] = {0x00};
+  AnnexBBufferReader reader(annex_b_test_data, 0);  // Length 0: data unused.
+  const uint8_t* nalu = nullptr;
+  size_t nalu_length = 0;
+  EXPECT_EQ(0u, reader.BytesRemaining());
+  EXPECT_FALSE(reader.ReadNalu(&nalu, &nalu_length));
+  EXPECT_EQ(nullptr, nalu);  // A failed read yields a null NALU, zero length.
+  EXPECT_EQ(0u, nalu_length);
+}
+
+TEST(AnnexBBufferReaderTest, TestReadSingleNalu) {
+  const uint8_t annex_b_test_data[] = {0x00, 0x00, 0x00, 0x01, 0xAA};
+  AnnexBBufferReader reader(annex_b_test_data, arraysize(annex_b_test_data));
+  const uint8_t* nalu = nullptr;
+  size_t nalu_length = 0;
+  EXPECT_EQ(arraysize(annex_b_test_data), reader.BytesRemaining());
+  EXPECT_TRUE(reader.ReadNalu(&nalu, &nalu_length));
+  EXPECT_EQ(annex_b_test_data + 4, nalu);  // Payload starts past the header.
+  EXPECT_EQ(1u, nalu_length);  // Just the single 0xAA byte.
+  EXPECT_EQ(0u, reader.BytesRemaining());
+  EXPECT_FALSE(reader.ReadNalu(&nalu, &nalu_length));
+  EXPECT_EQ(nullptr, nalu);  // Failed read resets the out-params.
+  EXPECT_EQ(0u, nalu_length);
+}
+
+TEST(AnnexBBufferReaderTest, TestReadMissingNalu) {
+  // clang-format off
+  const uint8_t annex_b_test_data[] = {0x01,
+                                       0x00, 0x01,
+                                       0x00, 0x00, 0x01,
+                                       0x00, 0x00, 0x00, 0xFF};
+  // clang-format on
+  AnnexBBufferReader reader(annex_b_test_data, arraysize(annex_b_test_data));
+  const uint8_t* nalu = nullptr;
+  size_t nalu_length = 0;
+  EXPECT_EQ(0u, reader.BytesRemaining());  // No 00 00 00 01 header anywhere.
+  EXPECT_FALSE(reader.ReadNalu(&nalu, &nalu_length));
+  EXPECT_EQ(nullptr, nalu);
+  EXPECT_EQ(0u, nalu_length);
+}
+
+TEST(AnnexBBufferReaderTest, TestReadMultipleNalus) {
+  // clang-format off
+  const uint8_t annex_b_test_data[] = {0x00, 0x00, 0x00, 0x01, 0xFF,
+                                       0x01,
+                                       0x00, 0x01,
+                                       0x00, 0x00, 0x01,
+                                       0x00, 0x00, 0x00, 0xFF,
+                                       0x00, 0x00, 0x00, 0x01, 0xAA, 0xBB};
+  // clang-format on
+  AnnexBBufferReader reader(annex_b_test_data, arraysize(annex_b_test_data));
+  const uint8_t* nalu = nullptr;
+  size_t nalu_length = 0;
+  EXPECT_EQ(arraysize(annex_b_test_data), reader.BytesRemaining());
+  EXPECT_TRUE(reader.ReadNalu(&nalu, &nalu_length));
+  EXPECT_EQ(annex_b_test_data + 4, nalu);
+  EXPECT_EQ(11u, nalu_length);  // All bytes up to the second header at 15.
+  EXPECT_EQ(6u, reader.BytesRemaining());  // Second header (4) + payload (2).
+  EXPECT_TRUE(reader.ReadNalu(&nalu, &nalu_length));
+  EXPECT_EQ(annex_b_test_data + 19, nalu);
+  EXPECT_EQ(2u, nalu_length);
+  EXPECT_EQ(0u, reader.BytesRemaining());
+  EXPECT_FALSE(reader.ReadNalu(&nalu, &nalu_length));
+  EXPECT_EQ(nullptr, nalu);
+  EXPECT_EQ(0u, nalu_length);
+}
+
+TEST(AvccBufferWriterTest, TestEmptyOutputBuffer) {
+  const uint8_t expected_buffer[] = {0x00};
+  const size_t buffer_size = 1;
+  rtc::scoped_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
+  memset(buffer.get(), 0, buffer_size);
+  AvccBufferWriter writer(buffer.get(), 0);  // Zero capacity despite storage.
+  EXPECT_EQ(0u, writer.BytesRemaining());
+  EXPECT_FALSE(writer.WriteNalu(NALU_TEST_DATA_0, arraysize(NALU_TEST_DATA_0)));
+  EXPECT_EQ(0,
+            memcmp(expected_buffer, buffer.get(), arraysize(expected_buffer)));
+}
+
+TEST(AvccBufferWriterTest, TestWriteSingleNalu) {
+  const uint8_t expected_buffer[] = {
+      0x00, 0x00, 0x00, 0x03, 0xAA, 0xBB, 0xCC,  // 4-byte length + payload.
+  };
+  const size_t buffer_size = arraysize(NALU_TEST_DATA_0) + 4;
+  rtc::scoped_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
+  AvccBufferWriter writer(buffer.get(), buffer_size);
+  EXPECT_EQ(buffer_size, writer.BytesRemaining());
+  EXPECT_TRUE(writer.WriteNalu(NALU_TEST_DATA_0, arraysize(NALU_TEST_DATA_0)));
+  EXPECT_EQ(0u, writer.BytesRemaining());  // Buffer is now exactly full.
+  EXPECT_FALSE(writer.WriteNalu(NALU_TEST_DATA_1, arraysize(NALU_TEST_DATA_1)));
+  EXPECT_EQ(0,
+            memcmp(expected_buffer, buffer.get(), arraysize(expected_buffer)));
+}
+
+TEST(AvccBufferWriterTest, TestWriteMultipleNalus) {
+  // clang-format off
+  const uint8_t expected_buffer[] = {
+      0x00, 0x00, 0x00, 0x03, 0xAA, 0xBB, 0xCC,
+      0x00, 0x00, 0x00, 0x04, 0xDE, 0xAD, 0xBE, 0xEF
+  };
+  // clang-format on
+  const size_t buffer_size =
+      arraysize(NALU_TEST_DATA_0) + arraysize(NALU_TEST_DATA_1) + 8;  // 8 = two headers.
+  rtc::scoped_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
+  AvccBufferWriter writer(buffer.get(), buffer_size);
+  EXPECT_EQ(buffer_size, writer.BytesRemaining());
+  EXPECT_TRUE(writer.WriteNalu(NALU_TEST_DATA_0, arraysize(NALU_TEST_DATA_0)));
+  EXPECT_EQ(buffer_size - (arraysize(NALU_TEST_DATA_0) + 4),
+            writer.BytesRemaining());
+  EXPECT_TRUE(writer.WriteNalu(NALU_TEST_DATA_1, arraysize(NALU_TEST_DATA_1)));
+  EXPECT_EQ(0u, writer.BytesRemaining());
+  EXPECT_EQ(0,
+            memcmp(expected_buffer, buffer.get(), arraysize(expected_buffer)));
+}
+
+TEST(AvccBufferWriterTest, TestOverflow) {
+  const uint8_t expected_buffer[] = {0x00, 0x00, 0x00};
+  const size_t buffer_size = arraysize(NALU_TEST_DATA_0);  // No header room.
+  rtc::scoped_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
+  memset(buffer.get(), 0, buffer_size);
+  AvccBufferWriter writer(buffer.get(), buffer_size);
+  EXPECT_EQ(buffer_size, writer.BytesRemaining());
+  EXPECT_FALSE(writer.WriteNalu(NALU_TEST_DATA_0, arraysize(NALU_TEST_DATA_0)));
+  EXPECT_EQ(buffer_size, writer.BytesRemaining());  // Nothing was consumed.
+  EXPECT_EQ(0,
+            memcmp(expected_buffer, buffer.get(), arraysize(expected_buffer)));
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/h264/include/h264.h b/webrtc/modules/video_coding/codecs/h264/include/h264.h
new file mode 100644
index 0000000..3f52839
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/include/h264.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
+
+#if defined(WEBRTC_IOS) || defined(WEBRTC_MAC)
+
+#include <Availability.h>
+#if (defined(__IPHONE_8_0) && \
+ __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0) || \
+ (defined(__MAC_10_8) && __MAC_OS_X_VERSION_MAX_ALLOWED >= __MAC_10_8)
+#define WEBRTC_VIDEO_TOOLBOX_SUPPORTED 1
+#endif
+
+#endif // defined(WEBRTC_IOS) || defined(WEBRTC_MAC)
+
+#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+
+namespace webrtc {
+
+class H264Encoder : public VideoEncoder {
+ public:
+  static H264Encoder* Create();  // Callers DCHECK IsSupported() beforehand.
+  static bool IsSupported();     // Whether an H264 encoder is available here.
+
+  ~H264Encoder() override {}
+};
+
+class H264Decoder : public VideoDecoder {
+ public:
+  static H264Decoder* Create();  // Callers DCHECK IsSupported() beforehand.
+  static bool IsSupported();     // Whether an H264 decoder is available here.
+
+  ~H264Decoder() override {}
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
diff --git a/webrtc/modules/video_coding/main/source/codec_database.cc b/webrtc/modules/video_coding/main/source/codec_database.cc
index 3d887c3..49a018b 100644
--- a/webrtc/modules/video_coding/main/source/codec_database.cc
+++ b/webrtc/modules/video_coding/main/source/codec_database.cc
@@ -14,6 +14,9 @@
#include "webrtc/base/checks.h"
#include "webrtc/engine_configurations.h"
+#ifdef VIDEOCODEC_H264
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
+#endif
#ifdef VIDEOCODEC_I420
#include "webrtc/modules/video_coding/codecs/i420/main/interface/i420.h"
#endif
@@ -661,10 +664,20 @@
return new VCMGenericEncoder(new I420Encoder(), encoder_rate_observer_,
false);
#endif
+#ifdef VIDEOCODEC_H264
+ case kVideoCodecH264:
+ if (H264Encoder::IsSupported()) {
+ return new VCMGenericEncoder(H264Encoder::Create(),
+ encoder_rate_observer_,
+ false);
+ }
+ break;
+#endif
default:
- LOG(LS_WARNING) << "No internal encoder of this type exists.";
- return NULL;
+ break;
}
+ LOG(LS_WARNING) << "No internal encoder of this type exists.";
+ return NULL;
}
void VCMCodecDataBase::DeleteEncoder() {
@@ -691,10 +704,18 @@
case kVideoCodecI420:
return new VCMGenericDecoder(*(new I420Decoder));
#endif
+#ifdef VIDEOCODEC_H264
+ case kVideoCodecH264:
+ if (H264Decoder::IsSupported()) {
+ return new VCMGenericDecoder(*(H264Decoder::Create()));
+ }
+ break;
+#endif
default:
- LOG(LS_WARNING) << "No internal decoder of this type exists.";
- return NULL;
+ break;
}
+ LOG(LS_WARNING) << "No internal decoder of this type exists.";
+ return NULL;
}
const VCMDecoderMapItem* VCMCodecDataBase::FindDecoderItem(
diff --git a/webrtc/modules/video_coding/video_coding.gypi b/webrtc/modules/video_coding/video_coding.gypi
index fd9d37d..b292e0a 100644
--- a/webrtc/modules/video_coding/video_coding.gypi
+++ b/webrtc/modules/video_coding/video_coding.gypi
@@ -12,6 +12,7 @@
'target_name': 'webrtc_video_coding',
'type': 'static_library',
'dependencies': [
+ 'webrtc_h264',
'webrtc_i420',
'<(webrtc_root)/common_video/common_video.gyp:common_video',
'<(webrtc_root)/modules/video_coding/utility/video_coding_utility.gyp:video_coding_utility',
diff --git a/webrtc/video/video_decoder.cc b/webrtc/video/video_decoder.cc
index 9dde1ae..0a5df7d 100644
--- a/webrtc/video/video_decoder.cc
+++ b/webrtc/video/video_decoder.cc
@@ -11,6 +11,7 @@
#include "webrtc/video_decoder.h"
#include "webrtc/base/checks.h"
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
#include "webrtc/system_wrappers/interface/logging.h"
@@ -18,6 +19,9 @@
namespace webrtc {
VideoDecoder* VideoDecoder::Create(VideoDecoder::DecoderType codec_type) {
switch (codec_type) {
+ case kH264:
+ DCHECK(H264Decoder::IsSupported());
+ return H264Decoder::Create();
case kVp8:
return VP8Decoder::Create();
case kVp9:
@@ -32,6 +36,8 @@
VideoDecoder::DecoderType CodecTypeToDecoderType(VideoCodecType codec_type) {
switch (codec_type) {
+ case kVideoCodecH264:
+ return VideoDecoder::kH264;
case kVideoCodecVP8:
return VideoDecoder::kVp8;
case kVideoCodecVP9:
diff --git a/webrtc/video/video_encoder.cc b/webrtc/video/video_encoder.cc
index 381b776..fd213f8 100644
--- a/webrtc/video/video_encoder.cc
+++ b/webrtc/video/video_encoder.cc
@@ -11,6 +11,7 @@
#include "webrtc/video_encoder.h"
#include "webrtc/base/checks.h"
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
#include "webrtc/system_wrappers/interface/logging.h"
@@ -18,6 +19,9 @@
namespace webrtc {
VideoEncoder* VideoEncoder::Create(VideoEncoder::EncoderType codec_type) {
switch (codec_type) {
+ case kH264:
+ DCHECK(H264Encoder::IsSupported());
+ return H264Encoder::Create();
case kVp8:
return VP8Encoder::Create();
case kVp9:
@@ -32,6 +36,8 @@
VideoEncoder::EncoderType CodecToEncoderType(VideoCodecType codec_type) {
switch (codec_type) {
+ case kVideoCodecH264:
+ return VideoEncoder::kH264;
case kVideoCodecVP8:
return VideoEncoder::kVp8;
case kVideoCodecVP9:
diff --git a/webrtc/video_decoder.h b/webrtc/video_decoder.h
index da3d982..2822677 100644
--- a/webrtc/video_decoder.h
+++ b/webrtc/video_decoder.h
@@ -39,6 +39,7 @@
class VideoDecoder {
public:
enum DecoderType {
+ kH264,
kVp8,
kVp9,
kUnsupportedCodec,
diff --git a/webrtc/video_encoder.h b/webrtc/video_encoder.h
index 87cbb98..776b22b 100644
--- a/webrtc/video_encoder.h
+++ b/webrtc/video_encoder.h
@@ -37,6 +37,7 @@
class VideoEncoder {
public:
enum EncoderType {
+ kH264,
kVp8,
kVp9,
kUnsupportedCodec,