Add sender controlled playout delay limits
This CL adds support for an extension on RTP frames to allow the sender
to specify the minimum and maximum playout delay limits.
The receiver makes a best-effort attempt to keep the capture-to-render delay
within this range. This allows different types of applications to specify
different end-to-end delay goals. For example, a gaming application can
render frames as soon as they are received on the receiver to minimize delay.
A movie playback application can specify a minimum playout delay to allow
fixed buffering in the presence of network jitter.
There are no tests at this time, and most of the testing is done with the
Chromium WebRTC prototype.
On chromoting performance tests, this extension helps bring down end-to-end
delay by about 150 ms on small frames.
BUG=webrtc:5895
Review-Url: https://codereview.webrtc.org/2007743003
Cr-Original-Commit-Position: refs/heads/master@{#13059}
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: 6b4b5f37704cb414420d813eb39b90b25f5b2d6c
diff --git a/common_types.cc b/common_types.cc
index 7b99f3c..d08630b 100644
--- a/common_types.cc
+++ b/common_types.cc
@@ -31,8 +31,7 @@
voiceActivity(false),
audioLevel(0),
hasVideoRotation(false),
- videoRotation(0) {
-}
+ videoRotation(0) {}
RTPHeader::RTPHeader()
: markerBit(false),
diff --git a/common_types.h b/common_types.h
index 587b90d..02633ba 100644
--- a/common_types.h
+++ b/common_types.h
@@ -751,6 +751,24 @@
// If unknown, this value will be set to zero.
};
+// Minimum and maximum playout delay values from capture to render.
+// These are best effort values.
+//
+// A value < 0 indicates no change from previous valid value.
+//
+// min = max = 0 indicates that the receiver should try to render
+// frame as soon as possible.
+//
+// min = x, max = y indicates that the receiver is free to adapt
+// in the range (x, y) based on network jitter.
+//
+// Note: Given that this gets embedded in a union, it is up to the owner to
+// initialize these values.
+struct PlayoutDelay {
+ int min_ms;
+ int max_ms;
+};
+
struct RTPHeaderExtension {
RTPHeaderExtension();
@@ -772,6 +790,8 @@
// ts_126114v120700p.pdf
bool hasVideoRotation;
uint8_t videoRotation;
+
+ PlayoutDelay playout_delay = {-1, -1};
};
struct RTPHeader {
diff --git a/config.cc b/config.cc
index e9c56da..99146eb 100644
--- a/config.cc
+++ b/config.cc
@@ -49,6 +49,14 @@
"http://www.ietf.org/id/draft-holmer-rmcat-transport-wide-cc-extensions-01";
const int RtpExtension::kTransportSequenceNumberDefaultId = 5;
+// This extension allows applications to adaptively limit the playout delay
+// on frames as per the current needs. For example, a gaming application
+// has very different needs on end-to-end delay compared to a video-conference
+// application.
+const char* RtpExtension::kPlayoutDelayUri =
+ "http://www.webrtc.org/experiments/rtp-hdrext/playout-delay";
+const int RtpExtension::kPlayoutDelayDefaultId = 6;
+
bool RtpExtension::IsSupportedForAudio(const std::string& uri) {
return uri == webrtc::RtpExtension::kAbsSendTimeUri ||
uri == webrtc::RtpExtension::kAudioLevelUri ||
@@ -59,7 +67,8 @@
return uri == webrtc::RtpExtension::kTimestampOffsetUri ||
uri == webrtc::RtpExtension::kAbsSendTimeUri ||
uri == webrtc::RtpExtension::kVideoRotationUri ||
- uri == webrtc::RtpExtension::kTransportSequenceNumberUri;
+ uri == webrtc::RtpExtension::kTransportSequenceNumberUri ||
+ uri == webrtc::RtpExtension::kPlayoutDelayUri;
}
VideoStream::VideoStream()
diff --git a/config.h b/config.h
index d5fd1fe..083e778 100644
--- a/config.h
+++ b/config.h
@@ -87,6 +87,9 @@
static const char* kTransportSequenceNumberUri;
static const int kTransportSequenceNumberDefaultId;
+ static const char* kPlayoutDelayUri;
+ static const int kPlayoutDelayDefaultId;
+
std::string uri;
int id;
};
diff --git a/media/engine/webrtcvideoengine2.cc b/media/engine/webrtcvideoengine2.cc
index c6e9968..f327491 100644
--- a/media/engine/webrtcvideoengine2.cc
+++ b/media/engine/webrtcvideoengine2.cc
@@ -564,6 +564,9 @@
webrtc::RtpExtension::kTransportSequenceNumberUri,
webrtc::RtpExtension::kTransportSequenceNumberDefaultId));
}
+ capabilities.header_extensions.push_back(
+ webrtc::RtpExtension(webrtc::RtpExtension::kPlayoutDelayUri,
+ webrtc::RtpExtension::kPlayoutDelayDefaultId));
return capabilities;
}
diff --git a/modules/include/module_common_types.h b/modules/include/module_common_types.h
index 3572cd6..89144af 100644
--- a/modules/include/module_common_types.h
+++ b/modules/include/module_common_types.h
@@ -289,6 +289,8 @@
uint16_t height;
VideoRotation rotation;
+ PlayoutDelay playout_delay;
+
bool isFirstPacket; // first packet in frame
uint8_t simulcastIdx; // Index if the simulcast encoder creating
// this frame, 0 if not using simulcast.
diff --git a/modules/modules.gyp b/modules/modules.gyp
index d29932f..08c81b5 100644
--- a/modules/modules.gyp
+++ b/modules/modules.gyp
@@ -299,6 +299,7 @@
'rtp_rtcp/source/nack_rtx_unittest.cc',
'rtp_rtcp/source/packet_loss_stats_unittest.cc',
'rtp_rtcp/source/producer_fec_unittest.cc',
+ 'rtp_rtcp/source/playout_delay_oracle_unittest.cc',
'rtp_rtcp/source/receive_statistics_unittest.cc',
'rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc',
'rtp_rtcp/source/rtcp_format_remb_unittest.cc',
diff --git a/modules/rtp_rtcp/BUILD.gn b/modules/rtp_rtcp/BUILD.gn
index df6f92e..77cc175 100644
--- a/modules/rtp_rtcp/BUILD.gn
+++ b/modules/rtp_rtcp/BUILD.gn
@@ -35,6 +35,8 @@
"source/mock/mock_rtp_payload_strategy.h",
"source/packet_loss_stats.cc",
"source/packet_loss_stats.h",
+ "source/playout_delay_oracle.cc",
+ "source/playout_delay_oracle.h",
"source/producer_fec.cc",
"source/producer_fec.h",
"source/receive_statistics_impl.cc",
diff --git a/modules/rtp_rtcp/include/rtp_rtcp_defines.h b/modules/rtp_rtcp/include/rtp_rtcp_defines.h
index 79e7677..8ba5e95 100644
--- a/modules/rtp_rtcp/include/rtp_rtcp_defines.h
+++ b/modules/rtp_rtcp/include/rtp_rtcp_defines.h
@@ -67,6 +67,7 @@
kRtpExtensionAbsoluteSendTime,
kRtpExtensionVideoRotation,
kRtpExtensionTransportSequenceNumber,
+ kRtpExtensionPlayoutDelay,
};
enum RTCPAppSubTypes { kAppSubtypeBwe = 0x00 };
diff --git a/modules/rtp_rtcp/rtp_rtcp.gypi b/modules/rtp_rtcp/rtp_rtcp.gypi
index 1c89e54..0c8477f 100644
--- a/modules/rtp_rtcp/rtp_rtcp.gypi
+++ b/modules/rtp_rtcp/rtp_rtcp.gypi
@@ -33,6 +33,8 @@
'source/fec_receiver_impl.h',
'source/packet_loss_stats.cc',
'source/packet_loss_stats.h',
+ 'source/playout_delay_oracle.cc',
+ 'source/playout_delay_oracle.h',
'source/receive_statistics_impl.cc',
'source/receive_statistics_impl.h',
'source/remote_ntp_time_estimator.cc',
diff --git a/modules/rtp_rtcp/source/playout_delay_oracle.cc b/modules/rtp_rtcp/source/playout_delay_oracle.cc
new file mode 100644
index 0000000..ed3af3f
--- /dev/null
+++ b/modules/rtp_rtcp/source/playout_delay_oracle.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/playout_delay_oracle.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/source/rtp_header_extension.h"
+
+namespace webrtc {
+
+PlayoutDelayOracle::PlayoutDelayOracle()
+ : high_sequence_number_(0),
+ send_playout_delay_(false),
+ ssrc_(0),
+ min_playout_delay_ms_(-1),
+ max_playout_delay_ms_(-1) {
+ thread_checker_.DetachFromThread();
+}
+PlayoutDelayOracle::~PlayoutDelayOracle() {}
+
+void PlayoutDelayOracle::UpdateRequest(uint32_t ssrc,
+ PlayoutDelay playout_delay,
+ uint16_t seq_num) {
+ rtc::CritScope lock(&crit_sect_);
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ RTC_DCHECK_LE(playout_delay.min_ms, kPlayoutDelayMaxMs);
+ RTC_DCHECK_LE(playout_delay.max_ms, kPlayoutDelayMaxMs);
+ RTC_DCHECK_LE(playout_delay.min_ms, playout_delay.max_ms);
+ int64_t unwrapped_seq_num = unwrapper_.Unwrap(seq_num);
+ if (playout_delay.min_ms >= 0 &&
+ playout_delay.min_ms != min_playout_delay_ms_) {
+ send_playout_delay_ = true;
+ min_playout_delay_ms_ = playout_delay.min_ms;
+ high_sequence_number_ = unwrapped_seq_num;
+ }
+
+ if (playout_delay.max_ms >= 0 &&
+ playout_delay.max_ms != max_playout_delay_ms_) {
+ send_playout_delay_ = true;
+ max_playout_delay_ms_ = playout_delay.max_ms;
+ high_sequence_number_ = unwrapped_seq_num;
+ }
+ ssrc_ = ssrc;
+}
+
+// If an ACK is received on the packet containing the playout delay extension,
+// we stop sending the extension on future packets.
+void PlayoutDelayOracle::OnReceivedRtcpReportBlocks(
+ const ReportBlockList& report_blocks) {
+ rtc::CritScope lock(&crit_sect_);
+ for (const RTCPReportBlock& report_block : report_blocks) {
+ if ((ssrc_ == report_block.sourceSSRC) && send_playout_delay_ &&
+ (report_block.extendedHighSeqNum > high_sequence_number_)) {
+ send_playout_delay_ = false;
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/modules/rtp_rtcp/source/playout_delay_oracle.h b/modules/rtp_rtcp/source/playout_delay_oracle.h
new file mode 100644
index 0000000..5261415
--- /dev/null
+++ b/modules/rtp_rtcp/source/playout_delay_oracle.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_PLAYOUT_DELAY_ORACLE_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_PLAYOUT_DELAY_ORACLE_H_
+
+#include "webrtc/base/basictypes.h"
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/thread_checker.h"
+#include "webrtc/base/thread_annotations.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+
+namespace webrtc {
+
+// This class tracks the application requests to limit minimum and maximum
+// playout delay and makes a decision on whether the current RTP frame
+// should include the playout delay extension header.
+//
+// Playout delay can be defined in terms of capture and render time as follows:
+//
+// Render time = Capture time in receiver time + playout delay
+//
+// The application specifies a minimum and maximum limit for the playout delay
+// which are both communicated to the receiver and the receiver can adapt
+// the playout delay within this range based on observed network jitter.
+class PlayoutDelayOracle {
+ public:
+ PlayoutDelayOracle();
+ ~PlayoutDelayOracle();
+
+ // Returns true if the current frame should include the playout delay
+ // extension.
+ bool send_playout_delay() const {
+ rtc::CritScope lock(&crit_sect_);
+ return send_playout_delay_;
+ }
+
+ // Returns current minimum playout delay in milliseconds.
+ int min_playout_delay_ms() const {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return min_playout_delay_ms_;
+ }
+
+ // Returns current maximum playout delay in milliseconds.
+ int max_playout_delay_ms() const {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return max_playout_delay_ms_;
+ }
+
+ // Updates the application requested playout delay, current ssrc
+ // and the current sequence number.
+ void UpdateRequest(uint32_t ssrc,
+ PlayoutDelay playout_delay,
+ uint16_t seq_num);
+
+ void OnReceivedRtcpReportBlocks(const ReportBlockList& report_blocks);
+
+ private:
+ // The playout delay information is updated from the encoder thread or
+ // a thread controlled by application in case of external encoder.
+ // The sequence number feedback is updated from the worker thread.
+ // Guards access to data across the two threads.
+ rtc::CriticalSection crit_sect_;
+ // The current highest sequence number on which playout delay has been sent.
+ int64_t high_sequence_number_ GUARDED_BY(crit_sect_);
+ // Indicates whether the playout delay should go on the next frame.
+ bool send_playout_delay_ GUARDED_BY(crit_sect_);
+ // Sender ssrc.
+ uint32_t ssrc_ GUARDED_BY(crit_sect_);
+
+ // Data in this section is accessed on the sending/encoder thread alone.
+ rtc::ThreadChecker thread_checker_;
+ // Sequence number unwrapper.
+ SequenceNumberUnwrapper unwrapper_ ACCESS_ON(thread_checker_);
+ // Min playout delay value on the next frame if |send_playout_delay_| is set.
+ int min_playout_delay_ms_ ACCESS_ON(thread_checker_);
+ // Max playout delay value on the next frame if |send_playout_delay_| is set.
+ int max_playout_delay_ms_ ACCESS_ON(thread_checker_);
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(PlayoutDelayOracle);
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_PLAYOUT_DELAY_ORACLE_H_
diff --git a/modules/rtp_rtcp/source/playout_delay_oracle_unittest.cc b/modules/rtp_rtcp/source/playout_delay_oracle_unittest.cc
new file mode 100644
index 0000000..efcb451
--- /dev/null
+++ b/modules/rtp_rtcp/source/playout_delay_oracle_unittest.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/playout_delay_oracle.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/logging.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kSsrc = 100;
+constexpr int kSequenceNumber = 100;
+constexpr int kMinPlayoutDelay = 0;
+constexpr int kMaxPlayoutDelay = 150;
+} // namespace
+
+class PlayoutDelayOracleTest : public ::testing::Test {
+ protected:
+ void ReportRTCPFeedback(int ssrc, int seq_num) {
+ RTCPReportBlock report_block;
+ report_block.sourceSSRC = ssrc;
+ report_block.extendedHighSeqNum = seq_num;
+ report_blocks_.push_back(report_block);
+ playout_delay_oracle_.OnReceivedRtcpReportBlocks(report_blocks_);
+ }
+
+ ReportBlockList report_blocks_;
+ PlayoutDelayOracle playout_delay_oracle_;
+};
+
+TEST_F(PlayoutDelayOracleTest, DisabledByDefault) {
+ EXPECT_FALSE(playout_delay_oracle_.send_playout_delay());
+ EXPECT_EQ(playout_delay_oracle_.min_playout_delay_ms(), -1);
+ EXPECT_EQ(playout_delay_oracle_.max_playout_delay_ms(), -1);
+}
+
+TEST_F(PlayoutDelayOracleTest, SendPlayoutDelayUntilSeqNumberExceeds) {
+ PlayoutDelay playout_delay = {kMinPlayoutDelay, kMaxPlayoutDelay};
+ playout_delay_oracle_.UpdateRequest(kSsrc, playout_delay, kSequenceNumber);
+ EXPECT_TRUE(playout_delay_oracle_.send_playout_delay());
+ EXPECT_EQ(playout_delay_oracle_.min_playout_delay_ms(), kMinPlayoutDelay);
+ EXPECT_EQ(playout_delay_oracle_.max_playout_delay_ms(), kMaxPlayoutDelay);
+
+ // Oracle indicates playout delay should be sent if highest sequence number
+ // acked is lower than the sequence number of the first packet containing
+ // playout delay.
+ ReportRTCPFeedback(kSsrc, kSequenceNumber - 1);
+ EXPECT_TRUE(playout_delay_oracle_.send_playout_delay());
+
+ // An invalid ssrc feedback report is dropped by the oracle.
+ ReportRTCPFeedback(kSsrc + 1, kSequenceNumber + 1);
+ EXPECT_TRUE(playout_delay_oracle_.send_playout_delay());
+
+ // Oracle indicates playout delay should not be sent if sequence number
+ // acked on a matching ssrc indicates the receiver has received the playout
+ // delay values.
+ ReportRTCPFeedback(kSsrc, kSequenceNumber + 1);
+ EXPECT_FALSE(playout_delay_oracle_.send_playout_delay());
+}
+
+} // namespace webrtc
diff --git a/modules/rtp_rtcp/source/rtcp_receiver.cc b/modules/rtp_rtcp/source/rtcp_receiver.cc
index 34b2ce3..ae1f875 100644
--- a/modules/rtp_rtcp/source/rtcp_receiver.cc
+++ b/modules/rtp_rtcp/source/rtcp_receiver.cc
@@ -1356,6 +1356,11 @@
now);
}
}
+ if ((rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpSr) ||
+ (rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpRr)) {
+ _rtpRtcp.OnReceivedRtcpReportBlocks(rtcpPacketInformation.report_blocks);
+ }
+
if (_cbTransportFeedbackObserver &&
(rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpTransportFeedback)) {
uint32_t media_source_ssrc =
diff --git a/modules/rtp_rtcp/source/rtp_header_extension.h b/modules/rtp_rtcp/source/rtp_header_extension.h
index beaf989..f7df4bb 100644
--- a/modules/rtp_rtcp/source/rtp_header_extension.h
+++ b/modules/rtp_rtcp/source/rtp_header_extension.h
@@ -26,6 +26,14 @@
const size_t kAbsoluteSendTimeLength = 4;
const size_t kVideoRotationLength = 2;
const size_t kTransportSequenceNumberLength = 3;
+const size_t kPlayoutDelayLength = 4;
+
+// Playout delay in milliseconds. A playout delay limit (min or max)
+// has 12 bits allocated. This allows a range of 0-4095 values which translates
+// to a range of 0-40950 in milliseconds.
+const int kPlayoutDelayGranularityMs = 10;
+// Maximum playout delay value in milliseconds.
+const int kPlayoutDelayMaxMs = 40950;
struct HeaderExtension {
explicit HeaderExtension(RTPExtensionType extension_type)
@@ -58,6 +66,9 @@
case kRtpExtensionTransportSequenceNumber:
length = kTransportSequenceNumberLength;
break;
+ case kRtpExtensionPlayoutDelay:
+ length = kPlayoutDelayLength;
+ break;
default:
assert(false);
}
@@ -79,10 +90,9 @@
int32_t Register(const RTPExtensionType type, const uint8_t id);
- // Active is a concept for a registered rtp header extension which doesn't
- // take effect yet until being activated. Inactive RTP header extensions do
- // not take effect and should not be included in size calculations until they
- // are activated.
+ // Active on an extension indicates whether it is currently being added
+ // on the RTP packets. The active/inactive status on an extension can change
+ // dynamically depending on the need to convey new information.
int32_t RegisterInactive(const RTPExtensionType type, const uint8_t id);
bool SetActive(const RTPExtensionType type, bool active);
diff --git a/modules/rtp_rtcp/source/rtp_receiver_video.cc b/modules/rtp_rtcp/source/rtp_receiver_video.cc
index 9d76c1a..7667c46 100644
--- a/modules/rtp_rtcp/source/rtp_receiver_video.cc
+++ b/modules/rtp_rtcp/source/rtp_receiver_video.cc
@@ -98,6 +98,9 @@
rtp_header->header.extension.videoRotation);
}
+ rtp_header->type.Video.playout_delay =
+ rtp_header->header.extension.playout_delay;
+
return data_callback_->OnReceivedPayloadData(parsed_payload.payload,
parsed_payload.payload_length,
rtp_header) == 0
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
index bd36a52..be8ab34 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
@@ -39,6 +39,8 @@
return kRtpExtensionVideoRotation;
if (extension == RtpExtension::kTransportSequenceNumberUri)
return kRtpExtensionTransportSequenceNumber;
+ if (extension == RtpExtension::kPlayoutDelayUri)
+ return kRtpExtensionPlayoutDelay;
RTC_NOTREACHED() << "Looking up unsupported RTP extension.";
return kRtpExtensionNone;
}
@@ -924,6 +926,11 @@
rtp_sender_.OnReceivedNACK(nack_sequence_numbers, rtt);
}
+void ModuleRtpRtcpImpl::OnReceivedRtcpReportBlocks(
+ const ReportBlockList& report_blocks) {
+ rtp_sender_.OnReceivedRtcpReportBlocks(report_blocks);
+}
+
bool ModuleRtpRtcpImpl::LastReceivedNTP(
uint32_t* rtcp_arrival_time_secs, // When we got the last report.
uint32_t* rtcp_arrival_time_frac,
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.h b/modules/rtp_rtcp/source/rtp_rtcp_impl.h
index cb47cc7..369cdca 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl.h
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.h
@@ -19,6 +19,7 @@
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/gtest_prod_util.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/packet_loss_stats.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_receiver.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_sender.h"
@@ -319,6 +320,7 @@
const override;
void OnReceivedNACK(const std::list<uint16_t>& nack_sequence_numbers);
+ void OnReceivedRtcpReportBlocks(const ReportBlockList& report_blocks);
void OnRequestSendReport();
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
index 7e0ac31..98269cf 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
@@ -195,13 +195,9 @@
void SendFrame(const RtpRtcpModule* module, uint8_t tid) {
RTPVideoHeaderVP8 vp8_header = {};
vp8_header.temporalIdx = tid;
- RTPVideoHeader rtp_video_header = {codec_.width,
- codec_.height,
- kVideoRotation_0,
- true,
- 0,
- kRtpVideoVp8,
- {vp8_header}};
+ RTPVideoHeader rtp_video_header = {
+ codec_.width, codec_.height, kVideoRotation_0, {-1, -1}, true, 0,
+ kRtpVideoVp8, {vp8_header}};
const uint8_t payload[100] = {0};
EXPECT_EQ(0, module->impl_->SendOutgoingData(kVideoFrameKey,
diff --git a/modules/rtp_rtcp/source/rtp_sender.cc b/modules/rtp_rtcp/source/rtp_sender.cc
index 016a846..fe0e49f 100644
--- a/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/modules/rtp_rtcp/source/rtp_sender.cc
@@ -22,6 +22,7 @@
#include "webrtc/call/rtc_event_log.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_cvo.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+#include "webrtc/modules/rtp_rtcp/source/playout_delay_oracle.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_sender_video.h"
#include "webrtc/modules/rtp_rtcp/source/time_util.h"
@@ -137,12 +138,13 @@
transmission_time_offset_(0),
absolute_send_time_(0),
rotation_(kVideoRotation_0),
- cvo_mode_(kCVONone),
+ video_rotation_active_(false),
transport_sequence_number_(0),
// NACK.
nack_byte_count_times_(),
nack_byte_count_(),
nack_bitrate_(clock, bitrates_.retransmit_bitrate_observer()),
+ playout_delay_active_(false),
packet_history_(clock),
// Statistics
rtp_stats_callback_(NULL),
@@ -271,11 +273,23 @@
int32_t RTPSender::RegisterRtpHeaderExtension(RTPExtensionType type,
uint8_t id) {
rtc::CritScope lock(&send_critsect_);
- if (type == kRtpExtensionVideoRotation) {
- cvo_mode_ = kCVOInactive;
- return rtp_header_extension_map_.RegisterInactive(type, id);
+ switch (type) {
+ case kRtpExtensionVideoRotation:
+ video_rotation_active_ = false;
+ return rtp_header_extension_map_.RegisterInactive(type, id);
+ case kRtpExtensionPlayoutDelay:
+ playout_delay_active_ = false;
+ return rtp_header_extension_map_.RegisterInactive(type, id);
+ case kRtpExtensionTransmissionTimeOffset:
+ case kRtpExtensionAbsoluteSendTime:
+ case kRtpExtensionAudioLevel:
+ case kRtpExtensionTransportSequenceNumber:
+ return rtp_header_extension_map_.Register(type, id);
+ case kRtpExtensionNone:
+ LOG(LS_ERROR) << "Invalid RTP extension type for registration";
+ return -1;
}
- return rtp_header_extension_map_.Register(type, id);
+ return -1;
}
bool RTPSender::IsRtpHeaderExtensionRegistered(RTPExtensionType type) {
@@ -288,7 +302,7 @@
return rtp_header_extension_map_.Deregister(type);
}
-size_t RTPSender::RtpHeaderExtensionTotalLength() const {
+size_t RTPSender::RtpHeaderExtensionLength() const {
rtc::CritScope lock(&send_critsect_);
return rtp_header_extension_map_.GetTotalLengthInBytes();
}
@@ -386,9 +400,9 @@
rtx = rtx_;
}
if (audio_configured_) {
- return max_payload_length_ - RTPHeaderLength();
+ return max_payload_length_ - RtpHeaderLength();
} else {
- return max_payload_length_ - RTPHeaderLength() // RTP overhead.
+ return max_payload_length_ - RtpHeaderLength() // RTP overhead.
- video_->FECPacketOverhead() // FEC/ULP/RED overhead.
- ((rtx) ? 2 : 0); // RTX overhead.
}
@@ -472,14 +486,14 @@
return 0;
}
-RTPSenderInterface::CVOMode RTPSender::ActivateCVORtpHeaderExtension() {
- if (cvo_mode_ == kCVOInactive) {
+bool RTPSender::ActivateCVORtpHeaderExtension() {
+ if (!video_rotation_active_) {
rtc::CritScope lock(&send_critsect_);
if (rtp_header_extension_map_.SetActive(kRtpExtensionVideoRotation, true)) {
- cvo_mode_ = kCVOActivated;
+ video_rotation_active_ = true;
}
}
- return cvo_mode_;
+ return video_rotation_active_;
}
int32_t RTPSender::SendOutgoingData(FrameType frame_type,
@@ -491,10 +505,12 @@
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtp_hdr) {
uint32_t ssrc;
+ uint16_t sequence_number;
{
// Drop this packet if we're not sending media packets.
rtc::CritScope lock(&send_critsect_);
ssrc = ssrc_;
+ sequence_number = sequence_number_;
if (!sending_media_) {
return 0;
}
@@ -523,10 +539,25 @@
if (frame_type == kEmptyFrame)
return 0;
- ret_val =
- video_->SendVideo(video_type, frame_type, payload_type,
- capture_timestamp, capture_time_ms, payload_data,
- payload_size, fragmentation, rtp_hdr);
+ if (rtp_hdr) {
+ playout_delay_oracle_.UpdateRequest(ssrc, rtp_hdr->playout_delay,
+ sequence_number);
+ }
+
+ // Update the active/inactive status of playout delay extension based
+ // on what the oracle indicates.
+ {
+ rtc::CritScope lock(&send_critsect_);
+ if (playout_delay_active_ != playout_delay_oracle_.send_playout_delay()) {
+ playout_delay_active_ = playout_delay_oracle_.send_playout_delay();
+ rtp_header_extension_map_.SetActive(kRtpExtensionPlayoutDelay,
+ playout_delay_active_);
+ }
+ }
+
+ ret_val = video_->SendVideo(
+ video_type, frame_type, payload_type, capture_timestamp,
+ capture_time_ms, payload_data, payload_size, fragmentation, rtp_hdr);
}
rtc::CritScope cs(&statistics_crit_);
@@ -833,6 +864,11 @@
}
}
+void RTPSender::OnReceivedRtcpReportBlocks(
+ const ReportBlockList& report_blocks) {
+ playout_delay_oracle_.OnReceivedRtcpReportBlocks(report_blocks);
+}
+
bool RTPSender::ProcessNACKBitRate(uint32_t now) {
uint32_t num = 0;
size_t byte_count = 0;
@@ -1152,11 +1188,11 @@
video_->ProcessBitrate();
}
-size_t RTPSender::RTPHeaderLength() const {
+size_t RTPSender::RtpHeaderLength() const {
rtc::CritScope lock(&send_critsect_);
size_t rtp_header_length = kRtpHeaderLength;
rtp_header_length += sizeof(uint32_t) * csrcs_.size();
- rtp_header_length += RtpHeaderExtensionTotalLength();
+ rtp_header_length += RtpHeaderExtensionLength();
return rtp_header_length;
}
@@ -1283,6 +1319,11 @@
block_length = BuildTransportSequenceNumberExtension(
extension_data, transport_sequence_number_);
break;
+ case kRtpExtensionPlayoutDelay:
+ block_length = BuildPlayoutDelayExtension(
+ extension_data, playout_delay_oracle_.min_playout_delay_ms(),
+ playout_delay_oracle_.max_playout_delay_ms());
+ break;
default:
assert(false);
}
@@ -1458,6 +1499,37 @@
return kTransportSequenceNumberLength;
}
+uint8_t RTPSender::BuildPlayoutDelayExtension(
+ uint8_t* data_buffer,
+ uint16_t min_playout_delay_ms,
+ uint16_t max_playout_delay_ms) const {
+ RTC_DCHECK_LE(min_playout_delay_ms, kPlayoutDelayMaxMs);
+ RTC_DCHECK_LE(max_playout_delay_ms, kPlayoutDelayMaxMs);
+ RTC_DCHECK_LE(min_playout_delay_ms, max_playout_delay_ms);
+ // 0 1 2 3
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | ID | len=2 | MIN delay | MAX delay |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ uint8_t id;
+ if (rtp_header_extension_map_.GetId(kRtpExtensionPlayoutDelay, &id) != 0) {
+ // Not registered.
+ return 0;
+ }
+ size_t pos = 0;
+ const uint8_t len = 2;
+ // Convert MS to value to be sent on extension header.
+ uint16_t min_playout = min_playout_delay_ms / kPlayoutDelayGranularityMs;
+ uint16_t max_playout = max_playout_delay_ms / kPlayoutDelayGranularityMs;
+
+ data_buffer[pos++] = (id << 4) + len;
+ data_buffer[pos++] = min_playout >> 4;
+ data_buffer[pos++] = ((min_playout & 0xf) << 4) | (max_playout >> 8);
+ data_buffer[pos++] = max_playout & 0xff;
+ assert(pos == kPlayoutDelayLength);
+ return kPlayoutDelayLength;
+}
+
bool RTPSender::FindHeaderExtensionPosition(RTPExtensionType type,
const uint8_t* rtp_packet,
size_t rtp_packet_length,
diff --git a/modules/rtp_rtcp/source/rtp_sender.h b/modules/rtp_rtcp/source/rtp_sender.h
index 070c282..ffbcb81 100644
--- a/modules/rtp_rtcp/source/rtp_sender.h
+++ b/modules/rtp_rtcp/source/rtp_sender.h
@@ -24,6 +24,7 @@
#include "webrtc/common_types.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/bitrate.h"
+#include "webrtc/modules/rtp_rtcp/source/playout_delay_oracle.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_header_extension.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_packet_history.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h"
@@ -42,14 +43,6 @@
RTPSenderInterface() {}
virtual ~RTPSenderInterface() {}
- enum CVOMode {
- kCVONone,
- kCVOInactive, // CVO rtp header extension is registered but haven't
- // received any frame with rotation pending.
- kCVOActivated, // CVO rtp header extension will be present in the rtp
- // packets.
- };
-
virtual uint32_t SSRC() const = 0;
virtual uint32_t Timestamp() const = 0;
@@ -61,7 +54,9 @@
bool timestamp_provided = true,
bool inc_sequence_number = true) = 0;
- virtual size_t RTPHeaderLength() const = 0;
+ // This returns the expected header length taking into consideration
+ // the optional RTP header extensions that may not be currently active.
+ virtual size_t RtpHeaderLength() const = 0;
// Returns the next sequence number to use for a packet and allocates
// 'packets_to_send' number of sequence numbers. It's important all allocated
// sequence numbers are used in sequence to avoid perceived packet loss.
@@ -83,7 +78,7 @@
const RTPHeader& rtp_header,
VideoRotation rotation) const = 0;
virtual bool IsRtpHeaderExtensionRegistered(RTPExtensionType type) = 0;
- virtual CVOMode ActivateCVORtpHeaderExtension() = 0;
+ virtual bool ActivateCVORtpHeaderExtension() = 0;
};
class RTPSender : public RTPSenderInterface {
@@ -170,7 +165,7 @@
bool IsRtpHeaderExtensionRegistered(RTPExtensionType type) override;
int32_t DeregisterRtpHeaderExtension(RTPExtensionType type);
- size_t RtpHeaderExtensionTotalLength() const;
+ size_t RtpHeaderExtensionLength() const;
uint16_t BuildRTPHeaderExtension(uint8_t* data_buffer, bool marker_bit) const;
@@ -180,6 +175,9 @@
uint8_t BuildVideoRotationExtension(uint8_t* data_buffer) const;
uint8_t BuildTransportSequenceNumberExtension(uint8_t* data_buffer,
uint16_t sequence_number) const;
+ uint8_t BuildPlayoutDelayExtension(uint8_t* data_buffer,
+ uint16_t min_playout_delay_ms,
+ uint16_t max_playout_delay_ms) const;
// Verifies that the specified extension is registered, and that it is
// present in rtp packet. If extension is not registered kNotRegistered is
@@ -231,6 +229,9 @@
bool ProcessNACKBitRate(uint32_t now);
+ // Feedback to decide when to stop sending playout delay.
+ void OnReceivedRtcpReportBlocks(const ReportBlockList& report_blocks);
+
// RTX.
void SetRtxStatus(int mode);
int RtxStatus() const;
@@ -249,7 +250,7 @@
const bool timestamp_provided = true,
const bool inc_sequence_number = true) override;
- size_t RTPHeaderLength() const override;
+ size_t RtpHeaderLength() const override;
uint16_t AllocateSequenceNumber(uint16_t packets_to_send) override;
size_t MaxPayloadLength() const override;
@@ -320,7 +321,7 @@
RtpState GetRtpState() const;
void SetRtxRtpState(const RtpState& rtp_state);
RtpState GetRtxRtpState() const;
- CVOMode ActivateCVORtpHeaderExtension() override;
+ bool ActivateCVORtpHeaderExtension() override;
protected:
int32_t CheckPayloadType(int8_t payload_type, RtpVideoCodecTypes* video_type);
@@ -390,6 +391,12 @@
size_t rtp_packet_length,
const RTPHeader& rtp_header) const;
+ void UpdatePlayoutDelayLimits(uint8_t* rtp_packet,
+ size_t rtp_packet_length,
+ const RTPHeader& rtp_header,
+ uint16_t min_playout_delay,
+ uint16_t max_playout_delay) const;
+
bool AllocateTransportSequenceNumber(int* packet_id) const;
void UpdateRtpStats(const uint8_t* buffer,
@@ -460,7 +467,7 @@
int32_t transmission_time_offset_;
uint32_t absolute_send_time_;
VideoRotation rotation_;
- CVOMode cvo_mode_;
+ bool video_rotation_active_;
uint16_t transport_sequence_number_;
// NACK
@@ -468,6 +475,12 @@
size_t nack_byte_count_[NACK_BYTECOUNT_SIZE];
Bitrate nack_bitrate_;
+ // Tracks the current request for playout delay limits from application
+ // and decides whether the current RTP frame should include the playout
+ // delay extension on header.
+ PlayoutDelayOracle playout_delay_oracle_;
+ bool playout_delay_active_ GUARDED_BY(send_critsect_);
+
RTPPacketHistory packet_history_;
// Statistics
diff --git a/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
index aa882d2..f0b6411 100644
--- a/modules/rtp_rtcp/source/rtp_sender_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
@@ -236,7 +236,7 @@
size_t length = static_cast<size_t>(rtp_sender_->BuildRTPheader(
packet_, kPayload, expect_cvo /* marker_bit */, kTimestamp, 0));
if (expect_cvo) {
- ASSERT_EQ(kRtpHeaderSize + rtp_sender_->RtpHeaderExtensionTotalLength(),
+ ASSERT_EQ(kRtpHeaderSize + rtp_sender_->RtpHeaderExtensionLength(),
length);
} else {
ASSERT_EQ(kRtpHeaderSize, length);
@@ -256,63 +256,63 @@
TEST_F(RtpSenderTestWithoutPacer,
RegisterRtpTransmissionTimeOffsetHeaderExtension) {
- EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
+ EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionLength());
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
kRtpExtensionTransmissionTimeOffset,
kTransmissionTimeOffsetExtensionId));
EXPECT_EQ(kRtpOneByteHeaderLength + kTransmissionTimeOffsetLength,
- rtp_sender_->RtpHeaderExtensionTotalLength());
+ rtp_sender_->RtpHeaderExtensionLength());
EXPECT_EQ(0, rtp_sender_->DeregisterRtpHeaderExtension(
kRtpExtensionTransmissionTimeOffset));
- EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
+ EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionLength());
}
TEST_F(RtpSenderTestWithoutPacer, RegisterRtpAbsoluteSendTimeHeaderExtension) {
- EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
+ EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionLength());
EXPECT_EQ(
0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
kAbsoluteSendTimeExtensionId));
EXPECT_EQ(RtpUtility::Word32Align(kRtpOneByteHeaderLength +
kAbsoluteSendTimeLength),
- rtp_sender_->RtpHeaderExtensionTotalLength());
+ rtp_sender_->RtpHeaderExtensionLength());
EXPECT_EQ(0, rtp_sender_->DeregisterRtpHeaderExtension(
kRtpExtensionAbsoluteSendTime));
- EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
+ EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionLength());
}
TEST_F(RtpSenderTestWithoutPacer, RegisterRtpAudioLevelHeaderExtension) {
- EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
+ EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionLength());
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAudioLevel,
kAudioLevelExtensionId));
EXPECT_EQ(
RtpUtility::Word32Align(kRtpOneByteHeaderLength + kAudioLevelLength),
- rtp_sender_->RtpHeaderExtensionTotalLength());
+ rtp_sender_->RtpHeaderExtensionLength());
EXPECT_EQ(0,
rtp_sender_->DeregisterRtpHeaderExtension(kRtpExtensionAudioLevel));
- EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
+ EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionLength());
}
TEST_F(RtpSenderTestWithoutPacer, RegisterRtpHeaderExtensions) {
- EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
+ EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionLength());
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
kRtpExtensionTransmissionTimeOffset,
kTransmissionTimeOffsetExtensionId));
EXPECT_EQ(RtpUtility::Word32Align(kRtpOneByteHeaderLength +
kTransmissionTimeOffsetLength),
- rtp_sender_->RtpHeaderExtensionTotalLength());
+ rtp_sender_->RtpHeaderExtensionLength());
EXPECT_EQ(
0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
kAbsoluteSendTimeExtensionId));
EXPECT_EQ(RtpUtility::Word32Align(kRtpOneByteHeaderLength +
kTransmissionTimeOffsetLength +
kAbsoluteSendTimeLength),
- rtp_sender_->RtpHeaderExtensionTotalLength());
+ rtp_sender_->RtpHeaderExtensionLength());
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAudioLevel,
kAudioLevelExtensionId));
EXPECT_EQ(RtpUtility::Word32Align(
kRtpOneByteHeaderLength + kTransmissionTimeOffsetLength +
kAbsoluteSendTimeLength + kAudioLevelLength),
- rtp_sender_->RtpHeaderExtensionTotalLength());
+ rtp_sender_->RtpHeaderExtensionLength());
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
kRtpExtensionVideoRotation, kVideoRotationExtensionId));
EXPECT_TRUE(rtp_sender_->ActivateCVORtpHeaderExtension());
@@ -320,7 +320,7 @@
kTransmissionTimeOffsetLength +
kAbsoluteSendTimeLength +
kAudioLevelLength + kVideoRotationLength),
- rtp_sender_->RtpHeaderExtensionTotalLength());
+ rtp_sender_->RtpHeaderExtensionLength());
// Deregister starts.
EXPECT_EQ(0, rtp_sender_->DeregisterRtpHeaderExtension(
@@ -328,35 +328,35 @@
EXPECT_EQ(RtpUtility::Word32Align(kRtpOneByteHeaderLength +
kAbsoluteSendTimeLength +
kAudioLevelLength + kVideoRotationLength),
- rtp_sender_->RtpHeaderExtensionTotalLength());
+ rtp_sender_->RtpHeaderExtensionLength());
EXPECT_EQ(0, rtp_sender_->DeregisterRtpHeaderExtension(
kRtpExtensionAbsoluteSendTime));
EXPECT_EQ(RtpUtility::Word32Align(kRtpOneByteHeaderLength +
kAudioLevelLength + kVideoRotationLength),
- rtp_sender_->RtpHeaderExtensionTotalLength());
+ rtp_sender_->RtpHeaderExtensionLength());
EXPECT_EQ(0,
rtp_sender_->DeregisterRtpHeaderExtension(kRtpExtensionAudioLevel));
EXPECT_EQ(
RtpUtility::Word32Align(kRtpOneByteHeaderLength + kVideoRotationLength),
- rtp_sender_->RtpHeaderExtensionTotalLength());
+ rtp_sender_->RtpHeaderExtensionLength());
EXPECT_EQ(
0, rtp_sender_->DeregisterRtpHeaderExtension(kRtpExtensionVideoRotation));
- EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
+ EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionLength());
}
TEST_F(RtpSenderTestWithoutPacer, RegisterRtpVideoRotationHeaderExtension) {
- EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
+ EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionLength());
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
kRtpExtensionVideoRotation, kVideoRotationExtensionId));
- EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
+ EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionLength());
EXPECT_TRUE(rtp_sender_->ActivateCVORtpHeaderExtension());
EXPECT_EQ(
RtpUtility::Word32Align(kRtpOneByteHeaderLength + kVideoRotationLength),
- rtp_sender_->RtpHeaderExtensionTotalLength());
+ rtp_sender_->RtpHeaderExtensionLength());
EXPECT_EQ(
0, rtp_sender_->DeregisterRtpHeaderExtension(kRtpExtensionVideoRotation));
- EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
+ EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionLength());
}
TEST_F(RtpSenderTestWithoutPacer, BuildRTPPacket) {
@@ -393,8 +393,7 @@
size_t length = static_cast<size_t>(rtp_sender_->BuildRTPheader(
packet_, kPayload, kMarkerBit, kTimestamp, 0));
- ASSERT_EQ(kRtpHeaderSize + rtp_sender_->RtpHeaderExtensionTotalLength(),
- length);
+ ASSERT_EQ(kRtpHeaderSize + rtp_sender_->RtpHeaderExtensionLength(), length);
// Verify
webrtc::RtpUtility::RtpHeaderParser rtp_parser(packet_, length);
@@ -433,8 +432,7 @@
size_t length = static_cast<size_t>(rtp_sender_->BuildRTPheader(
packet_, kPayload, kMarkerBit, kTimestamp, 0));
- ASSERT_EQ(kRtpHeaderSize + rtp_sender_->RtpHeaderExtensionTotalLength(),
- length);
+ ASSERT_EQ(kRtpHeaderSize + rtp_sender_->RtpHeaderExtensionLength(), length);
// Verify
webrtc::RtpUtility::RtpHeaderParser rtp_parser(packet_, length);
@@ -461,8 +459,7 @@
size_t length = static_cast<size_t>(rtp_sender_->BuildRTPheader(
packet_, kPayload, kMarkerBit, kTimestamp, 0));
- ASSERT_EQ(kRtpHeaderSize + rtp_sender_->RtpHeaderExtensionTotalLength(),
- length);
+ ASSERT_EQ(kRtpHeaderSize + rtp_sender_->RtpHeaderExtensionLength(), length);
// Verify
webrtc::RtpUtility::RtpHeaderParser rtp_parser(packet_, length);
@@ -546,8 +543,7 @@
size_t length = static_cast<size_t>(
rtp_sender_->BuildRTPheader(packet_, kPayload, true, kTimestamp, 0));
- ASSERT_EQ(kRtpHeaderSize + rtp_sender_->RtpHeaderExtensionTotalLength(),
- length);
+ ASSERT_EQ(kRtpHeaderSize + rtp_sender_->RtpHeaderExtensionLength(), length);
// Verify
webrtc::RtpUtility::RtpHeaderParser rtp_parser(packet_, length);
@@ -594,8 +590,7 @@
size_t length = static_cast<size_t>(rtp_sender_->BuildRTPheader(
packet_, kPayload, kMarkerBit, kTimestamp, 0));
- ASSERT_EQ(kRtpHeaderSize + rtp_sender_->RtpHeaderExtensionTotalLength(),
- length);
+ ASSERT_EQ(kRtpHeaderSize + rtp_sender_->RtpHeaderExtensionLength(), length);
// Verify
webrtc::RtpUtility::RtpHeaderParser rtp_parser(packet_, length);
@@ -682,8 +677,7 @@
size_t length = static_cast<size_t>(rtp_sender_->BuildRTPheader(
packet_, kPayload, kMarkerBit, kTimestamp, 0));
- ASSERT_EQ(kRtpHeaderSize + rtp_sender_->RtpHeaderExtensionTotalLength(),
- length);
+ ASSERT_EQ(kRtpHeaderSize + rtp_sender_->RtpHeaderExtensionLength(), length);
// Verify
webrtc::RtpUtility::RtpHeaderParser rtp_parser(packet_, length);
@@ -1595,7 +1589,7 @@
EXPECT_EQ(
RtpUtility::Word32Align(kRtpOneByteHeaderLength + kVideoRotationLength),
- rtp_sender_->RtpHeaderExtensionTotalLength());
+ rtp_sender_->RtpHeaderExtensionLength());
rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameKey, kPayload,
kTimestamp, 0, packet_, sizeof(packet_), nullptr,
diff --git a/modules/rtp_rtcp/source/rtp_sender_video.cc b/modules/rtp_rtcp/source/rtp_sender_video.cc
index 3dab6a4..3affb31 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -199,8 +199,8 @@
// This reason for the header extensions to be included here is that
// from an FEC viewpoint, they are part of the payload to be protected.
// (The base RTP header is already protected by the FEC header.)
- overhead = REDForFECHeaderLength + (_rtpSender.RTPHeaderLength() -
- kRtpHeaderSize);
+ return ForwardErrorCorrection::PacketOverhead() + REDForFECHeaderLength +
+ (_rtpSender.RtpHeaderLength() - kRtpHeaderSize);
}
if (fec_enabled_)
overhead += ForwardErrorCorrection::PacketOverhead();
@@ -249,12 +249,12 @@
// Register CVO rtp header extension at the first time when we receive a frame
// with pending rotation.
- RTPSenderInterface::CVOMode cvo_mode = RTPSenderInterface::kCVONone;
+ bool video_rotation_active = false;
if (video_header && video_header->rotation != kVideoRotation_0) {
- cvo_mode = _rtpSender.ActivateCVORtpHeaderExtension();
+ video_rotation_active = _rtpSender.ActivateCVORtpHeaderExtension();
}
- uint16_t rtp_header_length = _rtpSender.RTPHeaderLength();
+ int rtp_header_length = _rtpSender.RtpHeaderLength();
size_t payload_bytes_to_send = payloadSize;
const uint8_t* data = payloadData;
@@ -271,15 +271,16 @@
while (!last) {
uint8_t dataBuffer[IP_PACKET_SIZE] = {0};
size_t payload_bytes_in_packet = 0;
+
if (!packetizer->NextPacket(&dataBuffer[rtp_header_length],
&payload_bytes_in_packet, &last)) {
return -1;
}
// Write RTP header.
- // Set marker bit true if this is the last packet in frame.
_rtpSender.BuildRTPheader(
dataBuffer, payloadType, last, captureTimeStamp, capture_time_ms);
+
// According to
// http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
// ts_126114v120700p.pdf Section 7.4.5:
@@ -294,7 +295,7 @@
if (!video_header) {
RTC_DCHECK(!_rtpSender.IsRtpHeaderExtensionRegistered(
kRtpExtensionVideoRotation));
- } else if (cvo_mode == RTPSenderInterface::kCVOActivated) {
+ } else if (video_rotation_active) {
// Checking whether CVO header extension is registered will require taking
// a lock. It'll be a no-op if it's not registered.
// TODO(guoweis): For now, all packets sent will carry the CVO such that
diff --git a/modules/rtp_rtcp/source/rtp_utility.cc b/modules/rtp_rtcp/source/rtp_utility.cc
index bdae3c4..131b54a 100644
--- a/modules/rtp_rtcp/source/rtp_utility.cc
+++ b/modules/rtp_rtcp/source/rtp_utility.cc
@@ -248,6 +248,10 @@
header->extension.hasVideoRotation = false;
header->extension.videoRotation = 0;
+ // May not be present in packet.
+ header->extension.playout_delay.min_ms = -1;
+ header->extension.playout_delay.max_ms = -1;
+
if (X) {
/* RTP header extension, RFC 3550.
0 1 2 3
@@ -407,6 +411,25 @@
header->extension.hasTransportSequenceNumber = true;
break;
}
+ case kRtpExtensionPlayoutDelay: {
+ if (len != 2) {
+ LOG(LS_WARNING) << "Incorrect playout delay len: " << len;
+ return;
+ }
+ // 0 1 2 3
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | ID | len=2 | MIN delay | MAX delay |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ int min_playout_delay = (ptr[0] << 4) | ((ptr[1] >> 4) & 0xf);
+ int max_playout_delay = ((ptr[1] & 0xf) << 8) | ptr[2];
+ header->extension.playout_delay.min_ms =
+ min_playout_delay * kPlayoutDelayGranularityMs;
+ header->extension.playout_delay.max_ms =
+ max_playout_delay * kPlayoutDelayGranularityMs;
+ break;
+ }
default: {
LOG(LS_WARNING) << "Extension type not implemented: " << type;
return;
diff --git a/modules/video_coding/decoding_state_unittest.cc b/modules/video_coding/decoding_state_unittest.cc
index 5f5d0d3..3942dab 100644
--- a/modules/video_coding/decoding_state_unittest.cc
+++ b/modules/video_coding/decoding_state_unittest.cc
@@ -36,8 +36,8 @@
packet.timestamp = 1;
packet.seqNum = 0xffff;
packet.frameType = kVideoFrameDelta;
- packet.codecSpecificHeader.codec = kRtpVideoVp8;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0x007F;
+ packet.video_header.codec = kRtpVideoVp8;
+ packet.video_header.codecHeader.VP8.pictureId = 0x007F;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@@ -53,17 +53,17 @@
packet.frameType = kVideoFrameDelta;
// Use pictureId
packet.isFirstPacket = false;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0x0002;
+ packet.video_header.codecHeader.VP8.pictureId = 0x0002;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
frame.Reset();
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
+ packet.video_header.codecHeader.VP8.pictureId = 0;
packet.seqNum = 10;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
// Use sequence numbers.
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = kNoPictureId;
+ packet.video_header.codecHeader.VP8.pictureId = kNoPictureId;
frame.Reset();
packet.seqNum = dec_state.sequence_num() - 1u;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@@ -82,9 +82,9 @@
// Insert packet with temporal info.
dec_state.Reset();
frame.Reset();
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
+ packet.video_header.codecHeader.VP8.temporalIdx = 0;
+ packet.video_header.codecHeader.VP8.pictureId = 0;
packet.seqNum = 1;
packet.timestamp = 1;
EXPECT_TRUE(dec_state.full_sync());
@@ -93,9 +93,9 @@
EXPECT_TRUE(dec_state.full_sync());
frame.Reset();
// 1 layer up - still good.
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 1;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
+ packet.video_header.codecHeader.VP8.temporalIdx = 1;
+ packet.video_header.codecHeader.VP8.pictureId = 1;
packet.seqNum = 2;
packet.timestamp = 2;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@@ -104,18 +104,18 @@
EXPECT_TRUE(dec_state.full_sync());
frame.Reset();
// Lost non-base layer packet => should update sync parameter.
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 3;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 3;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
+ packet.video_header.codecHeader.VP8.temporalIdx = 3;
+ packet.video_header.codecHeader.VP8.pictureId = 3;
packet.seqNum = 4;
packet.timestamp = 4;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
// Now insert the next non-base layer (belonging to a next tl0PicId).
frame.Reset();
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 2;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 4;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 1;
+ packet.video_header.codecHeader.VP8.temporalIdx = 2;
+ packet.video_header.codecHeader.VP8.pictureId = 4;
packet.seqNum = 5;
packet.timestamp = 5;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@@ -125,9 +125,9 @@
EXPECT_TRUE(dec_state.full_sync());
// Next base layer (dropped interim non-base layers) - should update sync.
frame.Reset();
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 5;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 1;
+ packet.video_header.codecHeader.VP8.temporalIdx = 0;
+ packet.video_header.codecHeader.VP8.pictureId = 5;
packet.seqNum = 6;
packet.timestamp = 6;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@@ -137,18 +137,18 @@
// Check wrap for temporal layers.
frame.Reset();
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0x00FF;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 6;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 0x00FF;
+ packet.video_header.codecHeader.VP8.temporalIdx = 0;
+ packet.video_header.codecHeader.VP8.pictureId = 6;
packet.seqNum = 7;
packet.timestamp = 7;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
dec_state.SetState(&frame);
EXPECT_FALSE(dec_state.full_sync());
frame.Reset();
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0x0000;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 7;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 0x0000;
+ packet.video_header.codecHeader.VP8.temporalIdx = 0;
+ packet.video_header.codecHeader.VP8.pictureId = 7;
packet.seqNum = 8;
packet.timestamp = 8;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
@@ -211,12 +211,12 @@
VCMFrameBuffer frame;
VCMPacket packet;
packet.frameType = kVideoFrameDelta;
- packet.codecSpecificHeader.codec = kRtpVideoVp8;
+ packet.video_header.codec = kRtpVideoVp8;
packet.timestamp = 0;
packet.seqNum = 0;
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
+ packet.video_header.codecHeader.VP8.temporalIdx = 0;
+ packet.video_header.codecHeader.VP8.pictureId = 0;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@@ -226,9 +226,9 @@
frame.Reset();
packet.timestamp = 1;
packet.seqNum = 1;
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 1;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
+ packet.video_header.codecHeader.VP8.temporalIdx = 1;
+ packet.video_header.codecHeader.VP8.pictureId = 1;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@@ -238,9 +238,9 @@
frame.Reset();
packet.timestamp = 3;
packet.seqNum = 3;
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 3;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 3;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
+ packet.video_header.codecHeader.VP8.temporalIdx = 3;
+ packet.video_header.codecHeader.VP8.pictureId = 3;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@@ -249,9 +249,9 @@
frame.Reset();
packet.timestamp = 4;
packet.seqNum = 4;
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 4;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 1;
+ packet.video_header.codecHeader.VP8.temporalIdx = 0;
+ packet.video_header.codecHeader.VP8.pictureId = 4;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@@ -263,9 +263,9 @@
packet.isFirstPacket = 1;
packet.timestamp = 5;
packet.seqNum = 5;
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 2;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 5;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 2;
+ packet.video_header.codecHeader.VP8.temporalIdx = 0;
+ packet.video_header.codecHeader.VP8.pictureId = 5;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@@ -276,9 +276,9 @@
packet.frameType = kVideoFrameDelta;
packet.timestamp = 6;
packet.seqNum = 6;
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 3;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 6;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 3;
+ packet.video_header.codecHeader.VP8.temporalIdx = 0;
+ packet.video_header.codecHeader.VP8.pictureId = 6;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
@@ -287,9 +287,9 @@
packet.isFirstPacket = 1;
packet.timestamp = 8;
packet.seqNum = 8;
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 4;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 8;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 4;
+ packet.video_header.codecHeader.VP8.temporalIdx = 0;
+ packet.video_header.codecHeader.VP8.pictureId = 8;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
@@ -302,10 +302,10 @@
packet.isFirstPacket = 1;
packet.timestamp = 9;
packet.seqNum = 9;
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 4;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 2;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 9;
- packet.codecSpecificHeader.codecHeader.VP8.layerSync = true;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 4;
+ packet.video_header.codecHeader.VP8.temporalIdx = 2;
+ packet.video_header.codecHeader.VP8.pictureId = 9;
+ packet.video_header.codecHeader.VP8.layerSync = true;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
dec_state.SetState(&frame);
EXPECT_TRUE(dec_state.full_sync());
@@ -323,10 +323,10 @@
packet.markerBit = 1;
packet.timestamp = 0;
packet.seqNum = 0;
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
- packet.codecSpecificHeader.codecHeader.VP8.layerSync = false;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
+ packet.video_header.codecHeader.VP8.temporalIdx = 0;
+ packet.video_header.codecHeader.VP8.pictureId = 0;
+ packet.video_header.codecHeader.VP8.layerSync = false;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
dec_state.SetState(&frame);
EXPECT_TRUE(dec_state.full_sync());
@@ -337,10 +337,10 @@
packet.markerBit = 0;
packet.timestamp = 1;
packet.seqNum = 1;
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 2;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 1;
- packet.codecSpecificHeader.codecHeader.VP8.layerSync = true;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
+ packet.video_header.codecHeader.VP8.temporalIdx = 2;
+ packet.video_header.codecHeader.VP8.pictureId = 1;
+ packet.video_header.codecHeader.VP8.layerSync = true;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
// Layer 1
@@ -350,10 +350,10 @@
packet.markerBit = 1;
packet.timestamp = 2;
packet.seqNum = 3;
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 2;
- packet.codecSpecificHeader.codecHeader.VP8.layerSync = true;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
+ packet.video_header.codecHeader.VP8.temporalIdx = 1;
+ packet.video_header.codecHeader.VP8.pictureId = 2;
+ packet.video_header.codecHeader.VP8.layerSync = true;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
EXPECT_TRUE(dec_state.full_sync());
@@ -365,12 +365,12 @@
VCMPacket packet;
frame.Reset();
packet.frameType = kVideoFrameKey;
- packet.codecSpecificHeader.codec = kRtpVideoVp8;
+ packet.video_header.codec = kRtpVideoVp8;
packet.timestamp = 0;
packet.seqNum = 0;
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
+ packet.video_header.codecHeader.VP8.temporalIdx = 0;
+ packet.video_header.codecHeader.VP8.pictureId = 0;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@@ -384,8 +384,8 @@
packet.frameType = kVideoFrameDelta;
packet.timestamp += 3000;
++packet.seqNum;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 2;
+ packet.video_header.codecHeader.VP8.temporalIdx = 1;
+ packet.video_header.codecHeader.VP8.pictureId = 2;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
dec_state.SetState(&frame);
@@ -418,12 +418,12 @@
VCMFrameBuffer frame;
VCMPacket packet;
packet.frameType = kVideoFrameDelta;
- packet.codecSpecificHeader.codec = kRtpVideoVp8;
+ packet.video_header.codec = kRtpVideoVp8;
packet.timestamp = 0;
packet.seqNum = 0;
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 0;
+ packet.video_header.codecHeader.VP8.temporalIdx = 0;
+ packet.video_header.codecHeader.VP8.pictureId = 0;
FrameData frame_data;
frame_data.rtt_ms = 0;
frame_data.rolling_average_packets_per_frame = -1;
@@ -433,15 +433,15 @@
frame.Reset();
++packet.timestamp;
++packet.seqNum;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx++;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId++;
+ packet.video_header.codecHeader.VP8.temporalIdx++;
+ packet.video_header.codecHeader.VP8.pictureId++;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
frame.Reset();
// Testing only gap in tl0PicIdx when tl0PicIdx in continuous.
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx += 3;
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx++;
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
+ packet.video_header.codecHeader.VP8.tl0PicIdx += 3;
+ packet.video_header.codecHeader.VP8.temporalIdx++;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = 1;
EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
}
@@ -456,9 +456,9 @@
uint8_t data[] = "I need a data pointer for this test!";
packet.sizeBytes = sizeof(data);
packet.dataPtr = data;
- packet.codecSpecificHeader.codec = kRtpVideoVp9;
+ packet.video_header.codec = kRtpVideoVp9;
- RTPVideoHeaderVP9& vp9_hdr = packet.codecSpecificHeader.codecHeader.VP9;
+ RTPVideoHeaderVP9& vp9_hdr = packet.video_header.codecHeader.VP9;
vp9_hdr.picture_id = 10;
vp9_hdr.flexible_mode = true;
@@ -499,9 +499,9 @@
uint8_t data[] = "I need a data pointer for this test!";
packet.sizeBytes = sizeof(data);
packet.dataPtr = data;
- packet.codecSpecificHeader.codec = kRtpVideoVp9;
+ packet.video_header.codec = kRtpVideoVp9;
- RTPVideoHeaderVP9& vp9_hdr = packet.codecSpecificHeader.codecHeader.VP9;
+ RTPVideoHeaderVP9& vp9_hdr = packet.video_header.codecHeader.VP9;
vp9_hdr.picture_id = 10;
vp9_hdr.flexible_mode = true;
@@ -554,9 +554,9 @@
uint8_t data[] = "I need a data pointer for this test!";
packet.sizeBytes = sizeof(data);
packet.dataPtr = data;
- packet.codecSpecificHeader.codec = kRtpVideoVp9;
+ packet.video_header.codec = kRtpVideoVp9;
- RTPVideoHeaderVP9& vp9_hdr = packet.codecSpecificHeader.codecHeader.VP9;
+ RTPVideoHeaderVP9& vp9_hdr = packet.video_header.codecHeader.VP9;
vp9_hdr.picture_id = 10;
vp9_hdr.flexible_mode = true;
diff --git a/modules/video_coding/frame_buffer.cc b/modules/video_coding/frame_buffer.cc
index 11db785..61db0d1 100644
--- a/modules/video_coding/frame_buffer.cc
+++ b/modules/video_coding/frame_buffer.cc
@@ -129,7 +129,7 @@
// Don't copy payload specific data for empty packets (e.g padding packets).
if (packet.sizeBytes > 0)
- CopyCodecSpecific(&packet.codecSpecificHeader);
+ CopyCodecSpecific(&packet.video_header);
int retVal =
_sessionInfo.InsertPacket(packet, _buffer, decode_error_mode, frame_data);
@@ -153,10 +153,14 @@
// (HEVC)).
if (packet.markerBit) {
RTC_DCHECK(!_rotation_set);
- _rotation = packet.codecSpecificHeader.rotation;
+ _rotation = packet.video_header.rotation;
_rotation_set = true;
}
+ if (packet.isFirstPacket) {
+ playout_delay_ = packet.video_header.playout_delay;
+ }
+
if (_sessionInfo.complete()) {
SetState(kStateComplete);
return kCompleteSession;
diff --git a/modules/video_coding/frame_object.cc b/modules/video_coding/frame_object.cc
index 650b65f..b46f816 100644
--- a/modules/video_coding/frame_object.cc
+++ b/modules/video_coding/frame_object.cc
@@ -71,7 +71,7 @@
VCMPacket* packet = packet_buffer_->GetPacket(first_seq_num_);
if (!packet)
return nullptr;
- return &packet->codecSpecificHeader.codecHeader;
+ return &packet->video_header.codecHeader;
}
} // namespace video_coding
diff --git a/modules/video_coding/jitter_buffer.cc b/modules/video_coding/jitter_buffer.cc
index 9c50a94..a5a964a 100644
--- a/modules/video_coding/jitter_buffer.cc
+++ b/modules/video_coding/jitter_buffer.cc
@@ -124,10 +124,10 @@
}
bool Vp9SsMap::Insert(const VCMPacket& packet) {
- if (!packet.codecSpecificHeader.codecHeader.VP9.ss_data_available)
+ if (!packet.video_header.codecHeader.VP9.ss_data_available)
return false;
- ss_map_[packet.timestamp] = packet.codecSpecificHeader.codecHeader.VP9.gof;
+ ss_map_[packet.timestamp] = packet.video_header.codecHeader.VP9.gof;
return true;
}
@@ -175,7 +175,7 @@
// TODO(asapersson): Update according to updates in RTP payload profile.
bool Vp9SsMap::UpdatePacket(VCMPacket* packet) {
- uint8_t gof_idx = packet->codecSpecificHeader.codecHeader.VP9.gof_idx;
+ uint8_t gof_idx = packet->video_header.codecHeader.VP9.gof_idx;
if (gof_idx == kNoGofIdx)
return false; // No update needed.
@@ -186,7 +186,7 @@
if (gof_idx >= it->second.num_frames_in_gof)
return false; // Assume corresponding SS not yet received.
- RTPVideoHeaderVP9* vp9 = &packet->codecSpecificHeader.codecHeader.VP9;
+ RTPVideoHeaderVP9* vp9 = &packet->video_header.codecHeader.VP9;
vp9->temporal_idx = it->second.temporal_idx[gof_idx];
vp9->temporal_up_switch = it->second.temporal_up_switch[gof_idx];
@@ -497,12 +497,11 @@
// Returns immediately or a |max_wait_time_ms| ms event hang waiting for a
// complete frame, |max_wait_time_ms| decided by caller.
-bool VCMJitterBuffer::NextCompleteTimestamp(uint32_t max_wait_time_ms,
- uint32_t* timestamp) {
+VCMEncodedFrame* VCMJitterBuffer::NextCompleteFrame(uint32_t max_wait_time_ms) {
crit_sect_->Enter();
if (!running_) {
crit_sect_->Leave();
- return false;
+ return nullptr;
}
CleanUpOldOrEmptyFrames();
@@ -520,7 +519,7 @@
// Are we shutting down the jitter buffer?
if (!running_) {
crit_sect_->Leave();
- return false;
+ return nullptr;
}
// Finding oldest frame ready for decoder.
CleanUpOldOrEmptyFrames();
@@ -538,11 +537,11 @@
if (decodable_frames_.empty() ||
decodable_frames_.Front()->GetState() != kStateComplete) {
crit_sect_->Leave();
- return false;
+ return nullptr;
}
- *timestamp = decodable_frames_.Front()->TimeStamp();
+ VCMEncodedFrame* encoded_frame = decodable_frames_.Front();
crit_sect_->Leave();
- return true;
+ return encoded_frame;
}
bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(uint32_t* timestamp) {
diff --git a/modules/video_coding/jitter_buffer.h b/modules/video_coding/jitter_buffer.h
index e36f2cd..a3e9ace 100644
--- a/modules/video_coding/jitter_buffer.h
+++ b/modules/video_coding/jitter_buffer.h
@@ -152,9 +152,8 @@
bool CompleteSequenceWithNextFrame();
// Wait |max_wait_time_ms| for a complete frame to arrive.
- // The function returns true once such a frame is found, its corresponding
- // timestamp is returned. Otherwise, returns false.
- bool NextCompleteTimestamp(uint32_t max_wait_time_ms, uint32_t* timestamp);
+ // If found, a pointer to the frame is returned. Returns nullptr otherwise.
+ VCMEncodedFrame* NextCompleteFrame(uint32_t max_wait_time_ms);
// Locates a frame for decoding (even an incomplete) without delay.
// The function returns true once such a frame is found, its corresponding
diff --git a/modules/video_coding/jitter_buffer_unittest.cc b/modules/video_coding/jitter_buffer_unittest.cc
index 56e4116..e75d177 100644
--- a/modules/video_coding/jitter_buffer_unittest.cc
+++ b/modules/video_coding/jitter_buffer_unittest.cc
@@ -42,13 +42,13 @@
packet_.markerBit = true;
packet_.frameType = kVideoFrameKey;
packet_.codec = kVideoCodecVP9;
- packet_.codecSpecificHeader.codec = kRtpVideoVp9;
- packet_.codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
- packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
- packet_.codecSpecificHeader.codecHeader.VP9.temporal_idx = kNoTemporalIdx;
- packet_.codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
- packet_.codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
- packet_.codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
+ packet_.video_header.codec = kRtpVideoVp9;
+ packet_.video_header.codecHeader.VP9.flexible_mode = false;
+ packet_.video_header.codecHeader.VP9.gof_idx = 0;
+ packet_.video_header.codecHeader.VP9.temporal_idx = kNoTemporalIdx;
+ packet_.video_header.codecHeader.VP9.temporal_up_switch = false;
+ packet_.video_header.codecHeader.VP9.ss_data_available = true;
+ packet_.video_header.codecHeader.VP9.gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
}
@@ -62,7 +62,7 @@
}
TEST_F(Vp9SsMapTest, Insert_NoSsData) {
- packet_.codecSpecificHeader.codecHeader.VP9.ss_data_available = false;
+ packet_.video_header.codecHeader.VP9.ss_data_available = false;
EXPECT_FALSE(map_.Insert(packet_));
}
@@ -139,53 +139,53 @@
}
TEST_F(Vp9SsMapTest, UpdatePacket_NoSsData) {
- packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_.video_header.codecHeader.VP9.gof_idx = 0;
EXPECT_FALSE(map_.UpdatePacket(&packet_));
}
TEST_F(Vp9SsMapTest, UpdatePacket_NoGofIdx) {
EXPECT_TRUE(map_.Insert(packet_));
- packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = kNoGofIdx;
+ packet_.video_header.codecHeader.VP9.gof_idx = kNoGofIdx;
EXPECT_FALSE(map_.UpdatePacket(&packet_));
}
TEST_F(Vp9SsMapTest, UpdatePacket_InvalidGofIdx) {
EXPECT_TRUE(map_.Insert(packet_));
- packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 4;
+ packet_.video_header.codecHeader.VP9.gof_idx = 4;
EXPECT_FALSE(map_.UpdatePacket(&packet_));
}
TEST_F(Vp9SsMapTest, UpdatePacket) {
EXPECT_TRUE(map_.Insert(packet_)); // kTemporalStructureMode3: 0-2-1-2..
- packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_.video_header.codecHeader.VP9.gof_idx = 0;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
- EXPECT_EQ(0, packet_.codecSpecificHeader.codecHeader.VP9.temporal_idx);
- EXPECT_FALSE(packet_.codecSpecificHeader.codecHeader.VP9.temporal_up_switch);
- EXPECT_EQ(1U, packet_.codecSpecificHeader.codecHeader.VP9.num_ref_pics);
- EXPECT_EQ(4, packet_.codecSpecificHeader.codecHeader.VP9.pid_diff[0]);
+ EXPECT_EQ(0, packet_.video_header.codecHeader.VP9.temporal_idx);
+ EXPECT_FALSE(packet_.video_header.codecHeader.VP9.temporal_up_switch);
+ EXPECT_EQ(1U, packet_.video_header.codecHeader.VP9.num_ref_pics);
+ EXPECT_EQ(4, packet_.video_header.codecHeader.VP9.pid_diff[0]);
- packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 1;
+ packet_.video_header.codecHeader.VP9.gof_idx = 1;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
- EXPECT_EQ(2, packet_.codecSpecificHeader.codecHeader.VP9.temporal_idx);
- EXPECT_TRUE(packet_.codecSpecificHeader.codecHeader.VP9.temporal_up_switch);
- EXPECT_EQ(1U, packet_.codecSpecificHeader.codecHeader.VP9.num_ref_pics);
- EXPECT_EQ(1, packet_.codecSpecificHeader.codecHeader.VP9.pid_diff[0]);
+ EXPECT_EQ(2, packet_.video_header.codecHeader.VP9.temporal_idx);
+ EXPECT_TRUE(packet_.video_header.codecHeader.VP9.temporal_up_switch);
+ EXPECT_EQ(1U, packet_.video_header.codecHeader.VP9.num_ref_pics);
+ EXPECT_EQ(1, packet_.video_header.codecHeader.VP9.pid_diff[0]);
- packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 2;
+ packet_.video_header.codecHeader.VP9.gof_idx = 2;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
- EXPECT_EQ(1, packet_.codecSpecificHeader.codecHeader.VP9.temporal_idx);
- EXPECT_TRUE(packet_.codecSpecificHeader.codecHeader.VP9.temporal_up_switch);
- EXPECT_EQ(1U, packet_.codecSpecificHeader.codecHeader.VP9.num_ref_pics);
- EXPECT_EQ(2, packet_.codecSpecificHeader.codecHeader.VP9.pid_diff[0]);
+ EXPECT_EQ(1, packet_.video_header.codecHeader.VP9.temporal_idx);
+ EXPECT_TRUE(packet_.video_header.codecHeader.VP9.temporal_up_switch);
+ EXPECT_EQ(1U, packet_.video_header.codecHeader.VP9.num_ref_pics);
+ EXPECT_EQ(2, packet_.video_header.codecHeader.VP9.pid_diff[0]);
- packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 3;
+ packet_.video_header.codecHeader.VP9.gof_idx = 3;
EXPECT_TRUE(map_.UpdatePacket(&packet_));
- EXPECT_EQ(2, packet_.codecSpecificHeader.codecHeader.VP9.temporal_idx);
- EXPECT_FALSE(packet_.codecSpecificHeader.codecHeader.VP9.temporal_up_switch);
- EXPECT_EQ(2U, packet_.codecSpecificHeader.codecHeader.VP9.num_ref_pics);
- EXPECT_EQ(1, packet_.codecSpecificHeader.codecHeader.VP9.pid_diff[0]);
- EXPECT_EQ(2, packet_.codecSpecificHeader.codecHeader.VP9.pid_diff[1]);
+ EXPECT_EQ(2, packet_.video_header.codecHeader.VP9.temporal_idx);
+ EXPECT_FALSE(packet_.video_header.codecHeader.VP9.temporal_up_switch);
+ EXPECT_EQ(2U, packet_.video_header.codecHeader.VP9.num_ref_pics);
+ EXPECT_EQ(1, packet_.video_header.codecHeader.VP9.pid_diff[0]);
+ EXPECT_EQ(2, packet_.video_header.codecHeader.VP9.pid_diff[1]);
}
class ProcessThreadMock : public ProcessThread {
@@ -246,12 +246,10 @@
}
VCMEncodedFrame* DecodeCompleteFrame() {
- uint32_t timestamp = 0;
- bool found_frame = jitter_buffer_->NextCompleteTimestamp(10, &timestamp);
+ VCMEncodedFrame* found_frame = jitter_buffer_->NextCompleteFrame(10);
if (!found_frame)
- return NULL;
- VCMEncodedFrame* frame = jitter_buffer_->ExtractAndSetDecode(timestamp);
- return frame;
+ return nullptr;
+ return jitter_buffer_->ExtractAndSetDecode(found_frame->TimeStamp());
}
VCMEncodedFrame* DecodeIncompleteFrame() {
@@ -409,12 +407,12 @@
}
bool DecodeCompleteFrame() {
- uint32_t timestamp = 0;
- bool found_frame = jitter_buffer_->NextCompleteTimestamp(0, &timestamp);
+ VCMEncodedFrame* found_frame = jitter_buffer_->NextCompleteFrame(0);
if (!found_frame)
return false;
- VCMEncodedFrame* frame = jitter_buffer_->ExtractAndSetDecode(timestamp);
+ VCMEncodedFrame* frame =
+ jitter_buffer_->ExtractAndSetDecode(found_frame->TimeStamp());
bool ret = (frame != NULL);
jitter_buffer_->ReleaseFrame(frame);
return ret;
@@ -932,23 +930,23 @@
bool re = false;
packet_->codec = kVideoCodecVP9;
- packet_->codecSpecificHeader.codec = kRtpVideoVp9;
+ packet_->video_header.codec = kRtpVideoVp9;
packet_->isFirstPacket = true;
packet_->markerBit = true;
- packet_->codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
- packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
- packet_->codecSpecificHeader.codecHeader.VP9.beginning_of_frame = true;
- packet_->codecSpecificHeader.codecHeader.VP9.end_of_frame = true;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
+ packet_->video_header.codecHeader.VP9.flexible_mode = false;
+ packet_->video_header.codecHeader.VP9.spatial_idx = 0;
+ packet_->video_header.codecHeader.VP9.beginning_of_frame = true;
+ packet_->video_header.codecHeader.VP9.end_of_frame = true;
+ packet_->video_header.codecHeader.VP9.temporal_up_switch = false;
packet_->seqNum = 65485;
packet_->timestamp = 1000;
packet_->frameType = kVideoFrameKey;
- packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
- packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 200;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
- packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
- packet_->codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
+ packet_->video_header.codecHeader.VP9.picture_id = 5;
+ packet_->video_header.codecHeader.VP9.tl0_pic_idx = 200;
+ packet_->video_header.codecHeader.VP9.temporal_idx = 0;
+ packet_->video_header.codecHeader.VP9.ss_data_available = true;
+ packet_->video_header.codecHeader.VP9.gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
@@ -956,10 +954,10 @@
packet_->seqNum = 65489;
packet_->timestamp = 13000;
packet_->frameType = kVideoFrameDelta;
- packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 9;
- packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 201;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
- packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = false;
+ packet_->video_header.codecHeader.VP9.picture_id = 9;
+ packet_->video_header.codecHeader.VP9.tl0_pic_idx = 201;
+ packet_->video_header.codecHeader.VP9.temporal_idx = 0;
+ packet_->video_header.codecHeader.VP9.ss_data_available = false;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -985,29 +983,29 @@
bool re = false;
packet_->codec = kVideoCodecVP9;
- packet_->codecSpecificHeader.codec = kRtpVideoVp9;
+ packet_->video_header.codec = kRtpVideoVp9;
packet_->isFirstPacket = true;
packet_->markerBit = true;
- packet_->codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
- packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
- packet_->codecSpecificHeader.codecHeader.VP9.beginning_of_frame = true;
- packet_->codecSpecificHeader.codecHeader.VP9.end_of_frame = true;
- packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 200;
+ packet_->video_header.codecHeader.VP9.flexible_mode = false;
+ packet_->video_header.codecHeader.VP9.spatial_idx = 0;
+ packet_->video_header.codecHeader.VP9.beginning_of_frame = true;
+ packet_->video_header.codecHeader.VP9.end_of_frame = true;
+ packet_->video_header.codecHeader.VP9.tl0_pic_idx = 200;
packet_->seqNum = 65486;
packet_->timestamp = 6000;
packet_->frameType = kVideoFrameDelta;
- packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 6;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 2;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
+ packet_->video_header.codecHeader.VP9.picture_id = 6;
+ packet_->video_header.codecHeader.VP9.temporal_idx = 2;
+ packet_->video_header.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
packet_->seqNum = 65487;
packet_->timestamp = 9000;
packet_->frameType = kVideoFrameDelta;
- packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 7;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 1;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
+ packet_->video_header.codecHeader.VP9.picture_id = 7;
+ packet_->video_header.codecHeader.VP9.temporal_idx = 1;
+ packet_->video_header.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
// Insert first frame with SS data.
@@ -1016,11 +1014,11 @@
packet_->frameType = kVideoFrameKey;
packet_->width = 352;
packet_->height = 288;
- packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
- packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
- packet_->codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
+ packet_->video_header.codecHeader.VP9.picture_id = 5;
+ packet_->video_header.codecHeader.VP9.temporal_idx = 0;
+ packet_->video_header.codecHeader.VP9.temporal_up_switch = false;
+ packet_->video_header.codecHeader.VP9.ss_data_available = true;
+ packet_->video_header.codecHeader.VP9.gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
@@ -1061,31 +1059,31 @@
bool re = false;
packet_->codec = kVideoCodecVP9;
- packet_->codecSpecificHeader.codec = kRtpVideoVp9;
- packet_->codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
- packet_->codecSpecificHeader.codecHeader.VP9.beginning_of_frame = true;
- packet_->codecSpecificHeader.codecHeader.VP9.end_of_frame = true;
- packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 200;
+ packet_->video_header.codec = kRtpVideoVp9;
+ packet_->video_header.codecHeader.VP9.flexible_mode = false;
+ packet_->video_header.codecHeader.VP9.beginning_of_frame = true;
+ packet_->video_header.codecHeader.VP9.end_of_frame = true;
+ packet_->video_header.codecHeader.VP9.tl0_pic_idx = 200;
packet_->isFirstPacket = true;
packet_->markerBit = false;
packet_->seqNum = 65486;
packet_->timestamp = 6000;
packet_->frameType = kVideoFrameDelta;
- packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
- packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 6;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 1;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
+ packet_->video_header.codecHeader.VP9.spatial_idx = 0;
+ packet_->video_header.codecHeader.VP9.picture_id = 6;
+ packet_->video_header.codecHeader.VP9.temporal_idx = 1;
+ packet_->video_header.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
packet_->isFirstPacket = false;
packet_->markerBit = true;
packet_->seqNum = 65487;
packet_->frameType = kVideoFrameDelta;
- packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 1;
- packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 6;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 1;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
+ packet_->video_header.codecHeader.VP9.spatial_idx = 1;
+ packet_->video_header.codecHeader.VP9.picture_id = 6;
+ packet_->video_header.codecHeader.VP9.temporal_idx = 1;
+ packet_->video_header.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
packet_->isFirstPacket = false;
@@ -1093,10 +1091,10 @@
packet_->seqNum = 65485;
packet_->timestamp = 3000;
packet_->frameType = kVideoFrameKey;
- packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 1;
- packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
+ packet_->video_header.codecHeader.VP9.spatial_idx = 1;
+ packet_->video_header.codecHeader.VP9.picture_id = 5;
+ packet_->video_header.codecHeader.VP9.temporal_idx = 0;
+ packet_->video_header.codecHeader.VP9.temporal_up_switch = false;
EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
// Insert first frame with SS data.
@@ -1106,12 +1104,12 @@
packet_->frameType = kVideoFrameKey;
packet_->width = 352;
packet_->height = 288;
- packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
- packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
- packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
- packet_->codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
+ packet_->video_header.codecHeader.VP9.spatial_idx = 0;
+ packet_->video_header.codecHeader.VP9.picture_id = 5;
+ packet_->video_header.codecHeader.VP9.temporal_idx = 0;
+ packet_->video_header.codecHeader.VP9.temporal_up_switch = false;
+ packet_->video_header.codecHeader.VP9.ss_data_available = true;
+ packet_->video_header.codecHeader.VP9.gof.SetGofInfoVP9(
kTemporalStructureMode2); // kTemporalStructureMode3: 0-1-0-1..
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
@@ -1177,7 +1175,7 @@
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t timestamp = 0;
- EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
packet_->isFirstPacket = false;
@@ -1185,7 +1183,7 @@
packet_->seqNum++;
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
- EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
@@ -1210,7 +1208,7 @@
EXPECT_EQ(kDecodableSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
- EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
// Insert second frame
@@ -1219,7 +1217,7 @@
EXPECT_EQ(kDecodableSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
- EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
packet_->isFirstPacket = false;
@@ -1227,14 +1225,14 @@
packet_->seqNum++;
EXPECT_EQ(kDecodableSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
- EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
packet_->seqNum++;
EXPECT_EQ(kDecodableSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
- EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
frame_out = DecodeIncompleteFrame();
@@ -1276,7 +1274,7 @@
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t timestamp;
- EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
// Insert second frame - an incomplete key frame.
@@ -1287,7 +1285,7 @@
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
- EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
// Insert a few more packets. Make sure we're waiting for the key frame to be
@@ -1297,7 +1295,7 @@
packet_->seqNum++;
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
- EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
@@ -1340,7 +1338,7 @@
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t timestamp;
- EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
// Insert second frame with the first packet missing. Make sure we're waiting
@@ -1350,14 +1348,14 @@
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
- EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
for (int i = 0; i < 5; ++i) {
packet_->seqNum++;
EXPECT_EQ(kIncomplete,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
- EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
@@ -1366,7 +1364,7 @@
packet_->seqNum -= 6;
EXPECT_EQ(kDecodableSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
- EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
frame_out = DecodeIncompleteFrame();
@@ -1387,9 +1385,10 @@
EXPECT_EQ(kCompleteSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t next_timestamp;
- EXPECT_TRUE(jitter_buffer_->NextCompleteTimestamp(0, &next_timestamp));
- EXPECT_EQ(packet_->timestamp, next_timestamp);
- VCMEncodedFrame* frame = jitter_buffer_->ExtractAndSetDecode(next_timestamp);
+ VCMEncodedFrame* frame = jitter_buffer_->NextCompleteFrame(0);
+ EXPECT_NE(frame, nullptr);
+ EXPECT_EQ(packet_->timestamp, frame->TimeStamp());
+ frame = jitter_buffer_->ExtractAndSetDecode(frame->TimeStamp());
EXPECT_TRUE(frame != NULL);
jitter_buffer_->ReleaseFrame(frame);
@@ -1413,7 +1412,7 @@
packet_->timestamp = timestamp_;
EXPECT_EQ(kDecodableSession,
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
- EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &next_timestamp));
+ EXPECT_EQ(jitter_buffer_->NextCompleteFrame(0), nullptr);
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&next_timestamp));
EXPECT_EQ(packet_->timestamp - 33 * 90, next_timestamp);
}
diff --git a/modules/video_coding/packet.cc b/modules/video_coding/packet.cc
index e8c06a6..aa27c5d 100644
--- a/modules/video_coding/packet.cc
+++ b/modules/video_coding/packet.cc
@@ -32,7 +32,9 @@
insertStartCode(false),
width(0),
height(0),
- codecSpecificHeader() {}
+ video_header() {
+ video_header.playout_delay = {-1, -1};
+}
VCMPacket::VCMPacket(const uint8_t* ptr,
const size_t size,
@@ -45,7 +47,6 @@
sizeBytes(size),
markerBit(rtpHeader.header.markerBit),
timesNacked(-1),
-
frameType(rtpHeader.frameType),
codec(kVideoCodecUnknown),
isFirstPacket(rtpHeader.type.Video.isFirstPacket),
@@ -53,8 +54,18 @@
insertStartCode(false),
width(rtpHeader.type.Video.width),
height(rtpHeader.type.Video.height),
- codecSpecificHeader(rtpHeader.type.Video) {
+ video_header(rtpHeader.type.Video) {
CopyCodecSpecifics(rtpHeader.type.Video);
+
+ if (markerBit) {
+ video_header.rotation = rtpHeader.type.Video.rotation;
+ }
+ // Playout decisions are made entirely based on first packet in a frame.
+ if (isFirstPacket) {
+ video_header.playout_delay = rtpHeader.type.Video.playout_delay;
+ } else {
+ video_header.playout_delay = {-1, -1};
+ }
}
VCMPacket::VCMPacket(const uint8_t* ptr,
@@ -70,7 +81,6 @@
sizeBytes(size),
markerBit(mBit),
timesNacked(-1),
-
frameType(kVideoFrameDelta),
codec(kVideoCodecUnknown),
isFirstPacket(false),
@@ -78,7 +88,7 @@
insertStartCode(false),
width(0),
height(0),
- codecSpecificHeader() {}
+ video_header() {}
void VCMPacket::Reset() {
payloadType = 0;
@@ -96,13 +106,10 @@
insertStartCode = false;
width = 0;
height = 0;
- memset(&codecSpecificHeader, 0, sizeof(RTPVideoHeader));
+ memset(&video_header, 0, sizeof(RTPVideoHeader));
}
void VCMPacket::CopyCodecSpecifics(const RTPVideoHeader& videoHeader) {
- if (markerBit) {
- codecSpecificHeader.rotation = videoHeader.rotation;
- }
switch (videoHeader.codec) {
case kRtpVideoVp8:
// Handle all packets within a frame as depending on the previous packet
diff --git a/modules/video_coding/packet.h b/modules/video_coding/packet.h
index bb62cb9..ea9cc86 100644
--- a/modules/video_coding/packet.h
+++ b/modules/video_coding/packet.h
@@ -50,7 +50,7 @@
// packet.
int width;
int height;
- RTPVideoHeader codecSpecificHeader;
+ RTPVideoHeader video_header;
protected:
void CopyCodecSpecifics(const RTPVideoHeader& videoHeader);
diff --git a/modules/video_coding/packet_buffer_unittest.cc b/modules/video_coding/packet_buffer_unittest.cc
index 753b5b4..cffd392 100644
--- a/modules/video_coding/packet_buffer_unittest.cc
+++ b/modules/video_coding/packet_buffer_unittest.cc
@@ -98,10 +98,10 @@
packet.markerBit = last;
packet.sizeBytes = data_size;
packet.dataPtr = data;
- packet.codecSpecificHeader.codecHeader.VP8.pictureId = pid % (1 << 15);
- packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = tid;
- packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = tl0;
- packet.codecSpecificHeader.codecHeader.VP8.layerSync = sync;
+ packet.video_header.codecHeader.VP8.pictureId = pid % (1 << 15);
+ packet.video_header.codecHeader.VP8.temporalIdx = tid;
+ packet.video_header.codecHeader.VP8.tl0PicIdx = tl0;
+ packet.video_header.codecHeader.VP8.layerSync = sync;
EXPECT_TRUE(packet_buffer_->InsertPacket(packet));
}
@@ -127,15 +127,15 @@
packet.markerBit = last;
packet.sizeBytes = data_size;
packet.dataPtr = data;
- packet.codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
- packet.codecSpecificHeader.codecHeader.VP9.picture_id = pid % (1 << 15);
- packet.codecSpecificHeader.codecHeader.VP9.temporal_idx = tid;
- packet.codecSpecificHeader.codecHeader.VP9.spatial_idx = sid;
- packet.codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = tl0;
- packet.codecSpecificHeader.codecHeader.VP9.temporal_up_switch = up;
+ packet.video_header.codecHeader.VP9.flexible_mode = false;
+ packet.video_header.codecHeader.VP9.picture_id = pid % (1 << 15);
+ packet.video_header.codecHeader.VP9.temporal_idx = tid;
+ packet.video_header.codecHeader.VP9.spatial_idx = sid;
+ packet.video_header.codecHeader.VP9.tl0_pic_idx = tl0;
+ packet.video_header.codecHeader.VP9.temporal_up_switch = up;
if (ss != nullptr) {
- packet.codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
- packet.codecSpecificHeader.codecHeader.VP9.gof = *ss;
+ packet.video_header.codecHeader.VP9.ss_data_available = true;
+ packet.video_header.codecHeader.VP9.gof = *ss;
}
EXPECT_TRUE(packet_buffer_->InsertPacket(packet));
@@ -163,15 +163,15 @@
packet.markerBit = last;
packet.sizeBytes = data_size;
packet.dataPtr = data;
- packet.codecSpecificHeader.codecHeader.VP9.inter_layer_predicted = inter;
- packet.codecSpecificHeader.codecHeader.VP9.flexible_mode = true;
- packet.codecSpecificHeader.codecHeader.VP9.picture_id = pid % (1 << 15);
- packet.codecSpecificHeader.codecHeader.VP9.temporal_idx = tid;
- packet.codecSpecificHeader.codecHeader.VP9.spatial_idx = sid;
- packet.codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = tl0;
- packet.codecSpecificHeader.codecHeader.VP9.num_ref_pics = refs.size();
+ packet.video_header.codecHeader.VP9.inter_layer_predicted = inter;
+ packet.video_header.codecHeader.VP9.flexible_mode = true;
+ packet.video_header.codecHeader.VP9.picture_id = pid % (1 << 15);
+ packet.video_header.codecHeader.VP9.temporal_idx = tid;
+ packet.video_header.codecHeader.VP9.spatial_idx = sid;
+ packet.video_header.codecHeader.VP9.tl0_pic_idx = tl0;
+ packet.video_header.codecHeader.VP9.num_ref_pics = refs.size();
for (size_t i = 0; i < refs.size(); ++i)
- packet.codecSpecificHeader.codecHeader.VP9.pid_diff[i] = refs[i];
+ packet.video_header.codecHeader.VP9.pid_diff[i] = refs[i];
EXPECT_TRUE(packet_buffer_->InsertPacket(packet));
}
diff --git a/modules/video_coding/receiver.cc b/modules/video_coding/receiver.cc
index 1954df9..b516694 100644
--- a/modules/video_coding/receiver.cc
+++ b/modules/video_coding/receiver.cc
@@ -144,15 +144,26 @@
bool prefer_late_decoding) {
const int64_t start_time_ms = clock_->TimeInMilliseconds();
uint32_t frame_timestamp = 0;
+ int min_playout_delay_ms = -1;
+ int max_playout_delay_ms = -1;
// Exhaust wait time to get a complete frame for decoding.
- bool found_frame =
- jitter_buffer_.NextCompleteTimestamp(max_wait_time_ms, &frame_timestamp);
+ VCMEncodedFrame* found_frame =
+ jitter_buffer_.NextCompleteFrame(max_wait_time_ms);
- if (!found_frame)
- found_frame = jitter_buffer_.NextMaybeIncompleteTimestamp(&frame_timestamp);
+ if (found_frame) {
+ frame_timestamp = found_frame->TimeStamp();
+ min_playout_delay_ms = found_frame->EncodedImage().playout_delay_.min_ms;
+ max_playout_delay_ms = found_frame->EncodedImage().playout_delay_.max_ms;
+ } else {
+ if (!jitter_buffer_.NextMaybeIncompleteTimestamp(&frame_timestamp))
+ return nullptr;
+ }
- if (!found_frame)
- return NULL;
+ if (min_playout_delay_ms >= 0)
+ timing_->set_min_playout_delay(min_playout_delay_ms);
+
+ if (max_playout_delay_ms >= 0)
+ timing_->set_max_playout_delay(max_playout_delay_ms);
// We have a frame - Set timing and render timestamp.
timing_->SetJitterDelay(jitter_buffer_.EstimatedJitterMs());
diff --git a/modules/video_coding/session_info.cc b/modules/video_coding/session_info.cc
index 8701098..b11f690 100644
--- a/modules/video_coding/session_info.cc
+++ b/modules/video_coding/session_info.cc
@@ -60,10 +60,10 @@
int VCMSessionInfo::PictureId() const {
if (packets_.empty())
return kNoPictureId;
- if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
- return packets_.front().codecSpecificHeader.codecHeader.VP8.pictureId;
- } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
- return packets_.front().codecSpecificHeader.codecHeader.VP9.picture_id;
+ if (packets_.front().video_header.codec == kRtpVideoVp8) {
+ return packets_.front().video_header.codecHeader.VP8.pictureId;
+ } else if (packets_.front().video_header.codec == kRtpVideoVp9) {
+ return packets_.front().video_header.codecHeader.VP9.picture_id;
} else {
return kNoPictureId;
}
@@ -72,10 +72,10 @@
int VCMSessionInfo::TemporalId() const {
if (packets_.empty())
return kNoTemporalIdx;
- if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
- return packets_.front().codecSpecificHeader.codecHeader.VP8.temporalIdx;
- } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
- return packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_idx;
+ if (packets_.front().video_header.codec == kRtpVideoVp8) {
+ return packets_.front().video_header.codecHeader.VP8.temporalIdx;
+ } else if (packets_.front().video_header.codec == kRtpVideoVp9) {
+ return packets_.front().video_header.codecHeader.VP9.temporal_idx;
} else {
return kNoTemporalIdx;
}
@@ -84,11 +84,10 @@
bool VCMSessionInfo::LayerSync() const {
if (packets_.empty())
return false;
- if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
- return packets_.front().codecSpecificHeader.codecHeader.VP8.layerSync;
- } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
- return packets_.front()
- .codecSpecificHeader.codecHeader.VP9.temporal_up_switch;
+ if (packets_.front().video_header.codec == kRtpVideoVp8) {
+ return packets_.front().video_header.codecHeader.VP8.layerSync;
+ } else if (packets_.front().video_header.codec == kRtpVideoVp9) {
+ return packets_.front().video_header.codecHeader.VP9.temporal_up_switch;
} else {
return false;
}
@@ -97,36 +96,34 @@
int VCMSessionInfo::Tl0PicId() const {
if (packets_.empty())
return kNoTl0PicIdx;
- if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
- return packets_.front().codecSpecificHeader.codecHeader.VP8.tl0PicIdx;
- } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
- return packets_.front().codecSpecificHeader.codecHeader.VP9.tl0_pic_idx;
+ if (packets_.front().video_header.codec == kRtpVideoVp8) {
+ return packets_.front().video_header.codecHeader.VP8.tl0PicIdx;
+ } else if (packets_.front().video_header.codec == kRtpVideoVp9) {
+ return packets_.front().video_header.codecHeader.VP9.tl0_pic_idx;
} else {
return kNoTl0PicIdx;
}
}
bool VCMSessionInfo::NonReference() const {
- if (packets_.empty() ||
- packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
+ if (packets_.empty() || packets_.front().video_header.codec != kRtpVideoVp8)
return false;
- return packets_.front().codecSpecificHeader.codecHeader.VP8.nonReference;
+ return packets_.front().video_header.codecHeader.VP8.nonReference;
}
void VCMSessionInfo::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
- if (packets_.empty() ||
- packets_.front().codecSpecificHeader.codec != kRtpVideoVp9 ||
- packets_.front().codecSpecificHeader.codecHeader.VP9.flexible_mode) {
+ if (packets_.empty() || packets_.front().video_header.codec != kRtpVideoVp9 ||
+ packets_.front().video_header.codecHeader.VP9.flexible_mode) {
return;
}
- packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_idx =
+ packets_.front().video_header.codecHeader.VP9.temporal_idx =
gof_info.temporal_idx[idx];
- packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_up_switch =
+ packets_.front().video_header.codecHeader.VP9.temporal_up_switch =
gof_info.temporal_up_switch[idx];
- packets_.front().codecSpecificHeader.codecHeader.VP9.num_ref_pics =
+ packets_.front().video_header.codecHeader.VP9.num_ref_pics =
gof_info.num_ref_pics[idx];
for (uint8_t i = 0; i < gof_info.num_ref_pics[idx]; ++i) {
- packets_.front().codecSpecificHeader.codecHeader.VP9.pid_diff[i] =
+ packets_.front().video_header.codecHeader.VP9.pid_diff[i] =
gof_info.pid_diff[idx][i];
}
}
@@ -175,9 +172,8 @@
// header supplied by the H264 depacketizer.
const size_t kH264NALHeaderLengthInBytes = 1;
const size_t kLengthFieldLength = 2;
- if (packet.codecSpecificHeader.codec == kRtpVideoH264 &&
- packet.codecSpecificHeader.codecHeader.H264.packetization_type ==
- kH264StapA) {
+ if (packet.video_header.codec == kRtpVideoH264 &&
+ packet.video_header.codecHeader.H264.packetization_type == kH264StapA) {
size_t required_length = 0;
const uint8_t* nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
while (nalu_ptr < packet_buffer + packet.sizeBytes) {
@@ -344,8 +340,7 @@
return new_length;
PacketIterator it = FindNextPartitionBeginning(packets_.begin());
while (it != packets_.end()) {
- const int partition_id =
- (*it).codecSpecificHeader.codecHeader.VP8.partitionId;
+ const int partition_id = (*it).video_header.codecHeader.VP8.partitionId;
PacketIterator partition_end = FindPartitionEnd(it);
fragmentation->fragmentationOffset[partition_id] =
(*it).dataPtr - frame_buffer;
@@ -381,7 +376,7 @@
VCMSessionInfo::PacketIterator VCMSessionInfo::FindNextPartitionBeginning(
PacketIterator it) const {
while (it != packets_.end()) {
- if ((*it).codecSpecificHeader.codecHeader.VP8.beginningOfPartition) {
+ if ((*it).video_header.codecHeader.VP8.beginningOfPartition) {
return it;
}
++it;
@@ -393,13 +388,10 @@
PacketIterator it) const {
assert((*it).codec == kVideoCodecVP8);
PacketIterator prev_it = it;
- const int partition_id =
- (*it).codecSpecificHeader.codecHeader.VP8.partitionId;
+ const int partition_id = (*it).video_header.codecHeader.VP8.partitionId;
while (it != packets_.end()) {
- bool beginning =
- (*it).codecSpecificHeader.codecHeader.VP8.beginningOfPartition;
- int current_partition_id =
- (*it).codecSpecificHeader.codecHeader.VP8.partitionId;
+ bool beginning = (*it).video_header.codecHeader.VP8.beginningOfPartition;
+ int current_partition_id = (*it).video_header.codecHeader.VP8.partitionId;
bool packet_loss_found = (!beginning && !InSequence(it, prev_it));
if (packet_loss_found ||
(beginning && current_partition_id != partition_id)) {
diff --git a/modules/video_coding/timing.cc b/modules/video_coding/timing.cc
index 6542ef5..b20a18f 100644
--- a/modules/video_coding/timing.cc
+++ b/modules/video_coding/timing.cc
@@ -28,6 +28,7 @@
codec_timer_(new VCMCodecTimer()),
render_delay_ms_(kDefaultRenderDelayMs),
min_playout_delay_ms_(0),
+ max_playout_delay_ms_(10000),
jitter_delay_ms_(0),
current_delay_ms_(0),
last_decode_ms_(0),
@@ -91,17 +92,32 @@
codec_timer_.reset(new VCMCodecTimer());
}
-void VCMTiming::set_render_delay(uint32_t render_delay_ms) {
+void VCMTiming::set_render_delay(int render_delay_ms) {
CriticalSectionScoped cs(crit_sect_);
render_delay_ms_ = render_delay_ms;
}
-void VCMTiming::set_min_playout_delay(uint32_t min_playout_delay_ms) {
+void VCMTiming::set_min_playout_delay(int min_playout_delay_ms) {
CriticalSectionScoped cs(crit_sect_);
min_playout_delay_ms_ = min_playout_delay_ms;
}
-void VCMTiming::SetJitterDelay(uint32_t jitter_delay_ms) {
+int VCMTiming::min_playout_delay() {
+ CriticalSectionScoped cs(crit_sect_);
+ return min_playout_delay_ms_;
+}
+
+void VCMTiming::set_max_playout_delay(int max_playout_delay_ms) {
+ CriticalSectionScoped cs(crit_sect_);
+ max_playout_delay_ms_ = max_playout_delay_ms;
+}
+
+int VCMTiming::max_playout_delay() {
+ CriticalSectionScoped cs(crit_sect_);
+ return max_playout_delay_ms_;
+}
+
+void VCMTiming::SetJitterDelay(int jitter_delay_ms) {
CriticalSectionScoped cs(crit_sect_);
if (jitter_delay_ms != jitter_delay_ms_) {
jitter_delay_ms_ = jitter_delay_ms;
@@ -114,7 +130,7 @@
void VCMTiming::UpdateCurrentDelay(uint32_t frame_timestamp) {
CriticalSectionScoped cs(crit_sect_);
- uint32_t target_delay_ms = TargetDelayInternal();
+ int target_delay_ms = TargetDelayInternal();
if (current_delay_ms_ == 0) {
// Not initialized, set current delay to target.
@@ -147,7 +163,7 @@
delay_diff_ms = std::max(delay_diff_ms, -max_change_ms);
delay_diff_ms = std::min(delay_diff_ms, max_change_ms);
- current_delay_ms_ = current_delay_ms_ + static_cast<int32_t>(delay_diff_ms);
+ current_delay_ms_ = current_delay_ms_ + delay_diff_ms;
}
prev_frame_timestamp_ = frame_timestamp;
}
@@ -163,7 +179,7 @@
return;
}
if (current_delay_ms_ + delayed_ms <= target_delay_ms) {
- current_delay_ms_ += static_cast<uint32_t>(delayed_ms);
+ current_delay_ms_ += delayed_ms;
} else {
current_delay_ms_ = target_delay_ms;
}
@@ -211,14 +227,21 @@
estimated_complete_time_ms = now_ms;
}
- // Make sure that we have at least the playout delay.
- uint32_t actual_delay = std::max(current_delay_ms_, min_playout_delay_ms_);
+ if (min_playout_delay_ms_ == 0 && max_playout_delay_ms_ == 0) {
+    // Render as soon as possible.
+ return now_ms;
+ }
+
+ // Make sure the actual delay stays in the range of |min_playout_delay_ms_|
+ // and |max_playout_delay_ms_|.
+ int actual_delay = std::max(current_delay_ms_, min_playout_delay_ms_);
+ actual_delay = std::min(actual_delay, max_playout_delay_ms_);
return estimated_complete_time_ms + actual_delay;
}
// Must be called from inside a critical section.
-int64_t VCMTiming::RequiredDecodeTimeMs() const {
- const int64_t decode_time_ms = codec_timer_->RequiredDecodeTimeMs();
+int VCMTiming::RequiredDecodeTimeMs() const {
+ const int decode_time_ms = codec_timer_->RequiredDecodeTimeMs();
assert(decode_time_ms >= 0);
return decode_time_ms;
}
@@ -254,16 +277,14 @@
0;
}
-uint32_t VCMTiming::TargetVideoDelay() const {
+int VCMTiming::TargetVideoDelay() const {
CriticalSectionScoped cs(crit_sect_);
return TargetDelayInternal();
}
-uint32_t VCMTiming::TargetDelayInternal() const {
+int VCMTiming::TargetDelayInternal() const {
return std::max(min_playout_delay_ms_,
- jitter_delay_ms_ +
- static_cast<uint32_t>(RequiredDecodeTimeMs()) +
- render_delay_ms_);
+ jitter_delay_ms_ + RequiredDecodeTimeMs() + render_delay_ms_);
}
void VCMTiming::GetTimings(int* decode_ms,
@@ -275,7 +296,7 @@
int* render_delay_ms) const {
CriticalSectionScoped cs(crit_sect_);
*decode_ms = last_decode_ms_;
- *max_decode_ms = static_cast<int>(RequiredDecodeTimeMs());
+ *max_decode_ms = RequiredDecodeTimeMs();
*current_delay_ms = current_delay_ms_;
*target_delay_ms = TargetDelayInternal();
*jitter_buffer_ms = jitter_delay_ms_;
diff --git a/modules/video_coding/timing.h b/modules/video_coding/timing.h
index e593c9a..85a3dd0 100644
--- a/modules/video_coding/timing.h
+++ b/modules/video_coding/timing.h
@@ -35,14 +35,23 @@
void ResetDecodeTime();
// Set the amount of time needed to render an image. Defaults to 10 ms.
- void set_render_delay(uint32_t render_delay_ms);
+ void set_render_delay(int render_delay_ms);
// Set the minimum time the video must be delayed on the receiver to
// get the desired jitter buffer level.
- void SetJitterDelay(uint32_t required_delay_ms);
+ void SetJitterDelay(int required_delay_ms);
- // Set the minimum playout delay required to sync video with audio.
- void set_min_playout_delay(uint32_t min_playout_delay);
+ // Set the minimum playout delay from capture to render in ms.
+ void set_min_playout_delay(int min_playout_delay_ms);
+
+ // Returns the minimum playout delay from capture to render in ms.
+ int min_playout_delay();
+
+ // Set the maximum playout delay from capture to render in ms.
+ void set_max_playout_delay(int max_playout_delay_ms);
+
+ // Returns the maximum playout delay from capture to render in ms.
+ int max_playout_delay();
// Increases or decreases the current delay to get closer to the target delay.
// Calculates how long it has been since the previous call to this function,
@@ -77,7 +86,7 @@
// Returns the current target delay which is required delay + decode time +
// render delay.
- uint32_t TargetVideoDelay() const;
+ int TargetVideoDelay() const;
// Calculates whether or not there is enough time to decode a frame given a
// certain amount of processing time.
@@ -96,11 +105,10 @@
enum { kDelayMaxChangeMsPerS = 100 };
protected:
- int64_t RequiredDecodeTimeMs() const
- EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ int RequiredDecodeTimeMs() const EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
int64_t RenderTimeMsInternal(uint32_t frame_timestamp, int64_t now_ms) const
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
- uint32_t TargetDelayInternal() const EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ int TargetDelayInternal() const EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
private:
void UpdateHistograms() const;
@@ -110,10 +118,16 @@
bool master_ GUARDED_BY(crit_sect_);
TimestampExtrapolator* ts_extrapolator_ GUARDED_BY(crit_sect_);
std::unique_ptr<VCMCodecTimer> codec_timer_ GUARDED_BY(crit_sect_);
- uint32_t render_delay_ms_ GUARDED_BY(crit_sect_);
- uint32_t min_playout_delay_ms_ GUARDED_BY(crit_sect_);
- uint32_t jitter_delay_ms_ GUARDED_BY(crit_sect_);
- uint32_t current_delay_ms_ GUARDED_BY(crit_sect_);
+ int render_delay_ms_ GUARDED_BY(crit_sect_);
+ // Best-effort playout delay range for frames from capture to render.
+ // The receiver tries to keep the delay between |min_playout_delay_ms_|
+ // and |max_playout_delay_ms_| taking the network jitter into account.
+ // A special case is where min_playout_delay_ms_ = max_playout_delay_ms_ = 0,
+ // in which case the receiver tries to play the frames as they arrive.
+ int min_playout_delay_ms_ GUARDED_BY(crit_sect_);
+ int max_playout_delay_ms_ GUARDED_BY(crit_sect_);
+ int jitter_delay_ms_ GUARDED_BY(crit_sect_);
+ int current_delay_ms_ GUARDED_BY(crit_sect_);
int last_decode_ms_ GUARDED_BY(crit_sect_);
uint32_t prev_frame_timestamp_ GUARDED_BY(crit_sect_);
diff --git a/modules/video_coding/timing_unittest.cc b/modules/video_coding/timing_unittest.cc
index 51ef354..3e3b642 100644
--- a/modules/video_coding/timing_unittest.cc
+++ b/modules/video_coding/timing_unittest.cc
@@ -104,7 +104,7 @@
clock.TimeInMilliseconds());
EXPECT_EQ(waitTime, jitterDelayMs);
- uint32_t minTotalDelayMs = 200;
+ int minTotalDelayMs = 200;
timing.set_min_playout_delay(minTotalDelayMs);
clock.AdvanceTimeMilliseconds(5000);
timeStamp += 5 * 90000;
diff --git a/video/payload_router.cc b/video/payload_router.cc
index 3be5882..2143902 100644
--- a/video/payload_router.cc
+++ b/video/payload_router.cc
@@ -152,6 +152,7 @@
if (codec_specific_info)
CopyCodecSpecific(codec_specific_info, &rtp_video_header);
rtp_video_header.rotation = encoded_image.rotation_;
+ rtp_video_header.playout_delay = encoded_image.playout_delay_;
RTC_DCHECK_LT(rtp_video_header.simulcastIdx, rtp_modules_.size());
// The simulcast index might actually be larger than the number of modules
diff --git a/video/rtp_stream_receiver.cc b/video/rtp_stream_receiver.cc
index 188cf74..eb6fc8b 100644
--- a/video/rtp_stream_receiver.cc
+++ b/video/rtp_stream_receiver.cc
@@ -442,6 +442,8 @@
rtp_header.type.Video.rotation =
ConvertCVOByteToVideoRotation(header.extension.videoRotation);
}
+ rtp_header.type.Video.playout_delay = header.extension.playout_delay;
+
OnReceivedPayloadData(nullptr, 0, &rtp_header);
}
diff --git a/video_frame.h b/video_frame.h
index 4dc3411..db06a96 100644
--- a/video_frame.h
+++ b/video_frame.h
@@ -179,6 +179,11 @@
bool _completeFrame = false;
AdaptReason adapt_reason_;
int qp_ = -1; // Quantizer value.
+
+  // When an application indicates non-negative values here, it is taken as an
+ // indication that all future frames will be constrained with those limits
+ // until the application indicates a change again.
+ PlayoutDelay playout_delay_ = {-1, -1};
};
} // namespace webrtc