Implement timing frames.
Timing information is gathered in EncodedImage,
starting at the encoders. It is then sent using an RTP header
extension and finally collected at the GenericDecoder. Actual
reporting and tests will follow in subsequent CLs.
BUG=webrtc:7594
Review-Url: https://codereview.webrtc.org/2911193002
Cr-Original-Commit-Position: refs/heads/master@{#18659}
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: 04f4d126f84dbbb915f52d91807cabdabf08d483
diff --git a/video/rtp_video_stream_receiver.cc b/video/rtp_video_stream_receiver.cc
index f1e530d..6a55a3d 100644
--- a/video/rtp_video_stream_receiver.cc
+++ b/video/rtp_video_stream_receiver.cc
@@ -10,8 +10,8 @@
#include "webrtc/video/rtp_video_stream_receiver.h"
-#include <vector>
#include <utility>
+#include <vector>
#include "webrtc/base/checks.h"
#include "webrtc/base/location.h"
@@ -239,6 +239,7 @@
VCMPacket packet(payload_data, payload_size, rtp_header_with_ntp);
packet.timesNacked =
nack_module_ ? nack_module_->OnReceivedPacket(packet) : -1;
+ packet.receive_time_ms = clock_->TimeInMilliseconds();
// In the case of a video stream without picture ids and no rtx the
// RtpFrameReferenceFinder will need to know about padding to
@@ -520,6 +521,11 @@
if (header.extension.hasVideoContentType) {
rtp_header.type.Video.content_type = header.extension.videoContentType;
}
+ rtp_header.type.Video.video_timing = {0u, 0u, 0u, 0u, 0u, 0u, false};
+ if (header.extension.has_video_timing) {
+ rtp_header.type.Video.video_timing = header.extension.video_timing;
+ rtp_header.type.Video.video_timing.is_timing_frame = true;
+ }
rtp_header.type.Video.playout_delay = header.extension.playout_delay;
OnReceivedPayloadData(nullptr, 0, &rtp_header);