Make the RTP timestamp member of EncodedImage private
A follow-up to https://webrtc-review.googlesource.com/c/src/+/82160,
which added accessor methods.
Bug: webrtc:9378
Change-Id: Id3cff46cde3a5a3fb6d6edd4e8dac26193e6481c
Reviewed-on: https://webrtc-review.googlesource.com/95103
Reviewed-by: Sebastian Jansson <srte@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Reviewed-by: Magnus Jedvert <magjed@webrtc.org>
Commit-Queue: Niels Moller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#24705}
diff --git a/common_video/include/video_frame.h b/common_video/include/video_frame.h
index 371a8f0..565870e 100644
--- a/common_video/include/video_frame.h
+++ b/common_video/include/video_frame.h
@@ -40,10 +40,10 @@
// TODO(nisse): Change style to timestamp(), set_timestamp(), for consistency
// with the VideoFrame class.
// Set frame timestamp (90kHz).
- void SetTimestamp(uint32_t timestamp) { _timeStamp = timestamp; }
+ void SetTimestamp(uint32_t timestamp) { timestamp_rtp_ = timestamp; }
// Get frame timestamp (90kHz).
- uint32_t Timestamp() const { return _timeStamp; }
+ uint32_t Timestamp() const { return timestamp_rtp_; }
void SetEncodeTime(int64_t encode_start_ms, int64_t encode_finish_ms);
@@ -60,9 +60,6 @@
uint32_t _encodedWidth = 0;
uint32_t _encodedHeight = 0;
- // TODO(nisse): Make private, once users have been updated
- // to use accessor methods.
- uint32_t _timeStamp = 0;
// NTP time of the capture time in local timebase in milliseconds.
int64_t ntp_time_ms_ = 0;
int64_t capture_time_ms_ = 0;
@@ -93,6 +90,7 @@
} timing_;
private:
+ uint32_t timestamp_rtp_ = 0;
// -1 means not set. Use a plain int rather than optional, to keep this class
// copyable with memcpy.
int spatial_index_ = -1;
diff --git a/media/engine/simulcast_encoder_adapter_unittest.cc b/media/engine/simulcast_encoder_adapter_unittest.cc
index 3999840..8ab7383 100644
--- a/media/engine/simulcast_encoder_adapter_unittest.cc
+++ b/media/engine/simulcast_encoder_adapter_unittest.cc
@@ -342,7 +342,7 @@
last_encoded_image_simulcast_index_ =
encoded_image.SpatialIndex().value_or(-1);
- return Result(Result::OK, encoded_image._timeStamp);
+ return Result(Result::OK, encoded_image.Timestamp());
}
bool GetLastEncodedImageInfo(int* out_width,
diff --git a/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc b/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
index 648e412..cc588d0 100644
--- a/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
+++ b/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
@@ -131,10 +131,11 @@
MultiplexImage image = MultiplexEncodedImagePacker::Unpack(input_image);
if (supports_augmenting_data_) {
- RTC_DCHECK(decoded_augmenting_data_.find(input_image._timeStamp) ==
+ RTC_DCHECK(decoded_augmenting_data_.find(input_image.Timestamp()) ==
decoded_augmenting_data_.end());
decoded_augmenting_data_.emplace(
- std::piecewise_construct, std::forward_as_tuple(input_image._timeStamp),
+ std::piecewise_construct,
+ std::forward_as_tuple(input_image.Timestamp()),
std::forward_as_tuple(std::move(image.augmenting_data),
image.augmenting_data_size));
}
diff --git a/test/fake_encoder.cc b/test/fake_encoder.cc
index 2075399..b02d6c1 100644
--- a/test/fake_encoder.cc
+++ b/test/fake_encoder.cc
@@ -107,7 +107,7 @@
memcpy(encoded_buffer.get(), encoded_buffer_, frame_info.layers[i].size);
EncodedImage encoded(encoded_buffer.get(), frame_info.layers[i].size,
sizeof(encoded_buffer_));
- encoded._timeStamp = input_image.timestamp();
+ encoded.SetTimestamp(input_image.timestamp());
encoded.capture_time_ms_ = input_image.render_time_ms();
encoded._frameType =
frame_info.keyframe ? kVideoFrameKey : kVideoFrameDelta;
diff --git a/test/fake_vp8_encoder.cc b/test/fake_vp8_encoder.cc
index 04dff00..afd16d4 100644
--- a/test/fake_vp8_encoder.cc
+++ b/test/fake_vp8_encoder.cc
@@ -102,11 +102,12 @@
uint8_t stream_idx = encoded_image.SpatialIndex().value_or(0);
CodecSpecificInfo overrided_specific_info;
TemporalLayers::FrameConfig tl_config =
- temporal_layers_[stream_idx]->UpdateLayerConfig(encoded_image._timeStamp);
+ temporal_layers_[stream_idx]->UpdateLayerConfig(
+ encoded_image.Timestamp());
PopulateCodecSpecific(&overrided_specific_info, tl_config,
encoded_image._frameType, stream_idx,
- encoded_image._timeStamp);
- temporal_layers_[stream_idx]->FrameEncoded(encoded_image._timeStamp,
+ encoded_image.Timestamp());
+ temporal_layers_[stream_idx]->FrameEncoded(encoded_image.Timestamp(),
encoded_image._length, -1);
return callback_->OnEncodedImage(encoded_image, &overrided_specific_info,
diff --git a/video/send_statistics_proxy.cc b/video/send_statistics_proxy.cc
index fe4d887..a80b5a5 100644
--- a/video/send_statistics_proxy.cc
+++ b/video/send_statistics_proxy.cc
@@ -260,7 +260,7 @@
}
}
- auto it = encoded_frames_.find(encoded_frame._timeStamp);
+ auto it = encoded_frames_.find(encoded_frame.Timestamp());
if (it == encoded_frames_.end()) {
// First frame with this timestamp.
encoded_frames_.insert(