Add accessor methods for RTP timestamp of EncodedImage.

The intention is to make the member private, but downstream callers
must first be updated to use the accessor methods.
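
Call sites migrate from direct field access to the accessors; an
illustrative sketch, using the same variable names that appear in the
hunks below (not itself part of the patch):

  // Before: poking the public member directly.
  encoded_image._timeStamp = input_frame.timestamp();

  // After: going through the accessor methods (90 kHz RTP clock).
  encoded_image.SetTimestamp(input_frame.timestamp());
  uint32_t rtp_timestamp = encoded_image.Timestamp();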

Bug: webrtc:9378
Change-Id: I3495bd8d545b7234fbea10abfd14f082caa420b6
Reviewed-on: https://webrtc-review.googlesource.com/82160
Reviewed-by: Magnus Jedvert <magjed@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Reviewed-by: Sebastian Jansson <srte@webrtc.org>
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Commit-Queue: Niels Möller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#24352}
diff --git a/api/video/encoded_frame.cc b/api/video/encoded_frame.cc
index f9152b2..26a794e 100644
--- a/api/video/encoded_frame.cc
+++ b/api/video/encoded_frame.cc
@@ -17,13 +17,5 @@
   return 0;
 }
 
-uint32_t EncodedFrame::Timestamp() const {
-  return timestamp;
-}
-
-void EncodedFrame::SetTimestamp(uint32_t rtp_timestamp) {
-  timestamp = rtp_timestamp;
-}
-
 }  // namespace video_coding
 }  // namespace webrtc
diff --git a/api/video/encoded_frame.h b/api/video/encoded_frame.h
index 1b0a26a..b8462c6 100644
--- a/api/video/encoded_frame.h
+++ b/api/video/encoded_frame.h
@@ -58,10 +58,6 @@
 
   virtual bool GetBitstream(uint8_t* destination) const = 0;
 
-  // The capture timestamp of this frame, using the 90 kHz RTP clock.
-  virtual uint32_t Timestamp() const;
-  virtual void SetTimestamp(uint32_t rtp_timestamp);
-
   // When this frame was received.
   virtual int64_t ReceivedTime() const = 0;
 
@@ -78,7 +74,6 @@
   bool is_keyframe() const { return num_references == 0; }
 
   VideoLayerFrameId id;
-  uint32_t timestamp = 0;
 
   // TODO(philipel): Add simple modify/access functions to prevent adding too
   // many |references|.
diff --git a/call/rtp_video_sender.cc b/call/rtp_video_sender.cc
index d9dcb87..23116b2 100644
--- a/call/rtp_video_sender.cc
+++ b/call/rtp_video_sender.cc
@@ -331,7 +331,7 @@
   }
   bool send_result = rtp_modules_[stream_index]->SendOutgoingData(
       encoded_image._frameType, rtp_config_.payload_type,
-      encoded_image._timeStamp, encoded_image.capture_time_ms_,
+      encoded_image.Timestamp(), encoded_image.capture_time_ms_,
       encoded_image._buffer, encoded_image._length, fragmentation,
       &rtp_video_header, &frame_id);
   if (!send_result)
diff --git a/call/rtp_video_sender_unittest.cc b/call/rtp_video_sender_unittest.cc
index b819e69..9e3290d 100644
--- a/call/rtp_video_sender_unittest.cc
+++ b/call/rtp_video_sender_unittest.cc
@@ -140,7 +140,7 @@
 TEST(RtpVideoSenderTest, SendOnOneModule) {
   uint8_t payload = 'a';
   EncodedImage encoded_image;
-  encoded_image._timeStamp = 1;
+  encoded_image.SetTimestamp(1);
   encoded_image.capture_time_ms_ = 2;
   encoded_image._frameType = kVideoFrameKey;
   encoded_image._buffer = &payload;
@@ -170,7 +170,7 @@
 TEST(RtpVideoSenderTest, SendSimulcastSetActive) {
   uint8_t payload = 'a';
   EncodedImage encoded_image;
-  encoded_image._timeStamp = 1;
+  encoded_image.SetTimestamp(1);
   encoded_image.capture_time_ms_ = 2;
   encoded_image._frameType = kVideoFrameKey;
   encoded_image._buffer = &payload;
@@ -217,7 +217,7 @@
 TEST(RtpVideoSenderTest, SendSimulcastSetActiveModules) {
   uint8_t payload = 'a';
   EncodedImage encoded_image;
-  encoded_image._timeStamp = 1;
+  encoded_image.SetTimestamp(1);
   encoded_image.capture_time_ms_ = 2;
   encoded_image._frameType = kVideoFrameKey;
   encoded_image._buffer = &payload;
diff --git a/common_video/include/video_frame.h b/common_video/include/video_frame.h
index ccb709f..371a8f0 100644
--- a/common_video/include/video_frame.h
+++ b/common_video/include/video_frame.h
@@ -37,6 +37,14 @@
   EncodedImage(const EncodedImage&);
   EncodedImage(uint8_t* buffer, size_t length, size_t size);
 
+  // TODO(nisse): Change style to timestamp(), set_timestamp(), for consistency
+  // with the VideoFrame class.
+  // Set frame timestamp (90kHz).
+  void SetTimestamp(uint32_t timestamp) { _timeStamp = timestamp; }
+
+  // Get frame timestamp (90kHz).
+  uint32_t Timestamp() const { return _timeStamp; }
+
   void SetEncodeTime(int64_t encode_start_ms, int64_t encode_finish_ms);
 
   absl::optional<int> SpatialIndex() const {
@@ -52,6 +60,8 @@
 
   uint32_t _encodedWidth = 0;
   uint32_t _encodedHeight = 0;
+  // TODO(nisse): Make private, once users have been updated
+  // to use accessor methods.
   uint32_t _timeStamp = 0;
   // NTP time of the capture time in local timebase in milliseconds.
   int64_t ntp_time_ms_ = 0;
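
The TODO above points at the lower-case accessor style already used by
VideoFrame. A hypothetical sketch of where this header is headed once
the member can be made private (not part of this change):

  #include <cstdint>

  class EncodedImage {
   public:
    // Frame timestamp on the 90 kHz RTP clock.
    uint32_t timestamp() const { return timestamp_; }
    void set_timestamp(uint32_t timestamp) { timestamp_ = timestamp; }

   private:
    uint32_t timestamp_ = 0;
  };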
diff --git a/media/engine/simulcast_encoder_adapter_unittest.cc b/media/engine/simulcast_encoder_adapter_unittest.cc
index 5f1eebe..410b85e 100644
--- a/media/engine/simulcast_encoder_adapter_unittest.cc
+++ b/media/engine/simulcast_encoder_adapter_unittest.cc
@@ -343,7 +343,7 @@
       last_encoded_image_simulcast_index_ =
           codec_specific_info->codecSpecific.VP8.simulcastIdx;
     }
-    return Result(Result::OK, encoded_image._timeStamp);
+    return Result(Result::OK, encoded_image.Timestamp());
   }
 
   bool GetLastEncodedImageInfo(int* out_width,
diff --git a/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
index ae3de06..73d4583 100644
--- a/modules/video_coding/codecs/h264/h264_decoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
@@ -304,7 +304,7 @@
       VideoFrame::Builder()
           .set_video_frame_buffer(input_frame->video_frame_buffer())
           .set_timestamp_us(input_frame->timestamp_us())
-          .set_timestamp_rtp(input_image._timeStamp)
+          .set_timestamp_rtp(input_image.Timestamp())
           .set_rotation(input_frame->rotation())
           .set_color_space(color_space)
           .build();
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
index 0b84e3a..4fc9b4f 100644
--- a/modules/video_coding/codecs/h264/h264_encoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
@@ -496,7 +496,7 @@
 
     encoded_images_[i]._encodedWidth = configurations_[i].width;
     encoded_images_[i]._encodedHeight = configurations_[i].height;
-    encoded_images_[i]._timeStamp = input_frame.timestamp();
+    encoded_images_[i].SetTimestamp(input_frame.timestamp());
     encoded_images_[i].ntp_time_ms_ = input_frame.ntp_time_ms();
     encoded_images_[i].capture_time_ms_ = input_frame.render_time_ms();
     encoded_images_[i].rotation_ = input_frame.rotation();
diff --git a/modules/video_coding/codecs/i420/i420.cc b/modules/video_coding/codecs/i420/i420.cc
index 565a39e..7c498b1 100644
--- a/modules/video_coding/codecs/i420/i420.cc
+++ b/modules/video_coding/codecs/i420/i420.cc
@@ -84,7 +84,7 @@
   }
 
   _encodedImage._frameType = kVideoFrameKey;
-  _encodedImage._timeStamp = inputImage.timestamp();
+  _encodedImage.SetTimestamp(inputImage.timestamp());
   _encodedImage._encodedHeight = inputImage.height();
   _encodedImage._encodedWidth = inputImage.width();
 
@@ -200,7 +200,7 @@
     return WEBRTC_VIDEO_CODEC_MEMORY;
   }
 
-  VideoFrame decoded_image(frame_buffer, inputImage._timeStamp, 0,
+  VideoFrame decoded_image(frame_buffer, inputImage.Timestamp(), 0,
                            webrtc::kVideoRotation_0);
   _decodeCompleteCallback->Decoded(decoded_image);
   return WEBRTC_VIDEO_CODEC_OK;
diff --git a/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc b/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
index e0d0618..1a775af 100644
--- a/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
+++ b/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
@@ -139,10 +139,10 @@
   }
 
   if (image.component_count == 1) {
-    RTC_DCHECK(decoded_data_.find(input_image._timeStamp) ==
+    RTC_DCHECK(decoded_data_.find(input_image.Timestamp()) ==
                decoded_data_.end());
     decoded_data_.emplace(std::piecewise_construct,
-                          std::forward_as_tuple(input_image._timeStamp),
+                          std::forward_as_tuple(input_image.Timestamp()),
                           std::forward_as_tuple(kAXXStream));
   }
   int32_t rv = 0;
diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
index 63cd6da..fd316cf 100644
--- a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
+++ b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
@@ -258,7 +258,7 @@
     image_component.codec_type = frame_headers[i].codec_type;
 
     EncodedImage encoded_image = combined_image;
-    encoded_image._timeStamp = combined_image._timeStamp;
+    encoded_image.SetTimestamp(combined_image.Timestamp());
     encoded_image._frameType = frame_headers[i].frame_type;
     encoded_image._size =
         static_cast<size_t>(frame_headers[i].bitstream_length);
diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
index 6f921e3..4733b3a 100644
--- a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
+++ b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
@@ -258,7 +258,8 @@
               encodedImage._length);
 
   rtc::CritScope cs(&crit_);
-  const auto& stashed_image_itr = stashed_images_.find(encodedImage._timeStamp);
+  const auto& stashed_image_itr =
+      stashed_images_.find(encodedImage.Timestamp());
   const auto& stashed_image_next_itr = std::next(stashed_image_itr, 1);
   RTC_DCHECK(stashed_image_itr != stashed_images_.end());
   MultiplexImage& stashed_image = stashed_image_itr->second;
diff --git a/modules/video_coding/codecs/test/videoprocessor.cc b/modules/video_coding/codecs/test/videoprocessor.cc
index 5a1269c..a60ddf4 100644
--- a/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/modules/video_coding/codecs/test/videoprocessor.cc
@@ -352,7 +352,7 @@
   GetLayerIndices(codec_specific, &spatial_idx, &temporal_idx);
 
   FrameStatistics* frame_stat =
-      stats_->GetFrameWithTimestamp(encoded_image._timeStamp, spatial_idx);
+      stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), spatial_idx);
   const size_t frame_number = frame_stat->frame_number;
 
   // Ensure that the encode order is monotonically increasing, within this
@@ -428,7 +428,7 @@
         if (!layer_dropped) {
           base_image = &merged_encoded_frames_[i];
           base_stat =
-              stats_->GetFrameWithTimestamp(encoded_image._timeStamp, i);
+              stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), i);
         } else if (base_image && !base_stat->non_ref_for_inter_layer_pred) {
           DecodeFrame(*base_image, i);
         }
@@ -526,7 +526,7 @@
                                  size_t spatial_idx) {
   RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
   FrameStatistics* frame_stat =
-      stats_->GetFrameWithTimestamp(encoded_image._timeStamp, spatial_idx);
+      stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), spatial_idx);
 
   frame_stat->decode_start_ns = rtc::TimeNanos();
   frame_stat->decode_return_code =
@@ -551,7 +551,7 @@
     for (int base_idx = static_cast<int>(spatial_idx) - 1; base_idx >= 0;
          --base_idx) {
       EncodedImage lower_layer = merged_encoded_frames_.at(base_idx);
-      if (lower_layer._timeStamp == encoded_image._timeStamp) {
+      if (lower_layer.Timestamp() == encoded_image.Timestamp()) {
         base_image = lower_layer;
         break;
       }
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
index 49959ef..298a593 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
@@ -254,7 +254,7 @@
   vpx_codec_err_t vpx_ret =
       vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
   RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
-  ret = ReturnFrame(img, input_image._timeStamp, input_image.ntp_time_ms_, qp);
+  ret = ReturnFrame(img, input_image.Timestamp(), input_image.ntp_time_ms_, qp);
   if (ret != 0) {
     // Reset to avoid requesting key frames too often.
     if (ret < 0 && propagation_cnt_ > 0)
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
index 95abc5c..3826e14 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
@@ -881,7 +881,7 @@
         break;
       }
     }
-    encoded_images_[encoder_idx]._timeStamp = input_image.timestamp();
+    encoded_images_[encoder_idx].SetTimestamp(input_image.timestamp());
     encoded_images_[encoder_idx].capture_time_ms_ =
         input_image.render_time_ms();
     encoded_images_[encoder_idx].rotation_ = input_image.rotation();
diff --git a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index 116d06c..be92b34 100644
--- a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -136,7 +136,7 @@
   CodecSpecificInfo codec_specific_info;
   EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
 
-  EXPECT_EQ(kInitialTimestampRtp, encoded_frame._timeStamp);
+  EXPECT_EQ(kInitialTimestampRtp, encoded_frame.Timestamp());
   EXPECT_EQ(kInitialTimestampMs, encoded_frame.capture_time_ms_);
   EXPECT_EQ(kWidth, static_cast<int>(encoded_frame._encodedWidth));
   EXPECT_EQ(kHeight, static_cast<int>(encoded_frame._encodedHeight));
diff --git a/modules/video_coding/codecs/vp9/vp9_impl.cc b/modules/video_coding/codecs/vp9/vp9_impl.cc
index bdae580..e6df457 100644
--- a/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -1009,7 +1009,7 @@
   }
 
   TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length);
-  encoded_image_._timeStamp = input_image_->timestamp();
+  encoded_image_.SetTimestamp(input_image_->timestamp());
   encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
   encoded_image_.rotation_ = input_image_->rotation();
   encoded_image_.content_type_ = (codec_.mode == VideoCodecMode::kScreensharing)
@@ -1046,9 +1046,9 @@
 
     if (end_of_picture) {
       const uint32_t timestamp_ms =
-          1000 * encoded_image_._timeStamp / kVideoPayloadTypeFrequency;
+          1000 * encoded_image_.Timestamp() / kVideoPayloadTypeFrequency;
       output_framerate_.Update(1, timestamp_ms);
-      last_encoded_frame_rtp_timestamp_ = encoded_image_._timeStamp;
+      last_encoded_frame_rtp_timestamp_ = encoded_image_.Timestamp();
     }
   }
 }
@@ -1190,7 +1190,7 @@
       vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
   RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
   int ret =
-      ReturnFrame(img, input_image._timeStamp, input_image.ntp_time_ms_, qp);
+      ReturnFrame(img, input_image.Timestamp(), input_image.ntp_time_ms_, qp);
   if (ret != 0) {
     return ret;
   }
diff --git a/modules/video_coding/decoding_state.cc b/modules/video_coding/decoding_state.cc
index 23bf668..1d54063 100644
--- a/modules/video_coding/decoding_state.cc
+++ b/modules/video_coding/decoding_state.cc
@@ -58,7 +58,7 @@
   assert(frame != NULL);
   if (in_initial_state_)
     return false;
-  return !IsNewerTimestamp(frame->TimeStamp(), time_stamp_);
+  return !IsNewerTimestamp(frame->Timestamp(), time_stamp_);
 }
 
 bool VCMDecodingState::IsOldPacket(const VCMPacket* packet) const {
@@ -73,7 +73,7 @@
   if (!UsingFlexibleMode(frame))
     UpdateSyncState(frame);
   sequence_num_ = static_cast<uint16_t>(frame->GetHighSeqNum());
-  time_stamp_ = frame->TimeStamp();
+  time_stamp_ = frame->Timestamp();
   picture_id_ = frame->PictureId();
   temporal_id_ = frame->TemporalId();
   tl0_pic_id_ = frame->Tl0PicId();
@@ -143,7 +143,7 @@
     // Continuous empty packets or continuous frames can be dropped if we
     // advance the sequence number.
     sequence_num_ = frame->GetHighSeqNum();
-    time_stamp_ = frame->TimeStamp();
+    time_stamp_ = frame->Timestamp();
     return true;
   }
   return false;
diff --git a/modules/video_coding/encoded_frame.cc b/modules/video_coding/encoded_frame.cc
index 91ee871..c53a737 100644
--- a/modules/video_coding/encoded_frame.cc
+++ b/modules/video_coding/encoded_frame.cc
@@ -35,8 +35,8 @@
 }
 
 void VCMEncodedFrame::Reset() {
+  SetTimestamp(0);
   _renderTimeMs = -1;
-  _timeStamp = 0;
   _payloadType = 0;
   _frameType = kVideoFrameDelta;
   _encodedWidth = 0;
diff --git a/modules/video_coding/encoded_frame.h b/modules/video_coding/encoded_frame.h
index 252dea4..a08eb07 100644
--- a/modules/video_coding/encoded_frame.h
+++ b/modules/video_coding/encoded_frame.h
@@ -64,10 +64,12 @@
    *   Get frame length
    */
   size_t Length() const { return _length; }
+
   /**
-   *   Get frame timestamp (90kHz)
+   *   Frame RTP timestamp (90kHz)
    */
-  uint32_t TimeStamp() const { return _timeStamp; }
+  using EncodedImage::Timestamp;
+  using EncodedImage::SetTimestamp;
   /**
    *   Get render time in milliseconds
    */
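
The using-declarations replace the old TimeStamp() wrapper by re-exposing
the EncodedImage accessors directly, so VCMEncodedFrame keeps a single
implementation under the new spelling. A minimal sketch of the pattern
(Base and Derived are illustrative names, not types from this change):

  #include <cstdint>

  struct Base {
    uint32_t Timestamp() const { return timestamp_; }
    void SetTimestamp(uint32_t timestamp) { timestamp_ = timestamp; }

   protected:
    uint32_t timestamp_ = 0;
  };

  struct Derived : Base {
    // Name the inherited accessors explicitly in the derived interface,
    // instead of defining a thin forwarding wrapper like TimeStamp().
    using Base::Timestamp;
    using Base::SetTimestamp;
  };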
diff --git a/modules/video_coding/frame_buffer.cc b/modules/video_coding/frame_buffer.cc
index b9241b9..9c2819e 100644
--- a/modules/video_coding/frame_buffer.cc
+++ b/modules/video_coding/frame_buffer.cc
@@ -87,7 +87,7 @@
   if (kStateEmpty == _state) {
     // First packet (empty and/or media) inserted into this frame.
     // store some info and set some initial values.
-    _timeStamp = packet.timestamp;
+    SetTimestamp(packet.timestamp);
     // We only take the ntp timestamp of the first packet of a frame.
     ntp_time_ms_ = packet.ntp_time_ms_;
     _codec = packet.codec;
@@ -213,7 +213,6 @@
 void VCMFrameBuffer::Reset() {
   TRACE_EVENT0("webrtc", "VCMFrameBuffer::Reset");
   _length = 0;
-  _timeStamp = 0;
   _sessionInfo.Reset();
   _payloadType = 0;
   _nackCount = 0;
diff --git a/modules/video_coding/frame_buffer2.cc b/modules/video_coding/frame_buffer2.cc
index f13ed35..8bb241a 100644
--- a/modules/video_coding/frame_buffer2.cc
+++ b/modules/video_coding/frame_buffer2.cc
@@ -117,7 +117,8 @@
 
         next_frame_it_ = frame_it;
         if (frame->RenderTime() == -1)
-          frame->SetRenderTime(timing_->RenderTimeMs(frame->timestamp, now_ms));
+          frame->SetRenderTime(
+              timing_->RenderTimeMs(frame->Timestamp(), now_ms));
         wait_ms = timing_->MaxWaitingTime(frame->RenderTime(), now_ms);
 
         // This will cause the frame buffer to prefer high framerate rather
@@ -146,7 +147,7 @@
       if (!frame->delayed_by_retransmission()) {
         int64_t frame_delay;
 
-        if (inter_frame_delay_.CalculateDelay(frame->timestamp, &frame_delay,
+        if (inter_frame_delay_.CalculateDelay(frame->Timestamp(), &frame_delay,
                                               frame->ReceivedTime())) {
           jitter_estimator_->UpdateEstimate(frame_delay, frame->size());
         }
@@ -163,7 +164,7 @@
       if (HasBadRenderTiming(*frame, now_ms)) {
         jitter_estimator_->Reset();
         timing_->Reset();
-        frame->SetRenderTime(timing_->RenderTimeMs(frame->timestamp, now_ms));
+        frame->SetRenderTime(timing_->RenderTimeMs(frame->Timestamp(), now_ms));
       }
 
       UpdateJitterDelay();
@@ -177,17 +178,17 @@
         const VideoLayerFrameId& frame_key = next_frame_it_->first;
 
         const bool frame_is_higher_spatial_layer_of_last_decoded_frame =
-            last_decoded_frame_timestamp_ == frame->timestamp &&
+            last_decoded_frame_timestamp_ == frame->Timestamp() &&
             last_decoded_frame_key.picture_id == frame_key.picture_id &&
             last_decoded_frame_key.spatial_layer < frame_key.spatial_layer;
 
-        if (AheadOrAt(last_decoded_frame_timestamp_, frame->timestamp) &&
+        if (AheadOrAt(last_decoded_frame_timestamp_, frame->Timestamp()) &&
             !frame_is_higher_spatial_layer_of_last_decoded_frame) {
           // TODO(brandtr): Consider clearing the entire buffer when we hit
           // these conditions.
           RTC_LOG(LS_WARNING)
               << "Frame with (timestamp:picture_id:spatial_id) ("
-              << frame->timestamp << ":" << frame->id.picture_id << ":"
+              << frame->Timestamp() << ":" << frame->id.picture_id << ":"
               << static_cast<int>(frame->id.spatial_layer) << ")"
               << " sent to decoder after frame with"
               << " (timestamp:picture_id:spatial_id) ("
@@ -198,7 +199,7 @@
       }
 
       AdvanceLastDecodedFrame(next_frame_it_);
-      last_decoded_frame_timestamp_ = frame->timestamp;
+      last_decoded_frame_timestamp_ = frame->Timestamp();
       *frame_out = std::move(frame);
       return kFrameFound;
     }
@@ -297,7 +298,7 @@
     timing_->set_max_playout_delay(playout_delay.max_ms);
 
   if (!frame.delayed_by_retransmission())
-    timing_->IncomingTimestamp(frame.timestamp, frame.ReceivedTime());
+    timing_->IncomingTimestamp(frame.Timestamp(), frame.ReceivedTime());
 }
 
 int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame) {
@@ -343,7 +344,7 @@
 
   if (last_decoded_frame_it_ != frames_.end() &&
       id <= last_decoded_frame_it_->first) {
-    if (AheadOf(frame->timestamp, last_decoded_frame_timestamp_) &&
+    if (AheadOf(frame->Timestamp(), last_decoded_frame_timestamp_) &&
         frame->is_keyframe()) {
       // If this frame has a newer timestamp but an earlier picture id then we
       // assume there has been a jump in the picture id due to some encoder
diff --git a/modules/video_coding/frame_buffer2_unittest.cc b/modules/video_coding/frame_buffer2_unittest.cc
index 1378be5..357ba86 100644
--- a/modules/video_coding/frame_buffer2_unittest.cc
+++ b/modules/video_coding/frame_buffer2_unittest.cc
@@ -90,8 +90,6 @@
  public:
   bool GetBitstream(uint8_t* destination) const override { return true; }
 
-  uint32_t Timestamp() const override { return timestamp; }
-
   int64_t ReceivedTime() const override { return 0; }
 
   int64_t RenderTime() const override { return _renderTimeMs; }
@@ -165,7 +163,7 @@
     std::unique_ptr<FrameObjectFake> frame(new FrameObjectFake());
     frame->id.picture_id = picture_id;
     frame->id.spatial_layer = spatial_layer;
-    frame->timestamp = ts_ms * 90;
+    frame->SetTimestamp(ts_ms * 90);
     frame->num_references = references.size();
     frame->inter_layer_predicted = inter_layer_predicted;
     for (size_t r = 0; r < references.size(); ++r)
@@ -520,7 +518,7 @@
     frame->SetSize(kFrameSize);
     frame->id.picture_id = pid;
     frame->id.spatial_layer = 0;
-    frame->timestamp = ts;
+    frame->SetTimestamp(ts);
     frame->num_references = 0;
     frame->inter_layer_predicted = false;
 
diff --git a/modules/video_coding/frame_object.cc b/modules/video_coding/frame_object.cc
index b990cdd..57c60ca 100644
--- a/modules/video_coding/frame_object.cc
+++ b/modules/video_coding/frame_object.cc
@@ -26,7 +26,6 @@
     : packet_buffer_(packet_buffer),
       first_seq_num_(first_seq_num),
       last_seq_num_(last_seq_num),
-      timestamp_(0),
       received_time_(received_time),
       times_nacked_(times_nacked) {
   VCMPacket* first_packet = packet_buffer_->GetPacket(first_seq_num);
@@ -41,7 +40,7 @@
   CopyCodecSpecific(&first_packet->video_header);
   _completeFrame = true;
   _payloadType = first_packet->payloadType;
-  _timeStamp = first_packet->timestamp;
+  SetTimestamp(first_packet->timestamp);
   ntp_time_ms_ = first_packet->ntp_time_ms_;
   _frameType = first_packet->frameType;
 
@@ -69,7 +68,7 @@
   _encodedHeight = first_packet->height;
 
   // EncodedFrame members
-  timestamp = first_packet->timestamp;
+  SetTimestamp(first_packet->timestamp);
 
   VCMPacket* last_packet = packet_buffer_->GetPacket(last_seq_num);
   RTC_CHECK(last_packet);
@@ -140,10 +139,6 @@
   return packet_buffer_->GetBitstream(*this, destination);
 }
 
-uint32_t RtpFrameObject::Timestamp() const {
-  return timestamp_;
-}
-
 int64_t RtpFrameObject::ReceivedTime() const {
   return received_time_;
 }
diff --git a/modules/video_coding/frame_object.h b/modules/video_coding/frame_object.h
index 8980984..6c6480a 100644
--- a/modules/video_coding/frame_object.h
+++ b/modules/video_coding/frame_object.h
@@ -37,7 +37,6 @@
   enum FrameType frame_type() const;
   VideoCodecType codec_type() const;
   bool GetBitstream(uint8_t* destination) const override;
-  uint32_t Timestamp() const override;
   int64_t ReceivedTime() const override;
   int64_t RenderTime() const override;
   bool delayed_by_retransmission() const override;
@@ -49,7 +48,6 @@
   VideoCodecType codec_type_;
   uint16_t first_seq_num_;
   uint16_t last_seq_num_;
-  uint32_t timestamp_;
   int64_t received_time_;
 
   // Equal to times nacked of the packet with the highet times nacked
diff --git a/modules/video_coding/generic_decoder.cc b/modules/video_coding/generic_decoder.cc
index eb16400..92e53da 100644
--- a/modules/video_coding/generic_decoder.cc
+++ b/modules/video_coding/generic_decoder.cc
@@ -211,7 +211,7 @@
 
 int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
   TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp",
-               frame.EncodedImage()._timeStamp);
+               frame.Timestamp());
   _frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
   _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
   _frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
@@ -225,7 +225,7 @@
   } else {
     _frameInfos[_nextFrameInfoIdx].content_type = _last_keyframe_content_type;
   }
-  _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);
+  _callback->Map(frame.Timestamp(), &_frameInfos[_nextFrameInfoIdx]);
 
   _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
   int32_t ret = decoder_->Decode(frame.EncodedImage(), frame.MissingFrame(),
@@ -234,13 +234,13 @@
   _callback->OnDecoderImplementationName(decoder_->ImplementationName());
   if (ret < WEBRTC_VIDEO_CODEC_OK) {
     RTC_LOG(LS_WARNING) << "Failed to decode frame with timestamp "
-                        << frame.TimeStamp() << ", error code: " << ret;
-    _callback->Pop(frame.TimeStamp());
+                        << frame.Timestamp() << ", error code: " << ret;
+    _callback->Pop(frame.Timestamp());
     return ret;
   } else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
              ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI) {
     // No output
-    _callback->Pop(frame.TimeStamp());
+    _callback->Pop(frame.Timestamp());
   }
   return ret;
 }
diff --git a/modules/video_coding/generic_encoder.cc b/modules/video_coding/generic_encoder.cc
index 5cea0a5..a8999fc 100644
--- a/modules/video_coding/generic_encoder.cc
+++ b/modules/video_coding/generic_encoder.cc
@@ -265,13 +265,14 @@
     // Because some hardware encoders don't preserve capture timestamp we
     // use RTP timestamps here.
     while (!encode_start_list->empty() &&
-           IsNewerTimestamp(encoded_image->_timeStamp,
+           IsNewerTimestamp(encoded_image->Timestamp(),
                             encode_start_list->front().rtp_timestamp)) {
       post_encode_callback_->OnDroppedFrame(DropReason::kDroppedByEncoder);
       encode_start_list->pop_front();
     }
     if (encode_start_list->size() > 0 &&
-        encode_start_list->front().rtp_timestamp == encoded_image->_timeStamp) {
+        encode_start_list->front().rtp_timestamp ==
+            encoded_image->Timestamp()) {
       result.emplace(encode_start_list->front().encode_start_time_ms);
       if (encoded_image->capture_time_ms_ !=
           encode_start_list->front().capture_time_ms) {
@@ -365,8 +366,8 @@
     int64_t clock_offset_ms = now_ms - encoded_image->timing_.encode_finish_ms;
     // Translate capture timestamp to local WebRTC clock.
     encoded_image->capture_time_ms_ += clock_offset_ms;
-    encoded_image->_timeStamp =
-        static_cast<uint32_t>(encoded_image->capture_time_ms_ * 90);
+    encoded_image->SetTimestamp(
+        static_cast<uint32_t>(encoded_image->capture_time_ms_ * 90));
     encode_start_ms.emplace(encoded_image->timing_.encode_start_ms +
                             clock_offset_ms);
   }
@@ -389,7 +390,7 @@
     const CodecSpecificInfo* codec_specific,
     const RTPFragmentationHeader* fragmentation_header) {
   TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
-                       "timestamp", encoded_image._timeStamp);
+                       "timestamp", encoded_image.Timestamp());
   size_t simulcast_svc_idx = 0;
   if (codec_specific->codecType == kVideoCodecVP9) {
     if (codec_specific->codecSpecific.VP9.num_spatial_layers > 1)
diff --git a/modules/video_coding/generic_encoder_unittest.cc b/modules/video_coding/generic_encoder_unittest.cc
index 6037381..c889769 100644
--- a/modules/video_coding/generic_encoder_unittest.cc
+++ b/modules/video_coding/generic_encoder_unittest.cc
@@ -94,7 +94,7 @@
       CodecSpecificInfo codec_specific;
       image._length = FrameSize(min_frame_size, max_frame_size, s, i);
       image.capture_time_ms_ = current_timestamp;
-      image._timeStamp = static_cast<uint32_t>(current_timestamp * 90);
+      image.SetTimestamp(static_cast<uint32_t>(current_timestamp * 90));
       codec_specific.codecType = kVideoCodecGeneric;
       codec_specific.codecSpecific.generic.simulcast_idx = s;
       callback.OnEncodeStarted(static_cast<uint32_t>(current_timestamp * 90),
@@ -187,7 +187,7 @@
   int64_t timestamp = 1;
   image._length = 500;
   image.capture_time_ms_ = timestamp;
-  image._timeStamp = static_cast<uint32_t>(timestamp * 90);
+  image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
   codec_specific.codecType = kVideoCodecGeneric;
   codec_specific.codecSpecific.generic.simulcast_idx = 0;
   FakeEncodedImageCallback sink;
@@ -204,7 +204,7 @@
 
   // New frame, now skip OnEncodeStarted. Should not result in timing frame.
   image.capture_time_ms_ = ++timestamp;
-  image._timeStamp = static_cast<uint32_t>(timestamp * 90);
+  image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
   callback.OnEncodedImage(image, &codec_specific, nullptr);
   EXPECT_FALSE(sink.WasTimingFrame());
 }
@@ -219,7 +219,7 @@
   int64_t timestamp = 1;
   image._length = 500;
   image.capture_time_ms_ = timestamp;
-  image._timeStamp = static_cast<uint32_t>(timestamp * 90);
+  image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
   codec_specific.codecType = kVideoCodecGeneric;
   codec_specific.codecSpecific.generic.simulcast_idx = 0;
   FakeEncodedImageCallback sink;
@@ -237,7 +237,7 @@
   // New frame, but this time with encode timestamps set in timing_.
   // This should be a timing frame.
   image.capture_time_ms_ = ++timestamp;
-  image._timeStamp = static_cast<uint32_t>(timestamp * 90);
+  image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
   image.timing_.encode_start_ms = timestamp + kEncodeStartDelayMs;
   image.timing_.encode_finish_ms = timestamp + kEncodeFinishDelayMs;
   callback.OnEncodedImage(image, &codec_specific, nullptr);
@@ -263,27 +263,27 @@
   // Any non-zero bitrate needed to be set before the first frame.
   callback.OnTargetBitrateChanged(500, 0);
   image.capture_time_ms_ = kTimestampMs1;
-  image._timeStamp = static_cast<uint32_t>(image.capture_time_ms_ * 90);
-  callback.OnEncodeStarted(image._timeStamp, image.capture_time_ms_, 0);
+  image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
+  callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
   EXPECT_EQ(0u, sink.GetNumFramesDropped());
   callback.OnEncodedImage(image, &codec_specific, nullptr);
 
   image.capture_time_ms_ = kTimestampMs2;
-  image._timeStamp = static_cast<uint32_t>(image.capture_time_ms_ * 90);
-  callback.OnEncodeStarted(image._timeStamp, image.capture_time_ms_, 0);
+  image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
+  callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
   // No OnEncodedImageCall for timestamp2. Yet, at this moment it's not known
   // that frame with timestamp2 was dropped.
   EXPECT_EQ(0u, sink.GetNumFramesDropped());
 
   image.capture_time_ms_ = kTimestampMs3;
-  image._timeStamp = static_cast<uint32_t>(image.capture_time_ms_ * 90);
-  callback.OnEncodeStarted(image._timeStamp, image.capture_time_ms_, 0);
+  image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
+  callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
   callback.OnEncodedImage(image, &codec_specific, nullptr);
   EXPECT_EQ(1u, sink.GetNumFramesDropped());
 
   image.capture_time_ms_ = kTimestampMs4;
-  image._timeStamp = static_cast<uint32_t>(image.capture_time_ms_ * 90);
-  callback.OnEncodeStarted(image._timeStamp, image.capture_time_ms_, 0);
+  image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
+  callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
   callback.OnEncodedImage(image, &codec_specific, nullptr);
   EXPECT_EQ(1u, sink.GetNumFramesDropped());
 }
@@ -299,8 +299,8 @@
   // Any non-zero bitrate needed to be set before the first frame.
   callback.OnTargetBitrateChanged(500, 0);
   image.capture_time_ms_ = kTimestampMs;  // Incorrect timesetamp.
-  image._timeStamp = static_cast<uint32_t>(image.capture_time_ms_ * 90);
-  callback.OnEncodeStarted(image._timeStamp, image.capture_time_ms_, 0);
+  image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
+  callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
   image.capture_time_ms_ = 0;  // Incorrect timesetamp.
   callback.OnEncodedImage(image, &codec_specific, nullptr);
   EXPECT_EQ(kTimestampMs, sink.GetLastCaptureTimestamp());
diff --git a/modules/video_coding/jitter_buffer.cc b/modules/video_coding/jitter_buffer.cc
index 03a9845..4a0622a 100644
--- a/modules/video_coding/jitter_buffer.cc
+++ b/modules/video_coding/jitter_buffer.cc
@@ -54,7 +54,7 @@
 }
 
 void FrameList::InsertFrame(VCMFrameBuffer* frame) {
-  insert(rbegin().base(), FrameListPair(frame->TimeStamp(), frame));
+  insert(rbegin().base(), FrameListPair(frame->Timestamp(), frame));
 }
 
 VCMFrameBuffer* FrameList::PopFrame(uint32_t timestamp) {
@@ -110,7 +110,7 @@
     }
     free_frames->push_back(oldest_frame);
     TRACE_EVENT_INSTANT1("webrtc", "JB::OldOrEmptyFrameDropped", "timestamp",
-                         oldest_frame->TimeStamp());
+                         oldest_frame->Timestamp());
     erase(begin());
   }
 }
@@ -212,7 +212,7 @@
       continue;
     }
     SsMap::iterator ss_it;
-    if (Find(frame_it.second->TimeStamp(), &ss_it)) {
+    if (Find(frame_it.second->Timestamp(), &ss_it)) {
       if (gof_idx >= ss_it->second.num_frames_in_gof) {
         continue;  // Assume corresponding SS not yet received.
       }
@@ -528,7 +528,7 @@
     }
   }
 
-  *timestamp = oldest_frame->TimeStamp();
+  *timestamp = oldest_frame->Timestamp();
   return true;
 }
 
@@ -564,7 +564,7 @@
       // Wait for this one to get complete.
       waiting_for_completion_.frame_size = frame->Length();
       waiting_for_completion_.latest_packet_time = frame->LatestPacketTimeMs();
-      waiting_for_completion_.timestamp = frame->TimeStamp();
+      waiting_for_completion_.timestamp = frame->Timestamp();
     }
   }
 
@@ -715,8 +715,8 @@
       frame->InsertPacket(packet, now_ms, decode_error_mode_, frame_data);
 
   if (previous_state != kStateComplete) {
-    TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(), "timestamp",
-                             frame->TimeStamp());
+    TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->Timestamp(), "timestamp",
+                             frame->Timestamp());
   }
 
   if (buffer_state > 0) {
@@ -831,7 +831,7 @@
   for (FrameList::const_iterator it = decodable_frames_.begin();
        it != decodable_frames_.end(); ++it) {
     VCMFrameBuffer* decodable_frame = it->second;
-    if (IsNewerTimestamp(decodable_frame->TimeStamp(), frame.TimeStamp())) {
+    if (IsNewerTimestamp(decodable_frame->Timestamp(), frame.Timestamp())) {
       break;
     }
     decoding_state.SetState(decodable_frame);
@@ -865,7 +865,7 @@
        it != incomplete_frames_.end();) {
     VCMFrameBuffer* frame = it->second;
     if (IsNewerTimestamp(original_decoded_state.time_stamp(),
-                         frame->TimeStamp())) {
+                         frame->Timestamp())) {
       ++it;
       continue;
     }
@@ -947,11 +947,11 @@
   if (incomplete_frames_.empty()) {
     return 0;
   }
-  uint32_t start_timestamp = incomplete_frames_.Front()->TimeStamp();
+  uint32_t start_timestamp = incomplete_frames_.Front()->Timestamp();
   if (!decodable_frames_.empty()) {
-    start_timestamp = decodable_frames_.Back()->TimeStamp();
+    start_timestamp = decodable_frames_.Back()->Timestamp();
   }
-  return incomplete_frames_.Back()->TimeStamp() - start_timestamp;
+  return incomplete_frames_.Back()->Timestamp() - start_timestamp;
 }
 
 uint16_t VCMJitterBuffer::EstimatedLowSequenceNumber(
@@ -1184,10 +1184,10 @@
   incoming_frame_count_++;
 
   if (frame.FrameType() == kVideoFrameKey) {
-    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
+    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.Timestamp(),
                             "KeyComplete");
   } else {
-    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
+    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.Timestamp(),
                             "DeltaComplete");
   }
 
@@ -1263,7 +1263,7 @@
   }
   // No retransmitted frames should be a part of the jitter
   // estimate.
-  UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.TimeStamp(),
+  UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.Timestamp(),
                        frame.Length(), incomplete_frame);
 }
 
diff --git a/modules/video_coding/jitter_buffer_unittest.cc b/modules/video_coding/jitter_buffer_unittest.cc
index d1744d5..e38a7eb 100644
--- a/modules/video_coding/jitter_buffer_unittest.cc
+++ b/modules/video_coding/jitter_buffer_unittest.cc
@@ -259,7 +259,7 @@
     VCMEncodedFrame* found_frame = jitter_buffer_->NextCompleteFrame(10);
     if (!found_frame)
       return nullptr;
-    return jitter_buffer_->ExtractAndSetDecode(found_frame->TimeStamp());
+    return jitter_buffer_->ExtractAndSetDecode(found_frame->Timestamp());
   }
 
   VCMEncodedFrame* DecodeIncompleteFrame() {
@@ -414,7 +414,7 @@
       return false;
 
     VCMEncodedFrame* frame =
-        jitter_buffer_->ExtractAndSetDecode(found_frame->TimeStamp());
+        jitter_buffer_->ExtractAndSetDecode(found_frame->Timestamp());
     bool ret = (frame != NULL);
     jitter_buffer_->ReleaseFrame(frame);
     return ret;
@@ -964,12 +964,12 @@
   EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(1000U, frame_out->TimeStamp());
+  EXPECT_EQ(1000U, frame_out->Timestamp());
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 
   frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(13000U, frame_out->TimeStamp());
+  EXPECT_EQ(13000U, frame_out->Timestamp());
   EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 }
@@ -1029,7 +1029,7 @@
   EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(3000U, frame_out->TimeStamp());
+  EXPECT_EQ(3000U, frame_out->Timestamp());
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
   EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_FALSE(
@@ -1037,14 +1037,14 @@
   jitter_buffer_->ReleaseFrame(frame_out);
 
   frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(6000U, frame_out->TimeStamp());
+  EXPECT_EQ(6000U, frame_out->Timestamp());
   EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
   EXPECT_EQ(2, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
   jitter_buffer_->ReleaseFrame(frame_out);
 
   frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(9000U, frame_out->TimeStamp());
+  EXPECT_EQ(9000U, frame_out->Timestamp());
   EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
   EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
@@ -1123,7 +1123,7 @@
   EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(3000U, frame_out->TimeStamp());
+  EXPECT_EQ(3000U, frame_out->Timestamp());
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
   EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_FALSE(
@@ -1131,7 +1131,7 @@
   jitter_buffer_->ReleaseFrame(frame_out);
 
   frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(6000U, frame_out->TimeStamp());
+  EXPECT_EQ(6000U, frame_out->Timestamp());
   EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
   EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
   EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
@@ -1481,8 +1481,8 @@
   uint32_t next_timestamp;
   VCMEncodedFrame* frame = jitter_buffer_->NextCompleteFrame(0);
   EXPECT_NE(frame, nullptr);
-  EXPECT_EQ(packet_->timestamp, frame->TimeStamp());
-  frame = jitter_buffer_->ExtractAndSetDecode(frame->TimeStamp());
+  EXPECT_EQ(packet_->timestamp, frame->Timestamp());
+  frame = jitter_buffer_->ExtractAndSetDecode(frame->Timestamp());
   EXPECT_TRUE(frame != NULL);
   jitter_buffer_->ReleaseFrame(frame);
 
@@ -1728,7 +1728,7 @@
             jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(3000u, frame_out->TimeStamp());
+  EXPECT_EQ(3000u, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
@@ -1763,7 +1763,7 @@
             jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(timestamp_, frame_out->TimeStamp());
+  EXPECT_EQ(timestamp_, frame_out->Timestamp());
 
   CheckOutFrame(frame_out, size_, false);
 
@@ -1873,13 +1873,13 @@
             jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
+  EXPECT_EQ(0xffffff00, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 
   VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
-  EXPECT_EQ(2700u, frame_out2->TimeStamp());
+  EXPECT_EQ(2700u, frame_out2->Timestamp());
   CheckOutFrame(frame_out2, size_, false);
   EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out2);
@@ -1916,13 +1916,13 @@
             jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
+  EXPECT_EQ(0xffffff00, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
 
   VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
-  EXPECT_EQ(2700u, frame_out2->TimeStamp());
+  EXPECT_EQ(2700u, frame_out2->Timestamp());
   CheckOutFrame(frame_out2, size_, false);
   EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out2);
@@ -2017,7 +2017,7 @@
             jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
-  EXPECT_EQ(first_key_frame_timestamp, frame_out->TimeStamp());
+  EXPECT_EQ(first_key_frame_timestamp, frame_out->Timestamp());
   CheckOutFrame(frame_out, size_, false);
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
   jitter_buffer_->ReleaseFrame(frame_out);
@@ -2043,7 +2043,7 @@
     VCMEncodedFrame* testFrame = DecodeIncompleteFrame();
     // Timestamp should never be the last TS inserted.
     if (testFrame != NULL) {
-      EXPECT_TRUE(testFrame->TimeStamp() < timestamp_);
+      EXPECT_TRUE(testFrame->Timestamp() < timestamp_);
       jitter_buffer_->ReleaseFrame(testFrame);
     }
   }
diff --git a/modules/video_coding/receiver.cc b/modules/video_coding/receiver.cc
index 4c56d12..7f22b73 100644
--- a/modules/video_coding/receiver.cc
+++ b/modules/video_coding/receiver.cc
@@ -140,7 +140,7 @@
       jitter_buffer_.NextCompleteFrame(max_wait_time_ms);
 
   if (found_frame) {
-    frame_timestamp = found_frame->TimeStamp();
+    frame_timestamp = found_frame->Timestamp();
     min_playout_delay_ms = found_frame->EncodedImage().playout_delay_.min_ms;
     max_playout_delay_ms = found_frame->EncodedImage().playout_delay_.max_ms;
   } else {
@@ -212,7 +212,7 @@
     return NULL;
   }
   frame->SetRenderTime(render_time_ms);
-  TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(), "SetRenderTS",
+  TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->Timestamp(), "SetRenderTS",
                           "render_time", frame->RenderTimeMs());
   if (!frame->Complete()) {
     // Update stats for incomplete frames.
diff --git a/modules/video_coding/utility/ivf_file_writer.cc b/modules/video_coding/utility/ivf_file_writer.cc
index 454aeb0..fcf2dda 100644
--- a/modules/video_coding/utility/ivf_file_writer.cc
+++ b/modules/video_coding/utility/ivf_file_writer.cc
@@ -115,7 +115,7 @@
   height_ = encoded_image._encodedHeight;
   RTC_CHECK_GT(width_, 0);
   RTC_CHECK_GT(height_, 0);
-  using_capture_timestamps_ = encoded_image._timeStamp == 0;
+  using_capture_timestamps_ = encoded_image.Timestamp() == 0;
 
   codec_type_ = codec_type;
 
@@ -151,7 +151,7 @@
 
   int64_t timestamp = using_capture_timestamps_
                           ? encoded_image.capture_time_ms_
-                          : wrap_handler_.Unwrap(encoded_image._timeStamp);
+                          : wrap_handler_.Unwrap(encoded_image.Timestamp());
   if (last_timestamp_ != -1 && timestamp <= last_timestamp_) {
     RTC_LOG(LS_WARNING) << "Timestamp no increasing: " << last_timestamp_
                         << " -> " << timestamp;
diff --git a/modules/video_coding/utility/ivf_file_writer_unittest.cc b/modules/video_coding/utility/ivf_file_writer_unittest.cc
index 2172b00..c287920 100644
--- a/modules/video_coding/utility/ivf_file_writer_unittest.cc
+++ b/modules/video_coding/utility/ivf_file_writer_unittest.cc
@@ -50,7 +50,7 @@
       if (use_capture_tims_ms) {
         frame.capture_time_ms_ = i;
       } else {
-        frame._timeStamp = i;
+        frame.SetTimestamp(i);
       }
       if (!file_writer_->WriteFrame(frame, codec_type))
         return false;
diff --git a/modules/video_coding/utility/simulcast_test_fixture_impl.cc b/modules/video_coding/utility/simulcast_test_fixture_impl.cc
index e81c2f4..03de176 100644
--- a/modules/video_coding/utility/simulcast_test_fixture_impl.cc
+++ b/modules/video_coding/utility/simulcast_test_fixture_impl.cc
@@ -109,7 +109,7 @@
       temporal_layer_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
           codec_specific_info->codecSpecific.VP8.temporalIdx;
     }
-    return Result(Result::OK, encoded_image._timeStamp);
+    return Result(Result::OK, encoded_image.Timestamp());
   }
   // This method only makes sense for VP8.
   void GetLastEncodedFrameInfo(int* temporal_layer,
diff --git a/modules/video_coding/video_sender_unittest.cc b/modules/video_coding/video_sender_unittest.cc
index c391510..5c7f32d 100644
--- a/modules/video_coding/video_sender_unittest.cc
+++ b/modules/video_coding/video_sender_unittest.cc
@@ -100,7 +100,7 @@
     assert(codec_specific_info);
     frame_data_.push_back(
         FrameData(encoded_image._length, *codec_specific_info));
-    return Result(Result::OK, encoded_image._timeStamp);
+    return Result(Result::OK, encoded_image.Timestamp());
   }
 
   void Reset() {
diff --git a/sdk/android/src/jni/androidmediadecoder.cc b/sdk/android/src/jni/androidmediadecoder.cc
index 2f5398e..47ddaac 100644
--- a/sdk/android/src/jni/androidmediadecoder.cc
+++ b/sdk/android/src/jni/androidmediadecoder.cc
@@ -517,7 +517,7 @@
   bool success = Java_MediaCodecVideoDecoder_queueInputBuffer(
       jni, j_media_codec_video_decoder_, j_input_buffer_index,
       static_cast<int>(inputImage._length), presentation_timestamp_us,
-      static_cast<int64_t>(inputImage._timeStamp), inputImage.ntp_time_ms_);
+      static_cast<int64_t>(inputImage.Timestamp()), inputImage.ntp_time_ms_);
   if (CheckException(jni) || !success) {
     ALOGE << "queueInputBuffer error";
     return ProcessHWErrorOnCodecThread();
diff --git a/sdk/android/src/jni/androidmediaencoder.cc b/sdk/android/src/jni/androidmediaencoder.cc
index 0fd6912..ebd760f 100644
--- a/sdk/android/src/jni/androidmediaencoder.cc
+++ b/sdk/android/src/jni/androidmediaencoder.cc
@@ -987,7 +987,7 @@
           new EncodedImage(payload, payload_size, payload_size));
       image->_encodedWidth = width_;
       image->_encodedHeight = height_;
-      image->_timeStamp = output_timestamp_;
+      image->SetTimestamp(output_timestamp_);
       image->capture_time_ms_ = output_render_time_ms_;
       image->rotation_ = output_rotation_;
       image->content_type_ = (codec_mode_ == VideoCodecMode::kScreensharing)
diff --git a/sdk/android/src/jni/videodecoderwrapper.cc b/sdk/android/src/jni/videodecoderwrapper.cc
index 7bd2b5a..d5812cd 100644
--- a/sdk/android/src/jni/videodecoderwrapper.cc
+++ b/sdk/android/src/jni/videodecoderwrapper.cc
@@ -98,12 +98,12 @@
   EncodedImage input_image(image_param);
   // We use RTP timestamp for capture time because capture_time_ms_ is always 0.
   input_image.capture_time_ms_ =
-      input_image._timeStamp / kNumRtpTicksPerMillisec;
+      input_image.Timestamp() / kNumRtpTicksPerMillisec;
 
   FrameExtraInfo frame_extra_info;
   frame_extra_info.timestamp_ns =
       input_image.capture_time_ms_ * rtc::kNumNanosecsPerMillisec;
-  frame_extra_info.timestamp_rtp = input_image._timeStamp;
+  frame_extra_info.timestamp_rtp = input_image.Timestamp();
   frame_extra_info.timestamp_ntp = input_image.ntp_time_ms_;
   frame_extra_info.qp =
       qp_parsing_enabled_ ? ParseQP(input_image) : absl::nullopt;
diff --git a/sdk/android/src/jni/videoencoderwrapper.cc b/sdk/android/src/jni/videoencoderwrapper.cc
index 9500a1f..717f944 100644
--- a/sdk/android/src/jni/videoencoderwrapper.cc
+++ b/sdk/android/src/jni/videoencoderwrapper.cc
@@ -274,7 +274,7 @@
                          task_buffer.size(), task_buffer.size());
       frame._encodedWidth = encoded_width;
       frame._encodedHeight = encoded_height;
-      frame._timeStamp = frame_extra_info.timestamp_rtp;
+      frame.SetTimestamp(frame_extra_info.timestamp_rtp);
       frame.capture_time_ms_ = capture_time_ns / rtc::kNumNanosecsPerMillisec;
       frame._frameType = (FrameType)frame_type;
       frame.rotation_ = (VideoRotation)rotation;
diff --git a/sdk/objc/Framework/Classes/PeerConnection/RTCEncodedImage.mm b/sdk/objc/Framework/Classes/PeerConnection/RTCEncodedImage.mm
index 467f225..e9c0a8c 100644
--- a/sdk/objc/Framework/Classes/PeerConnection/RTCEncodedImage.mm
+++ b/sdk/objc/Framework/Classes/PeerConnection/RTCEncodedImage.mm
@@ -39,7 +39,7 @@
                              freeWhenDone:NO];
     _encodedWidth = rtc::dchecked_cast<int32_t>(encodedImage._encodedWidth);
     _encodedHeight = rtc::dchecked_cast<int32_t>(encodedImage._encodedHeight);
-    _timeStamp = encodedImage._timeStamp;
+    _timeStamp = encodedImage.Timestamp();
     _captureTimeMs = encodedImage.capture_time_ms_;
     _ntpTimeMs = encodedImage.ntp_time_ms_;
     _flags = encodedImage.timing_.flags;
@@ -63,7 +63,7 @@
       (uint8_t *)_buffer.bytes, (size_t)_buffer.length, (size_t)_buffer.length);
   encodedImage._encodedWidth = rtc::dchecked_cast<uint32_t>(_encodedWidth);
   encodedImage._encodedHeight = rtc::dchecked_cast<uint32_t>(_encodedHeight);
-  encodedImage._timeStamp = _timeStamp;
+  encodedImage.SetTimestamp(_timeStamp);
   encodedImage.capture_time_ms_ = _captureTimeMs;
   encodedImage.ntp_time_ms_ = _ntpTimeMs;
   encodedImage.timing_.flags = _flags;
diff --git a/test/configurable_frame_size_encoder.cc b/test/configurable_frame_size_encoder.cc
index 4fb8df4..7671a3c 100644
--- a/test/configurable_frame_size_encoder.cc
+++ b/test/configurable_frame_size_encoder.cc
@@ -48,7 +48,7 @@
   encodedImage._encodedHeight = inputImage.height();
   encodedImage._encodedWidth = inputImage.width();
   encodedImage._frameType = kVideoFrameKey;
-  encodedImage._timeStamp = inputImage.timestamp();
+  encodedImage.SetTimestamp(inputImage.timestamp());
   encodedImage.capture_time_ms_ = inputImage.render_time_ms();
   RTPFragmentationHeader* fragmentation = NULL;
   CodecSpecificInfo specific;
diff --git a/test/fake_decoder.cc b/test/fake_decoder.cc
index eaf12d0..2155008 100644
--- a/test/fake_decoder.cc
+++ b/test/fake_decoder.cc
@@ -40,7 +40,7 @@
   VideoFrame frame(I420Buffer::Create(width_, height_),
                    webrtc::kVideoRotation_0,
                    render_time_ms * rtc::kNumMicrosecsPerMillisec);
-  frame.set_timestamp(input._timeStamp);
+  frame.set_timestamp(input.Timestamp());
   frame.set_ntp_time_ms(input.ntp_time_ms_);
 
   callback_->Decoded(frame);
diff --git a/test/fake_encoder.cc b/test/fake_encoder.cc
index 12a77e3..5a3e2b8 100644
--- a/test/fake_encoder.cc
+++ b/test/fake_encoder.cc
@@ -144,7 +144,7 @@
     std::unique_ptr<uint8_t[]> encoded_buffer(new uint8_t[num_encoded_bytes]);
     memcpy(encoded_buffer.get(), encoded_buffer_, num_encoded_bytes);
     EncodedImage encoded(encoded_buffer.get(), stream_bytes, num_encoded_bytes);
-    encoded._timeStamp = input_image.timestamp();
+    encoded.SetTimestamp(input_image.timestamp());
     encoded.capture_time_ms_ = input_image.render_time_ms();
     encoded._frameType = (*frame_types)[i];
     encoded._encodedWidth = simulcast_streams[i].width;
diff --git a/test/fuzzers/frame_buffer2_fuzzer.cc b/test/fuzzers/frame_buffer2_fuzzer.cc
index 57f0c74..2d58309 100644
--- a/test/fuzzers/frame_buffer2_fuzzer.cc
+++ b/test/fuzzers/frame_buffer2_fuzzer.cc
@@ -57,7 +57,6 @@
   ~FuzzyFrameObject() {}
 
   bool GetBitstream(uint8_t* destination) const override { return false; }
-  uint32_t Timestamp() const override { return timestamp; }
   int64_t ReceivedTime() const override { return 0; }
   int64_t RenderTime() const override { return _renderTimeMs; }
 };
@@ -76,7 +75,7 @@
       std::unique_ptr<FuzzyFrameObject> frame(new FuzzyFrameObject());
       frame->id.picture_id = reader.GetNum<int64_t>();
       frame->id.spatial_layer = reader.GetNum<uint8_t>();
-      frame->timestamp = reader.GetNum<uint32_t>();
+      frame->SetTimestamp(reader.GetNum<uint32_t>());
       frame->num_references = reader.GetNum<uint8_t>() %
                               video_coding::EncodedFrame::kMaxFrameReferences;
 
diff --git a/video/send_statistics_proxy.cc b/video/send_statistics_proxy.cc
index 2fb955c..2091bad 100644
--- a/video/send_statistics_proxy.cc
+++ b/video/send_statistics_proxy.cc
@@ -250,7 +250,7 @@
   // Check for jump in timestamp.
   if (!encoded_frames_.empty()) {
     uint32_t oldest_timestamp = encoded_frames_.begin()->first;
-    if (ForwardDiff(oldest_timestamp, encoded_frame._timeStamp) >
+    if (ForwardDiff(oldest_timestamp, encoded_frame.Timestamp()) >
         kMaxEncodedFrameTimestampDiff) {
       // Gap detected, clear frames to have a sequence where newest timestamp
       // is not too far away from oldest in order to distinguish old and new.
@@ -262,7 +262,7 @@
   if (it == encoded_frames_.end()) {
     // First frame with this timestamp.
     encoded_frames_.insert(
-        std::make_pair(encoded_frame._timeStamp,
+        std::make_pair(encoded_frame.Timestamp(),
                        Frame(now_ms, encoded_frame._encodedWidth,
                              encoded_frame._encodedHeight, simulcast_idx)));
     sent_fps_counter_.Add(1);
diff --git a/video/send_statistics_proxy_unittest.cc b/video/send_statistics_proxy_unittest.cc
index 6942e36..d4a7876 100644
--- a/video/send_statistics_proxy_unittest.cc
+++ b/video/send_statistics_proxy_unittest.cc
@@ -1036,7 +1036,7 @@
   // Not enough samples, stats should not be updated.
   for (int i = 0; i < kMinSamples - 1; ++i) {
     fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
-    encoded_image._timeStamp += 90 * 1000 / kFps;
+    encoded_image.SetTimestamp(encoded_image.Timestamp() + 90 * 1000 / kFps);
     statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
   }
   SetUp();  // Reset stats proxy also causes histograms to be reported.
@@ -1044,10 +1044,10 @@
   EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.SentHeightInPixels"));
 
   // Enough samples, max resolution per frame should be reported.
-  encoded_image._timeStamp = 0xffff0000;  // Will wrap.
+  encoded_image.SetTimestamp(0xffff0000);  // Will wrap.
   for (int i = 0; i < kMinSamples; ++i) {
     fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
-    encoded_image._timeStamp += 90 * 1000 / kFps;
+    encoded_image.SetTimestamp(encoded_image.Timestamp() + 90 * 1000 / kFps);
     encoded_image._encodedWidth = kWidth;
     encoded_image._encodedHeight = kHeight;
     statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
@@ -1083,7 +1083,7 @@
   int frames = kMinPeriodicSamples * kFpsPeriodicIntervalMs * kFps / 1000 + 1;
   for (int i = 0; i < frames; ++i) {
     fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
-    ++encoded_image._timeStamp;
+    encoded_image.SetTimestamp(encoded_image.Timestamp() + 1);
     statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
     // Frame with same timestamp should not be counted.
     statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
@@ -1124,7 +1124,7 @@
   int frames = kMinPeriodicSamples * kFpsPeriodicIntervalMs * kFps / 1000;
   for (int i = 0; i < frames; ++i) {
     fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
-    encoded_image._timeStamp = i + 1;
+    encoded_image.SetTimestamp(i + 1);
     statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
   }
   // Suspend.
@@ -1133,7 +1133,7 @@
 
   for (int i = 0; i < frames; ++i) {
     fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
-    encoded_image._timeStamp = i + 1;
+    encoded_image.SetTimestamp(i + 1);
     statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
   }
   // Suspended time interval should not affect the framerate.
@@ -1431,7 +1431,8 @@
   encoded_image._encodedHeight = kHeight;
   for (int i = 0; i < kMinSamples; ++i) {
     fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
-    encoded_image._timeStamp += (kRtpClockRateHz / kFps);
+    encoded_image.SetTimestamp(encoded_image.Timestamp() +
+                               (kRtpClockRateHz / kFps));
     statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
   }
 
@@ -1466,7 +1467,8 @@
   EncodedImage encoded_image;
   for (int i = 0; i < kMinSamples; ++i) {
     fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
-    encoded_image._timeStamp += (kRtpClockRateHz / kFps);
+    encoded_image.SetTimestamp(encoded_image.Timestamp() +
+                               (kRtpClockRateHz / kFps));
     encoded_image._encodedWidth = kWidth;
     encoded_image._encodedHeight = kHeight;
     statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
@@ -1511,7 +1513,8 @@
   encoded_image._encodedHeight = kHeight / 2;
   for (int i = 0; i < kMinSamples; ++i) {
     fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
-    encoded_image._timeStamp += (kRtpClockRateHz / kFps);
+    encoded_image.SetTimestamp(encoded_image.Timestamp() +
+                               (kRtpClockRateHz / kFps));
     statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
   }
 
@@ -1623,21 +1626,23 @@
   encoded_image._encodedHeight = kHeight / 2;
   for (int i = 0; i < kMinSamples; ++i) {
     fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
-    encoded_image._timeStamp += (kRtpClockRateHz / kFps);
+    encoded_image.SetTimestamp(encoded_image.Timestamp() +
+                               (kRtpClockRateHz / kFps));
     statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
     EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
   }
 
   // First frame removed from EncodedFrameMap, stats updated.
   fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
-  ++encoded_image._timeStamp;
+  encoded_image.SetTimestamp(encoded_image.Timestamp() + 1);
   statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
   EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
 
   // Two streams encoded.
   for (int i = 0; i < kMinSamples; ++i) {
     fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
-    encoded_image._timeStamp += (kRtpClockRateHz / kFps);
+    encoded_image.SetTimestamp(encoded_image.Timestamp() +
+                               (kRtpClockRateHz / kFps));
     encoded_image._encodedWidth = kWidth;
     encoded_image._encodedHeight = kHeight;
     statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
@@ -1650,7 +1655,8 @@
 
   // First frame with two streams removed, expect no resolution limit.
   fake_clock_.AdvanceTimeMilliseconds(1000 / kFps);
-  encoded_image._timeStamp += (kRtpClockRateHz / kFps);
+  encoded_image.SetTimestamp(encoded_image.Timestamp() +
+                             (kRtpClockRateHz / kFps));
   statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
   EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
 
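Throughout these tests the old `encoded_image._timeStamp += ...` becomes a
read-modify-write through the accessors. The arithmetic is unchanged: with a
90 kHz RTP clock and kFps frames per second, each frame advances the
timestamp by kRtpClockRateHz / kFps ticks. A condensed sketch (constant
values mirror the test constants and are illustrative):

    constexpr int kRtpClockRateHz = 90000;  // RTP video clock
    constexpr int kFps = 30;
    // One frame interval = 90000 / 30 = 3000 RTP ticks.
    encoded_image.SetTimestamp(encoded_image.Timestamp() +
                               kRtpClockRateHz / kFps);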
diff --git a/video/video_receive_stream.cc b/video/video_receive_stream.cc
index 088c0eb..bd61881 100644
--- a/video/video_receive_stream.cc
+++ b/video/video_receive_stream.cc
@@ -332,7 +332,7 @@
     }
   }
 
-  return Result(Result::OK, encoded_image._timeStamp);
+  return Result(Result::OK, encoded_image.Timestamp());
 }
 
 void VideoReceiveStream::SendNack(
diff --git a/video/video_send_stream_impl.cc b/video/video_send_stream_impl.cc
index f660725..0c15856 100644
--- a/video/video_send_stream_impl.cc
+++ b/video/video_send_stream_impl.cc
@@ -529,7 +529,7 @@
   if (config_->post_encode_callback) {
     config_->post_encode_callback->EncodedFrameCallback(EncodedFrame(
         encoded_image._buffer, encoded_image._length, encoded_image._frameType,
-        simulcast_idx, encoded_image._timeStamp));
+        simulcast_idx, encoded_image.Timestamp()));
   }
   {
     rtc::CritScope lock(&encoder_activity_crit_sect_);
diff --git a/video/video_send_stream_tests.cc b/video/video_send_stream_tests.cc
index 3baf13a..cb11ebc 100644
--- a/video/video_send_stream_tests.cc
+++ b/video/video_send_stream_tests.cc
@@ -3037,7 +3037,7 @@
 
       uint8_t buffer[16] = {0};
       EncodedImage encoded(buffer, sizeof(buffer), sizeof(buffer));
-      encoded._timeStamp = input_image.timestamp();
+      encoded.SetTimestamp(input_image.timestamp());
       encoded.capture_time_ms_ = input_image.render_time_ms();
 
       for (size_t i = 0; i < kNumStreams; ++i) {
diff --git a/video/video_stream_decoder_impl.cc b/video/video_stream_decoder_impl.cc
index 5313d6a..3e67e5c 100644
--- a/video/video_stream_decoder_impl.cc
+++ b/video/video_stream_decoder_impl.cc
@@ -186,7 +186,7 @@
     }
 
     int64_t decode_start_time_ms = rtc::TimeMillis();
-    int64_t timestamp = frame->timestamp;
+    int64_t timestamp = frame->Timestamp();
     int64_t render_time_us = frame->RenderTimeMs() * 1000;
     bookkeeping_queue_.PostTask(
         [this, decode_start_time_ms, timestamp, render_time_us]() {
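Here the timestamp is copied into an int64_t local before the task is posted,
so the lambda captures plain values instead of touching |frame| from the
bookkeeping thread; the uint32_t RTP timestamp widens losslessly. A sketch of
the pattern (the thread-safety rationale is an assumption; the widening is
visible in the hunk above):

    int64_t timestamp = frame->Timestamp();  // uint32_t -> int64_t, lossless
    bookkeeping_queue_.PostTask([timestamp]() {
      // Bookkeeping needs only the value, not the frame object.
    });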
diff --git a/video/video_stream_encoder.cc b/video/video_stream_encoder.cc
index 4bf3a86..e7bc7e1 100644
--- a/video/video_stream_encoder.cc
+++ b/video/video_stream_encoder.cc
@@ -887,7 +887,7 @@
       sink_->OnEncodedImage(encoded_image, codec_specific_info, fragmentation);
 
   int64_t time_sent_us = rtc::TimeMicros();
-  uint32_t timestamp = encoded_image._timeStamp;
+  uint32_t timestamp = encoded_image.Timestamp();
   const int qp = encoded_image.qp_;
   int64_t capture_time_us =
       encoded_image.capture_time_ms_ * rtc::kNumMicrosecsPerMillisec;
diff --git a/video/video_stream_encoder_unittest.cc b/video/video_stream_encoder_unittest.cc
index cda8a05..84063d4 100644
--- a/video/video_stream_encoder_unittest.cc
+++ b/video/video_stream_encoder_unittest.cc
@@ -656,7 +656,7 @@
         const RTPFragmentationHeader* fragmentation) override {
       rtc::CritScope lock(&crit_);
       EXPECT_TRUE(expect_frames_);
-      last_timestamp_ = encoded_image._timeStamp;
+      last_timestamp_ = encoded_image.Timestamp();
       last_width_ = encoded_image._encodedWidth;
       last_height_ = encoded_image._encodedHeight;
       encoded_frame_event_.Set();