Relanding r3952: VCM: Updating receiver logic
BUG=r1734
R=stefan@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/1433004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@3970 4adac7df-926f-26a2-2b94-8c16560cd09d
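
For reference, a minimal sketch of how the new jitter buffer API is driven, mirroring the DecodeCompleteFrame/DecodeIncompleteFrame helpers this change adds to jitter_buffer_unittest.cc. The function name GetNextFrameSketch is hypothetical; the caller is expected to hand the frame back via ReleaseFrame() when decoding is done.

  #include "webrtc/modules/video_coding/main/source/jitter_buffer.h"

  namespace webrtc {

  VCMEncodedFrame* GetNextFrameSketch(VCMJitterBuffer* jitter_buffer,
                                      uint32_t max_wait_time_ms) {
    uint32_t timestamp = 0;
    // First try to find a complete frame, waiting up to |max_wait_time_ms|.
    bool found = jitter_buffer->NextCompleteTimestamp(max_wait_time_ms,
                                                      &timestamp);
    if (!found) {
      // Fall back to an incomplete frame (only succeeds when decoding with
      // errors is enabled).
      found = jitter_buffer->NextMaybeIncompleteTimestamp(&timestamp);
    }
    if (!found)
      return NULL;
    // Pull the frame out of the buffer and mark it as being decoded.
    return jitter_buffer->ExtractAndSetDecode(timestamp);
  }

  }  // namespace webrtc
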
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer.cc b/webrtc/modules/video_coding/main/source/jitter_buffer.cc
index fecefc9..2003383 100644
--- a/webrtc/modules/video_coding/main/source/jitter_buffer.cc
+++ b/webrtc/modules/video_coding/main/source/jitter_buffer.cc
@@ -343,54 +343,6 @@
   TRACE_COUNTER1("webrtc", "JBIncomingBitrate", incoming_bit_rate_);
 }
 
-// Wait for the first packet in the next frame to arrive.
-int64_t VCMJitterBuffer::NextTimestamp(uint32_t max_wait_time_ms,
-                                       FrameType* incoming_frame_type,
-                                       int64_t* render_time_ms) {
-  assert(incoming_frame_type);
-  assert(render_time_ms);
-  if (!running_) {
-    return -1;
-  }
-
-  crit_sect_->Enter();
-
-  // Finding oldest frame ready for decoder, check sequence number and size.
-  CleanUpOldOrEmptyFrames();
-
-  FrameList::iterator it = frame_list_.begin();
-
-  if (it == frame_list_.end()) {
-    packet_event_->Reset();
-    crit_sect_->Leave();
-
-    if (packet_event_->Wait(max_wait_time_ms) == kEventSignaled) {
-      // are we closing down the Jitter buffer
-      if (!running_) {
-        return -1;
-      }
-      crit_sect_->Enter();
-
-      CleanUpOldOrEmptyFrames();
-      it = frame_list_.begin();
-    } else {
-      crit_sect_->Enter();
-    }
-  }
-
-  if (it == frame_list_.end()) {
-    crit_sect_->Leave();
-    return -1;
-  }
-  // We have a frame.
-  *incoming_frame_type = (*it)->FrameType();
-  *render_time_ms = (*it)->RenderTimeMs();
-  const uint32_t timestamp = (*it)->TimeStamp();
-  crit_sect_->Leave();
-
-  return timestamp;
-}
-
 // Answers the question:
 // Will the packet sequence be complete if the next frame is grabbed for
 // decoding right now? That is, have we lost a frame between the last decoded
@@ -430,12 +382,12 @@
 
 // Returns immediately or a |max_wait_time_ms| ms event hang waiting for a
 // complete frame, |max_wait_time_ms| decided by caller.
-VCMEncodedFrame* VCMJitterBuffer::GetCompleteFrameForDecoding(
-    uint32_t max_wait_time_ms) {
-  TRACE_EVENT0("webrtc", "JB::GetCompleteFrame");
+bool VCMJitterBuffer::NextCompleteTimestamp(
+    uint32_t max_wait_time_ms, uint32_t* timestamp) {
+  TRACE_EVENT0("webrtc", "JB::NextCompleteTimestamp");
   crit_sect_->Enter();
   if (!running_) {
-    return NULL;
+    return false;
   }
   CleanUpOldOrEmptyFrames();
 
@@ -453,9 +405,8 @@
         // Are we closing down the Jitter buffer?
         if (!running_) {
           crit_sect_->Leave();
-          return NULL;
+          return false;
         }
-
         // Finding oldest frame ready for decoder, but check
         // sequence number and size
         CleanUpOldOrEmptyFrames();
@@ -484,81 +435,85 @@
 
   if (it == frame_list_.end()) {
       crit_sect_->Leave();
-      return NULL;
+      return false;
   }
 
   VCMFrameBuffer* oldest_frame = *it;
 
-  it = frame_list_.erase(it);
-  if (frame_list_.empty()) {
-    TRACE_EVENT_INSTANT1("webrtc", "JB::FrameListEmptied",
-                         "type", "GetCompleteFrameForDecoding");
-  }
-
-  // Update jitter estimate.
-  const bool retransmitted = (oldest_frame->GetNackCount() > 0);
-  if (retransmitted) {
-    jitter_estimate_.FrameNacked();
-  } else if (oldest_frame->Length() > 0) {
-    // Ignore retransmitted and empty frames.
-    UpdateJitterEstimate(*oldest_frame, false);
-  }
-
-  oldest_frame->SetState(kStateDecoding);
-
-  // We have a frame - update decoded state with frame info.
-  last_decoded_state_.SetState(oldest_frame);
-  DropPacketsFromNackList(last_decoded_state_.sequence_num());
-
+  *timestamp = oldest_frame->TimeStamp();
   crit_sect_->Leave();
-  return oldest_frame;
+  return true;
 }
 
-VCMEncodedFrame* VCMJitterBuffer::MaybeGetIncompleteFrameForDecoding() {
-  TRACE_EVENT0("webrtc", "JB::MaybeGetIncompleteFrameForDecoding");
+bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(
+    uint32_t* timestamp) {
+  TRACE_EVENT0("webrtc", "JB::NextMaybeIncompleteTimestamp");
   CriticalSectionScoped cs(crit_sect_);
   if (!running_) {
-    return NULL;
+    return false;
   }
   if (!decode_with_errors_) {
     // No point to continue, as we are not decoding with errors.
-    return NULL;
+    return false;
   }
 
   CleanUpOldOrEmptyFrames();
 
   if (frame_list_.empty()) {
-    return NULL;
+    return false;
   }
 
   VCMFrameBuffer* oldest_frame = frame_list_.front();
   // If we have only one frame in the buffer, release it only if it is complete.
   if (frame_list_.size() <= 1 && oldest_frame->GetState() != kStateComplete) {
-    return NULL;
+    return false;
   }
 
   // Always start with a key frame.
   if (last_decoded_state_.in_initial_state() &&
       oldest_frame->FrameType() != kVideoFrameKey) {
-    return NULL;
+    return false;
   }
 
-  // Incomplete frame pulled out from jitter buffer,
+  *timestamp = oldest_frame->TimeStamp();
+  return true;
+}
+
+VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
+  TRACE_EVENT0("webrtc", "JB::ExtractAndSetDecode");
+  CriticalSectionScoped cs(crit_sect_);
+
+  if (!running_) {
+    return NULL;
+  }
+  // Extract the frame with the desired timestamp.
+  FrameList::iterator it = std::find_if(
+      frame_list_.begin(),
+      frame_list_.end(),
+      FrameEqualTimestamp(timestamp));
+
+  if (it == frame_list_.end()) {
+    return NULL;
+  }
+  // We got the frame.
+  VCMFrameBuffer* frame = *it;
+
+  // Frame pulled out from jitter buffer,
   // update the jitter estimate with what we currently know.
-  const bool retransmitted = (oldest_frame->GetNackCount() > 0);
+  const bool retransmitted = (frame->GetNackCount() > 0);
   if (retransmitted) {
     jitter_estimate_.FrameNacked();
-  } else if (oldest_frame->Length() > 0) {
+  } else if (frame->Length() > 0) {
     // Ignore retransmitted and empty frames.
     // Update with the previous incomplete frame first
     if (waiting_for_completion_.latest_packet_time >= 0) {
       UpdateJitterEstimate(waiting_for_completion_, true);
     }
     // Then wait for this one to get complete
-    waiting_for_completion_.frame_size = oldest_frame->Length();
+    waiting_for_completion_.frame_size = frame->Length();
     waiting_for_completion_.latest_packet_time =
-      oldest_frame->LatestPacketTimeMs();
-    waiting_for_completion_.timestamp = oldest_frame->TimeStamp();
+        frame->LatestPacketTimeMs();
+    waiting_for_completion_.timestamp = frame->TimeStamp();
   }
   frame_list_.erase(frame_list_.begin());
   if (frame_list_.empty()) {
@@ -566,21 +521,21 @@
                          "type", "MaybeGetIncompleteFrameForDecoding");
   }
 
-  // Look for previous frame loss
-  VerifyAndSetPreviousFrameLost(oldest_frame);
+  // Look for previous frame loss.
+  VerifyAndSetPreviousFrameLost(frame);
 
   // The state must be changed to decoding before cleaning up zero sized
   // frames to avoid empty frames being cleaned up and then given to the
   // decoder.
   // Set as decoding. Propagates the missing_frame bit.
-  oldest_frame->SetState(kStateDecoding);
+  frame->SetState(kStateDecoding);
 
-  num_not_decodable_packets_ += oldest_frame->NotDecodablePackets();
+  num_not_decodable_packets_ += frame->NotDecodablePackets();
 
   // We have a frame - update decoded state with frame info.
-  last_decoded_state_.SetState(oldest_frame);
+  last_decoded_state_.SetState(frame);
   DropPacketsFromNackList(last_decoded_state_.sequence_num());
-  return oldest_frame;
+  return frame;
 }
 
 // Release frame when done with decoding. Should never be used to release
@@ -594,7 +549,7 @@
 
 // Gets frame to use for this timestamp. If no match, get empty frame.
 int VCMJitterBuffer::GetFrame(const VCMPacket& packet,
-                               VCMEncodedFrame*& frame) {
+                              VCMEncodedFrame*& frame) {
   if (!running_) {  // Don't accept incoming packets until we are started.
     return VCM_UNINITIALIZED;
   }
@@ -766,6 +721,8 @@
       break;
     }
     case kCompleteSession: {
+      // Don't let a kFirstPacket return value override a complete session.
+      ret = kCompleteSession;
       // Only update return value for a JB flush indicator.
       if (UpdateFrameState(frame) == kFlushIndicator)
         ret = kFlushIndicator;
@@ -982,11 +939,15 @@
   return last_decoded_state_.time_stamp();
 }
 
-int VCMJitterBuffer::RenderBufferSizeMs() {
+void VCMJitterBuffer::RenderBufferSize(
+    uint32_t* timestamp_start, uint32_t* timestamp_end) {
   CriticalSectionScoped cs(crit_sect_);
   CleanUpOldOrEmptyFrames();
+  *timestamp_start = 0u;
+  *timestamp_end = 0u;
+
   if (frame_list_.empty()) {
-    return 0;
+    return;
   }
   FrameList::iterator frame_it = frame_list_.begin();
   VCMFrameBuffer* current_frame = *frame_it;
@@ -998,16 +959,16 @@
     frame_it = find_if(frame_list_.begin(), frame_list_.end(),
         CompleteKeyFrameCriteria());
     if (frame_it == frame_list_.end()) {
-      return 0;
+      return;
     }
+    *timestamp_start = last_decoded_state_.time_stamp();
     current_frame = *frame_it;
     previous_state.SetState(current_frame);
+    ++frame_it;
   } else {
     previous_state.CopyFrom(last_decoded_state_);
   }
   bool continuous_complete = true;
-  int64_t start_render = current_frame->RenderTimeMs();
-  ++frame_it;
   while (frame_it != frame_list_.end() && continuous_complete) {
     current_frame = *frame_it;
     continuous_complete = current_frame->IsSessionComplete() &&
@@ -1018,8 +979,7 @@
   // Desired frame is the previous one.
   --frame_it;
   current_frame = *frame_it;
-  // Got the frame, now compute the time delta.
-  return static_cast<int>(current_frame->RenderTimeMs() - start_render);
+  *timestamp_end = current_frame->TimeStamp();
 }
 
 // Set the frame state to free and remove it from the sorted
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer.h b/webrtc/modules/video_coding/main/source/jitter_buffer.h
index 21f3db8..f4723a9 100644
--- a/webrtc/modules/video_coding/main/source/jitter_buffer.h
+++ b/webrtc/modules/video_coding/main/source/jitter_buffer.h
@@ -89,31 +89,25 @@
   void IncomingRateStatistics(unsigned int* framerate,
                               unsigned int* bitrate);
 
-  // Waits for the first packet in the next frame to arrive and then returns
-  // the timestamp of that frame. |incoming_frame_type| and |render_time_ms| are
-  // set to the frame type and render time of the next frame.
-  // Blocks for up to |max_wait_time_ms| ms. Returns -1 if no packet has arrived
-  // after |max_wait_time_ms| ms.
-  int64_t NextTimestamp(uint32_t max_wait_time_ms,
-                        FrameType* incoming_frame_type,
-                        int64_t* render_time_ms);
-
   // Checks if the packet sequence will be complete if the next frame would be
   // grabbed for decoding. That is, if a frame has been lost between the
   // last decoded frame and the next, or if the next frame is missing one
   // or more packets.
   bool CompleteSequenceWithNextFrame();
 
-  // Returns a complete frame ready for decoding. Allows max_wait_time_ms to
-  // wait for such a frame, if one is unavailable.
-  // Always starts with a key frame.
-  VCMEncodedFrame* GetCompleteFrameForDecoding(uint32_t max_wait_time_ms);
+  // Waits up to |max_wait_time_ms| ms for a complete frame to arrive. If one
+  // is found, returns true and sets |timestamp| to that frame's timestamp.
+  // Otherwise returns false.
+  bool NextCompleteTimestamp(uint32_t max_wait_time_ms, uint32_t* timestamp);
 
-  // Get next frame for decoding without delay. If decoding with errors is not
-  // enabled, will return NULL. Actual returned frame will be the next one in
-  // the list, either complete or not.
-  // TODO(mikhal): Consider only allowing decodable/complete.
-  VCMEncodedFrame* MaybeGetIncompleteFrameForDecoding();
+  // Locates a frame for decoding (even an incomplete one) without delay. If
+  // one is found, returns true and sets |timestamp| to that frame's timestamp.
+  // Otherwise returns false.
+  bool NextMaybeIncompleteTimestamp(uint32_t* timestamp);
+
+  // Extracts the frame corresponding to |timestamp| and sets it to the
+  // decoding state.
+  VCMEncodedFrame* ExtractAndSetDecode(uint32_t timestamp);
 
   // Releases a frame returned from the jitter buffer, should be called when
   // done with decoding.
@@ -133,8 +127,7 @@
                                   const VCMPacket& packet);
 
   // Enable a max filter on the jitter estimate by setting an initial
-  // non-zero delay. When set to zero (default), the last jitter
-  // estimate will be used.
+  // non-zero delay.
   void SetMaxJitterEstimate(bool enable);
 
   // Returns the estimated jitter in milliseconds.
@@ -166,8 +159,9 @@
   int64_t LastDecodedTimestamp() const;
   bool decode_with_errors() const {return decode_with_errors_;}
 
-  // Returns size in time (milliseconds) of complete continuous frames.
-  int RenderBufferSizeMs();
+  // Used to compute the duration of complete continuous frames. Returns the
+  // timestamps at the start and end of the continuous complete range.
+  void RenderBufferSize(uint32_t* timestamp_start, uint32_t* timestamp_end);
 
  private:
   class SequenceNumberLessThan {
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc b/webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc
index 7b0c77a..b5307f07 100644
--- a/webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc
@@ -101,15 +101,23 @@
   }
 
   bool DecodeCompleteFrame() {
-    VCMEncodedFrame* frame = jitter_buffer_->GetCompleteFrameForDecoding(0);
+    uint32_t timestamp = 0;
+    bool found_frame = jitter_buffer_->NextCompleteTimestamp(0, &timestamp);
+    if (!found_frame)
+      return false;
+
+    VCMEncodedFrame* frame = jitter_buffer_->ExtractAndSetDecode(timestamp);
     bool ret = (frame != NULL);
     jitter_buffer_->ReleaseFrame(frame);
     return ret;
   }
 
   bool DecodeIncompleteFrame() {
-    VCMEncodedFrame* frame =
-        jitter_buffer_->MaybeGetIncompleteFrameForDecoding();
+    uint32_t timestamp = 0;
+    bool found_frame = jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp);
+    if (!found_frame)
+      return false;
+    VCMEncodedFrame* frame = jitter_buffer_->ExtractAndSetDecode(timestamp);
     bool ret = (frame != NULL);
     jitter_buffer_->ReleaseFrame(frame);
     return ret;
@@ -470,7 +478,7 @@
   clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
   for (int i = 0; i < 5; ++i) {
     if (stream_generator_->NextSequenceNumber()  != 65535) {
-      EXPECT_EQ(kFirstPacket, InsertPacketAndPop(0));
+      EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
       EXPECT_FALSE(request_key_frame);
     } else {
       stream_generator_->NextPacket(NULL);  // Drop packet
@@ -479,7 +487,7 @@
                                      clock_->TimeInMilliseconds());
     clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
   }
-  EXPECT_EQ(kFirstPacket, InsertPacketAndPop(0));
+  EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
   EXPECT_FALSE(request_key_frame);
   uint16_t nack_list_size = 0;
   bool extended = false;
diff --git a/webrtc/modules/video_coding/main/source/jitter_estimator.cc b/webrtc/modules/video_coding/main/source/jitter_estimator.cc
index 45b5422..d5d21d8 100644
--- a/webrtc/modules/video_coding/main/source/jitter_estimator.cc
+++ b/webrtc/modules/video_coding/main/source/jitter_estimator.cc
@@ -405,12 +405,13 @@
     }
 }
 
-void VCMJitterEstimator::SetMaxJitterEstimate(bool enable) {
-  if (enable) {
-    _jitterEstimateMode = kMaxEstimate;
-  } else {
-    _jitterEstimateMode = kLastEstimate;
-  }
+void VCMJitterEstimator::SetMaxJitterEstimate(bool enable)
+{
+    if (enable) {
+        _jitterEstimateMode = kMaxEstimate;
+    } else {
+        _jitterEstimateMode = kLastEstimate;
+    }
 }
 
 // Returns the current filtered estimate if available,
diff --git a/webrtc/modules/video_coding/main/source/receiver.cc b/webrtc/modules/video_coding/main/source/receiver.cc
index c77936c..3457260 100644
--- a/webrtc/modules/video_coding/main/source/receiver.cc
+++ b/webrtc/modules/video_coding/main/source/receiver.cc
@@ -103,59 +103,31 @@
                    packet.seqNum, packet.timestamp,
                    MaskWord64ToUWord32(clock_->TimeInMilliseconds()));
     }
-
-    const int64_t now_ms = clock_->TimeInMilliseconds();
-
-    int64_t render_time_ms = timing_->RenderTimeMs(packet.timestamp, now_ms);
-
-    if (render_time_ms < 0) {
-      // Render time error. Assume that this is due to some change in the
-      // incoming video stream and reset the JB and the timing.
-      jitter_buffer_.Flush();
-      timing_->Reset(clock_->TimeInMilliseconds());
-      return VCM_FLUSH_INDICATOR;
-    } else if (render_time_ms < now_ms - max_video_delay_ms_) {
-      WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
-                   VCMId(vcm_id_, receiver_id_),
-                   "This frame should have been rendered more than %u ms ago."
-                   "Flushing jitter buffer and resetting timing.",
-                   max_video_delay_ms_);
-      jitter_buffer_.Flush();
-      timing_->Reset(clock_->TimeInMilliseconds());
-      return VCM_FLUSH_INDICATOR;
-    } else if (static_cast<int>(timing_->TargetVideoDelay()) >
-               max_video_delay_ms_) {
-      WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
-                   VCMId(vcm_id_, receiver_id_),
-                   "More than %u ms target delay. Flushing jitter buffer and"
-                   "resetting timing.", max_video_delay_ms_);
-      jitter_buffer_.Flush();
-      timing_->Reset(clock_->TimeInMilliseconds());
-      return VCM_FLUSH_INDICATOR;
-    }
-
     // First packet received belonging to this frame.
-    if (buffer->Length() == 0) {
+    if (buffer->Length() == 0 && master_) {
       const int64_t now_ms = clock_->TimeInMilliseconds();
-      if (master_) {
-        // Only trace the primary receiver to make it possible to parse and plot
-        // the trace file.
-        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
-                     VCMId(vcm_id_, receiver_id_),
-                     "First packet of frame %u at %u", packet.timestamp,
-                     MaskWord64ToUWord32(now_ms));
-      }
-      render_time_ms = timing_->RenderTimeMs(packet.timestamp, now_ms);
-      if (render_time_ms >= 0) {
-        buffer->SetRenderTime(render_time_ms);
-      } else {
-        buffer->SetRenderTime(now_ms);
-      }
+      // Only trace the primary receiver to make it possible to parse and plot
+      // the trace file.
+      WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
+          VCMId(vcm_id_, receiver_id_),
+          "First packet of frame %u at %u", packet.timestamp,
+          MaskWord64ToUWord32(now_ms));
     }
 
     // Insert packet into the jitter buffer both media and empty packets.
     const VCMFrameBufferEnum
     ret = jitter_buffer_.InsertPacket(buffer, packet);
+    if (ret == kCompleteSession) {
+      bool retransmitted = false;
+      const int64_t last_packet_time_ms =
+         jitter_buffer_.LastPacketTime(buffer, &retransmitted);
+      if (last_packet_time_ms >= 0 && !retransmitted) {
+        // We don't want to include timestamps which have suffered from
+        // retransmission here, since we compensate with extra retransmission
+        // delay within the jitter estimate.
+        timing_->IncomingTimestamp(packet.timestamp, last_packet_time_ms);
+      }
+    }
     if (ret == kFlushIndicator) {
       return VCM_FLUSH_INDICATOR;
     } else if (ret < 0) {
@@ -175,155 +147,101 @@
     bool render_timing,
     VCMReceiver* dual_receiver) {
   TRACE_EVENT0("webrtc", "Recv::FrameForDecoding");
-  // No need to enter the critical section here since the jitter buffer
-  // is thread-safe.
-  FrameType incoming_frame_type = kVideoFrameDelta;
-  next_render_time_ms = -1;
   const int64_t start_time_ms = clock_->TimeInMilliseconds();
-  int64_t ret = jitter_buffer_.NextTimestamp(max_wait_time_ms,
-                                             &incoming_frame_type,
-                                             &next_render_time_ms);
-  if (ret < 0) {
-    // No timestamp in jitter buffer at the moment.
+  uint32_t frame_timestamp = 0;
+  // Exhaust wait time to get a complete frame for decoding.
+  bool found_frame = jitter_buffer_.NextCompleteTimestamp(
+      max_wait_time_ms, &frame_timestamp);
+
+  if (!found_frame) {
+    // Get an incomplete frame when enabled.
+    const bool dual_receiver_enabled_and_passive = (dual_receiver != NULL &&
+        dual_receiver->State() == kPassive &&
+        dual_receiver->NackMode() == kNack);
+    if (dual_receiver_enabled_and_passive &&
+        !jitter_buffer_.CompleteSequenceWithNextFrame()) {
+      // Jitter buffer state might get corrupt with this frame.
+      dual_receiver->CopyJitterBufferStateFromReceiver(*this);
+    }
+    found_frame = jitter_buffer_.NextMaybeIncompleteTimestamp(
+        &frame_timestamp);
+  }
+
+  if (!found_frame) {
     return NULL;
   }
-  const uint32_t time_stamp = static_cast<uint32_t>(ret);
 
-  // Update the timing.
+  // We have a frame - set timing and render timestamp.
   timing_->SetRequiredDelay(jitter_buffer_.EstimatedJitterMs());
-  timing_->UpdateCurrentDelay(time_stamp);
-
-  const int32_t temp_wait_time = max_wait_time_ms -
-      static_cast<int32_t>(clock_->TimeInMilliseconds() - start_time_ms);
-  uint16_t new_max_wait_time = static_cast<uint16_t>(VCM_MAX(temp_wait_time,
-                                                             0));
-
-  VCMEncodedFrame* frame = NULL;
-
-  if (render_timing) {
-    frame = FrameForDecoding(new_max_wait_time, next_render_time_ms,
-                             dual_receiver);
-  } else {
-    frame = FrameForRendering(new_max_wait_time, next_render_time_ms,
-                              dual_receiver);
+  const int64_t now_ms = clock_->TimeInMilliseconds();
+  timing_->UpdateCurrentDelay(frame_timestamp);
+  next_render_time_ms = timing_->RenderTimeMs(frame_timestamp, now_ms);
+  // Check render timing.
+  bool timing_error = false;
+  // Assume that render timing errors are due to changes in the video stream.
+  if (next_render_time_ms < 0) {
+    timing_error = true;
+  } else if (next_render_time_ms < now_ms - max_video_delay_ms_) {
+    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
+                 VCMId(vcm_id_, receiver_id_),
+                 "This frame should have been rendered more than %u ms ago."
+                 "Flushing jitter buffer and resetting timing.",
+                 max_video_delay_ms_);
+    timing_error = true;
+  } else if (static_cast<int>(timing_->TargetVideoDelay()) >
+             max_video_delay_ms_) {
+    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
+                 VCMId(vcm_id_, receiver_id_),
+                 "More than %u ms target delay. Flushing jitter buffer and"
+                 "resetting timing.", max_video_delay_ms_);
+    timing_error = true;
   }
 
-  if (frame != NULL) {
+  if (timing_error) {
+    // Timing error => reset timing and flush the jitter buffer.
+    jitter_buffer_.Flush();
+    timing_->Reset(clock_->TimeInMilliseconds());
+    return NULL;
+  }
+
+  if (!render_timing) {
+    // Decode frame as close as possible to the render timestamp.
+    TRACE_EVENT0("webrtc", "FrameForRendering");
+    const int32_t available_wait_time = max_wait_time_ms -
+        static_cast<int32_t>(clock_->TimeInMilliseconds() - start_time_ms);
+    uint16_t new_max_wait_time = static_cast<uint16_t>(
+        VCM_MAX(available_wait_time, 0));
+    uint32_t wait_time_ms = timing_->MaxWaitingTime(
+        next_render_time_ms, clock_->TimeInMilliseconds());
+    if (new_max_wait_time < wait_time_ms) {
+      // We're not allowed to wait until the frame is supposed to be rendered;
+      // wait for as long as we're allowed (to avoid busy looping) and then
+      // return NULL. The next call to this function might return the frame.
+      render_wait_event_->Wait(max_wait_time_ms);
+      return NULL;
+    }
+    // Wait until it's time to render.
+    render_wait_event_->Wait(wait_time_ms);
+  }
+
+  // Extract the frame from the jitter buffer and set the render time.
+  VCMEncodedFrame* frame = jitter_buffer_.ExtractAndSetDecode(frame_timestamp);
+  assert(frame);
+  frame->SetRenderTime(next_render_time_ms);
+  if (dual_receiver != NULL) {
+    dual_receiver->UpdateState(*frame);
+  }
+  if (!frame->Complete()) {
+    // Update stats for incomplete frames.
     bool retransmitted = false;
     const int64_t last_packet_time_ms =
-      jitter_buffer_.LastPacketTime(frame, &retransmitted);
+        jitter_buffer_.LastPacketTime(frame, &retransmitted);
     if (last_packet_time_ms >= 0 && !retransmitted) {
       // We don't want to include timestamps which have suffered from
       // retransmission here, since we compensate with extra retransmission
       // delay within the jitter estimate.
-      timing_->IncomingTimestamp(time_stamp, last_packet_time_ms);
+      timing_->IncomingTimestamp(frame_timestamp, last_packet_time_ms);
     }
-    if (dual_receiver != NULL) {
-      dual_receiver->UpdateState(*frame);
-    }
-  }
-  return frame;
-}
-
-VCMEncodedFrame* VCMReceiver::FrameForDecoding(
-    uint16_t max_wait_time_ms,
-    int64_t next_render_time_ms,
-    VCMReceiver* dual_receiver) {
-  TRACE_EVENT1("webrtc", "FrameForDecoding",
-               "max_wait", max_wait_time_ms);
-  // How long can we wait until we must decode the next frame.
-  uint32_t wait_time_ms = timing_->MaxWaitingTime(
-      next_render_time_ms, clock_->TimeInMilliseconds());
-
-  // Try to get a complete frame from the jitter buffer.
-  VCMEncodedFrame* frame = jitter_buffer_.GetCompleteFrameForDecoding(0);
-
-  if (frame == NULL && max_wait_time_ms == 0 && wait_time_ms > 0) {
-    // If we're not allowed to wait for frames to get complete we must
-    // calculate if it's time to decode, and if it's not we will just return
-    // for now.
-    return NULL;
-  }
-
-  if (frame == NULL && VCM_MIN(wait_time_ms, max_wait_time_ms) == 0) {
-    // No time to wait for a complete frame, check if we have an incomplete.
-    const bool dual_receiver_enabled_and_passive = (dual_receiver != NULL &&
-        dual_receiver->State() == kPassive &&
-        dual_receiver->NackMode() == kNack);
-    if (dual_receiver_enabled_and_passive &&
-        !jitter_buffer_.CompleteSequenceWithNextFrame()) {
-      // Jitter buffer state might get corrupt with this frame.
-      dual_receiver->CopyJitterBufferStateFromReceiver(*this);
-    }
-    frame = jitter_buffer_.MaybeGetIncompleteFrameForDecoding();
-  }
-  if (frame == NULL) {
-    // Wait for a complete frame.
-    frame = jitter_buffer_.GetCompleteFrameForDecoding(max_wait_time_ms);
-  }
-  if (frame == NULL) {
-    // Get an incomplete frame.
-    if (timing_->MaxWaitingTime(next_render_time_ms,
-                                clock_->TimeInMilliseconds()) > 0) {
-      // Still time to wait for a complete frame.
-      return NULL;
-    }
-
-    // No time left to wait, we must decode this frame now.
-    const bool dual_receiver_enabled_and_passive = (dual_receiver != NULL &&
-        dual_receiver->State() == kPassive &&
-        dual_receiver->NackMode() == kNack);
-    if (dual_receiver_enabled_and_passive &&
-        !jitter_buffer_.CompleteSequenceWithNextFrame()) {
-      // Jitter buffer state might get corrupt with this frame.
-      dual_receiver->CopyJitterBufferStateFromReceiver(*this);
-    }
-
-    frame = jitter_buffer_.MaybeGetIncompleteFrameForDecoding();
-  }
-  return frame;
-}
-
-VCMEncodedFrame* VCMReceiver::FrameForRendering(uint16_t max_wait_time_ms,
-                                                int64_t next_render_time_ms,
-                                                VCMReceiver* dual_receiver) {
-  TRACE_EVENT0("webrtc", "FrameForRendering");
-  // How long MUST we wait until we must decode the next frame. This is
-  // different for the case where we have a renderer which can render at a
-  // specified time. Here we must wait as long as possible before giving the
-  // frame to the decoder, which will render the frame as soon as it has been
-  // decoded.
-  uint32_t wait_time_ms = timing_->MaxWaitingTime(
-      next_render_time_ms, clock_->TimeInMilliseconds());
-  if (max_wait_time_ms < wait_time_ms) {
-    // If we're not allowed to wait until the frame is supposed to be rendered,
-    // waiting as long as we're allowed to avoid busy looping, and then return
-    // NULL. Next call to this function might return the frame.
-    render_wait_event_->Wait(max_wait_time_ms);
-    return NULL;
-  }
-  // Wait until it's time to render.
-  render_wait_event_->Wait(wait_time_ms);
-
-  // Get a complete frame if possible.
-  // Note: This might cause us to wait more than a total of |max_wait_time_ms|.
-  // This is necessary to avoid a possible busy loop if no complete frame
-  // has been received.
-  VCMEncodedFrame* frame = jitter_buffer_.GetCompleteFrameForDecoding(
-      max_wait_time_ms);
-
-  if (frame == NULL) {
-    // Get an incomplete frame.
-    const bool dual_receiver_enabled_and_passive = (dual_receiver != NULL &&
-        dual_receiver->State() == kPassive &&
-        dual_receiver->NackMode() == kNack);
-    if (dual_receiver_enabled_and_passive &&
-        !jitter_buffer_.CompleteSequenceWithNextFrame()) {
-      // Jitter buffer state might get corrupt with this frame.
-      dual_receiver->CopyJitterBufferStateFromReceiver(*this);
-    }
-
-    frame = jitter_buffer_.MaybeGetIncompleteFrameForDecoding();
   }
   return frame;
 }
@@ -430,7 +348,6 @@
   if (desired_delay_ms < 0 || desired_delay_ms > kMaxReceiverDelayMs) {
     return -1;
   }
-  // Enable a max filter on the jitter estimate for non-zero delays.
   jitter_buffer_.SetMaxJitterEstimate(desired_delay_ms > 0);
   max_video_delay_ms_ = desired_delay_ms + kMaxVideoDelayMs;
   // Initializing timing to the desired delay.
@@ -439,7 +356,21 @@
 }
 
 int VCMReceiver::RenderBufferSizeMs() {
-  return jitter_buffer_.RenderBufferSizeMs();
+  uint32_t timestamp_start = 0u;
+  uint32_t timestamp_end = 0u;
+  // Render timestamps are computed just prior to decoding. Therefore this is
+  // only an estimate based on frames' timestamps and current timing state.
+  jitter_buffer_.RenderBufferSize(&timestamp_start, &timestamp_end);
+  if (timestamp_start == timestamp_end) {
+    return 0;
+  }
+  // Update timing.
+  const int64_t now_ms = clock_->TimeInMilliseconds();
+  timing_->SetRequiredDelay(jitter_buffer_.EstimatedJitterMs());
+  // Get render timestamps.
+  uint32_t render_start = timing_->RenderTimeMs(timestamp_start, now_ms);
+  uint32_t render_end = timing_->RenderTimeMs(timestamp_end, now_ms);
+  return render_end - render_start;
 }
 
 void VCMReceiver::UpdateState(VCMReceiverState new_state) {
diff --git a/webrtc/modules/video_coding/main/source/receiver.h b/webrtc/modules/video_coding/main/source/receiver.h
index a62ae2f..b478f49 100644
--- a/webrtc/modules/video_coding/main/source/receiver.h
+++ b/webrtc/modules/video_coding/main/source/receiver.h
@@ -81,16 +81,11 @@
   bool DecodeWithErrors() const;
 
   // Returns size in time (milliseconds) of complete continuous frames in the
-  // jitter buffer.
+  // jitter buffer. The render time is estimated based on the render delay at
+  // the time this function is called.
   int RenderBufferSizeMs();
 
  private:
-  VCMEncodedFrame* FrameForDecoding(uint16_t max_wait_time_ms,
-                                    int64_t nextrender_time_ms,
-                                    VCMReceiver* dual_receiver);
-  VCMEncodedFrame* FrameForRendering(uint16_t max_wait_time_ms,
-                                     int64_t nextrender_time_ms,
-                                     VCMReceiver* dual_receiver);
   void CopyJitterBufferStateFromReceiver(const VCMReceiver& receiver);
   void UpdateState(VCMReceiverState new_state);
   void UpdateState(const VCMEncodedFrame& frame);
diff --git a/webrtc/modules/video_coding/main/source/receiver_unittest.cc b/webrtc/modules/video_coding/main/source/receiver_unittest.cc
index 5f5b09f..92f578c 100644
--- a/webrtc/modules/video_coding/main/source/receiver_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/receiver_unittest.cc
@@ -115,6 +115,10 @@
   for (int i = 0; i < num_of_frames; ++i) {
     EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
   }
+  int64_t next_render_time_ms = 0;
+  VCMEncodedFrame* frame = receiver_.FrameForDecoding(10, next_render_time_ms);
+  EXPECT_TRUE(frame == NULL);
+  receiver_.ReleaseFrame(frame);
   EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
   for (int i = 0; i < num_of_frames; ++i) {
     EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
diff --git a/webrtc/modules/video_coding/main/source/stream_generator.cc b/webrtc/modules/video_coding/main/source/stream_generator.cc
index b4be4ab..a2da3e5 100644
--- a/webrtc/modules/video_coding/main/source/stream_generator.cc
+++ b/webrtc/modules/video_coding/main/source/stream_generator.cc
@@ -43,7 +43,7 @@
                                     int num_media_packets,
                                     int num_empty_packets,
                                     int64_t current_time) {
-  timestamp_ += 90 * (current_time - start_time_);
+  timestamp_ = 90 * (current_time - start_time_);
   // Move the sequence number counter if all packets from the previous frame
   // wasn't collected.
   sequence_number_ += packets_.size();
diff --git a/webrtc/modules/video_coding/main/source/timing.cc b/webrtc/modules/video_coding/main/source/timing.cc
index 6330913..5b2cd0c 100644
--- a/webrtc/modules/video_coding/main/source/timing.cc
+++ b/webrtc/modules/video_coding/main/source/timing.cc
@@ -34,8 +34,7 @@
 _minTotalDelayMs(0),
 _requiredDelayMs(0),
 _currentDelayMs(0),
-_prevFrameTimestamp(0),
-_maxVideoDelayMs(kMaxVideoDelayMs)
+_prevFrameTimestamp(0)
 {
     if (masterTiming == NULL)
     {
@@ -219,10 +218,6 @@
 {
     CriticalSectionScoped cs(_critSect);
     const int64_t renderTimeMs = RenderTimeMsInternal(frameTimestamp, nowMs);
-    if (renderTimeMs < 0)
-    {
-        return renderTimeMs;
-    }
     if (_master)
     {
         WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
@@ -239,16 +234,6 @@
 {
     int64_t estimatedCompleteTimeMs =
             _tsExtrapolator->ExtrapolateLocalTime(frameTimestamp);
-    if (estimatedCompleteTimeMs - nowMs > _maxVideoDelayMs)
-    {
-        if (_master)
-        {
-            WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
-                    "Timestamp arrived 2 seconds early, reset statistics",
-                    frameTimestamp, estimatedCompleteTimeMs);
-        }
-        return -1;
-    }
     if (_master)
     {
         WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _timingId),
@@ -315,12 +300,6 @@
     return static_cast<int32_t>(availableProcessingTimeMs) - maxDecodeTimeMs > 0;
 }
 
-void VCMTiming::SetMaxVideoDelay(int maxVideoDelayMs)
-{
-    CriticalSectionScoped cs(_critSect);
-    _maxVideoDelayMs = maxVideoDelayMs;
-}
-
 uint32_t
 VCMTiming::TargetVideoDelay() const
 {
diff --git a/webrtc/modules/video_coding/main/source/timing.h b/webrtc/modules/video_coding/main/source/timing.h
index ae0bd57..e100f7a 100644
--- a/webrtc/modules/video_coding/main/source/timing.h
+++ b/webrtc/modules/video_coding/main/source/timing.h
@@ -62,8 +62,8 @@
                                   int64_t startTimeMs,
                                   int64_t nowMs);
 
-    // Used to report that a frame is passed to decoding. Updates the timestamp filter
-    // which is used to map between timestamps and receiver system time.
+    // Used to report that a frame is passed to decoding. Updates the timestamp
+    // filter which is used to map between timestamps and receiver system time.
     void IncomingTimestamp(uint32_t timeStamp, int64_t lastPacketTimeMs);
 
     // Returns the receiver system time when the frame with timestamp frameTimestamp
@@ -82,16 +82,12 @@
     // certain amount of processing time.
     bool EnoughTimeToDecode(uint32_t availableProcessingTimeMs) const;
 
-    // Set the max allowed video delay.
-    void SetMaxVideoDelay(int maxVideoDelayMs);
-
     enum { kDefaultRenderDelayMs = 10 };
     enum { kDelayMaxChangeMsPerS = 100 };
 
 protected:
     int32_t MaxDecodeTimeMs(FrameType frameType = kVideoFrameDelta) const;
-    int64_t RenderTimeMsInternal(uint32_t frameTimestamp,
-                                       int64_t nowMs) const;
+    int64_t RenderTimeMsInternal(uint32_t frameTimestamp, int64_t nowMs) const;
     uint32_t TargetDelayInternal() const;
 
 private:
@@ -107,7 +103,6 @@
     uint32_t _requiredDelayMs;
     uint32_t _currentDelayMs;
     uint32_t _prevFrameTimestamp;
-    int _maxVideoDelayMs;
 };
 
 } // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/video_coding_robustness_unittest.cc b/webrtc/modules/video_coding/main/source/video_coding_robustness_unittest.cc
index b06e296..7c99c08 100644
--- a/webrtc/modules/video_coding/main/source/video_coding_robustness_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/video_coding_robustness_unittest.cc
@@ -107,10 +107,12 @@
   InsertPacket(0, 0, true, false, kVideoFrameKey);
   InsertPacket(0, 1, false, false, kVideoFrameKey);
   InsertPacket(0, 2, false, true, kVideoFrameKey);
+  clock_->AdvanceTimeMilliseconds(1000 / 30);
 
   InsertPacket(3000, 3, true, false, kVideoFrameDelta);
   InsertPacket(3000, 4, false, false, kVideoFrameDelta);
   InsertPacket(3000, 5, false, true, kVideoFrameDelta);
+  clock_->AdvanceTimeMilliseconds(1000 / 30);
 
   ASSERT_EQ(VCM_OK, vcm_->Decode(0));
   ASSERT_EQ(VCM_OK, vcm_->Decode(0));
diff --git a/webrtc/modules/video_coding/main/source/video_coding_test.gypi b/webrtc/modules/video_coding/main/source/video_coding_test.gypi
index c3eed23..3a66987 100644
--- a/webrtc/modules/video_coding/main/source/video_coding_test.gypi
+++ b/webrtc/modules/video_coding/main/source/video_coding_test.gypi
@@ -52,7 +52,6 @@
         '../test/codec_database_test.cc',
         '../test/decode_from_storage_test.cc',
         '../test/generic_codec_test.cc',
-        '../test/jitter_buffer_test.cc',
         '../test/media_opt_test.cc',
         '../test/mt_test_common.cc',
         '../test/mt_rx_tx_test.cc',
diff --git a/webrtc/modules/video_coding/main/test/jitter_buffer_test.cc b/webrtc/modules/video_coding/main/test/jitter_buffer_test.cc
index d78b7e3..9edbd2a 100644
--- a/webrtc/modules/video_coding/main/test/jitter_buffer_test.cc
+++ b/webrtc/modules/video_coding/main/test/jitter_buffer_test.cc
@@ -90,6 +90,23 @@
     return 0;
 }
 
+VCMEncodedFrame* DecodeCompleteFrame(uint32_t max_wait_time_ms) {
+  uint32_t timestamp = 0;
+  bool found_frame = jb.NextCompleteTimestamp(max_wait_time_ms, &timestamp);
+  if (!found_frame)
+    return NULL;
+
+  return jb.ExtractAndSetDecode(timestamp);
+}
+
+VCMEncodedFrame* DecodeIncompleteFrame() {
+  uint32_t timestamp = 0;
+  bool found_frame =
+      jb.NextMaybeIncompleteTimestamp(&timestamp);
+  if (!found_frame)
+    return NULL;
+  return jb.ExtractAndSetDecode(timestamp);
+}
 
 int JitterBufferTest(CmdArgs& args)
 {
@@ -107,9 +124,7 @@
 
     seqNum = 1234;
     timeStamp = 123*90;
-    FrameType incomingFrameType(kVideoFrameKey);
     VCMEncodedFrame* frameOut=NULL;
-    int64_t renderTimeMs = 0;
     packet.timestamp = timeStamp;
     packet.seqNum = seqNum;
 
@@ -134,9 +149,8 @@
 
     // Not started
     TEST(0 == jb.GetFrame(packet));
-    TEST(-1 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-    TEST(0 == jb.GetCompleteFrameForDecoding(10));
-    TEST(0 == jb.MaybeGetIncompleteFrameForDecoding());
+    TEST(0 == DecodeCompleteFrame(10));
+    TEST(0 == DecodeIncompleteFrame());
 
     // Start
     jb.Start();
@@ -149,7 +163,7 @@
     TEST(frameIn != 0);
 
     // No packets inserted
-    TEST(0 == jb.GetCompleteFrameForDecoding(10));
+    TEST(0 == DecodeCompleteFrame(10));
 
 
     //
@@ -167,26 +181,20 @@
     // packet.isFirstPacket;
     // packet.markerBit;
     //
-    packet.frameType = kVideoFrameDelta;
+    packet.frameType = kVideoFrameKey;
     packet.isFirstPacket = true;
     packet.markerBit = true;
 
-    // Insert a packet into a frame
+    // Insert a packet into a frame.
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameDelta);
-
-    // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    // Get the frame (always starts with a key frame).
+    frameOut = DecodeCompleteFrame(10);
 
     TEST(CheckOutFrame(frameOut, size, false) == 0);
 
     // check the frame type
-    TEST(frameOut->FrameType() == kVideoFrameDelta);
+    TEST(frameOut->FrameType() == kVideoFrameKey);
 
     // Release frame (when done with decoding)
     jb.ReleaseFrame(frameOut);
@@ -215,14 +223,8 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameDelta);
-
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     // it should not be complete
     TEST(frameOut == 0);
@@ -239,7 +241,7 @@
     TEST(kCompleteSession == jb.InsertPacket(frameIn, packet));
 
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     TEST(CheckOutFrame(frameOut, size*2, false) == 0);
 
@@ -274,14 +276,8 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameKey);
-
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     // it should not be complete
     TEST(frameOut == 0);
@@ -316,7 +312,7 @@
     TEST(kCompleteSession == jb.InsertPacket(frameIn, packet));
 
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     TEST(CheckOutFrame(frameOut, size*100, false) == 0);
 
@@ -350,14 +346,8 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameDelta);
-
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     // it should not be complete
     TEST(frameOut == 0);
@@ -392,7 +382,7 @@
     TEST(kCompleteSession == jb.InsertPacket(frameIn, packet));
 
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     TEST(CheckOutFrame(frameOut, size*100, false) == 0);
 
@@ -427,14 +417,8 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameDelta);
-
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     // it should not be complete
     TEST(frameOut == 0);
@@ -469,7 +453,7 @@
     TEST(kCompleteSession == jb.InsertPacket(frameIn, packet));
 
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     TEST(CheckOutFrame(frameOut, size*100, false) == 0);
 
@@ -504,14 +488,8 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameDelta);
-
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     // it should not be complete
     TEST(frameOut == 0);
@@ -528,7 +506,7 @@
     TEST(kCompleteSession == jb.InsertPacket(frameIn, packet));
 
     // check that we fail to get frame since seqnum is not continuous
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
     TEST(frameOut == 0);
 
     seqNum -= 3;
@@ -545,12 +523,6 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameDelta);
-
     // get the frame
     frameOut = jb.GetCompleteFrameForDecoding(10);
 
@@ -569,7 +541,7 @@
     TEST(kCompleteSession == jb.InsertPacket(frameIn, packet));
 
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     TEST(CheckOutFrame(frameOut, size*2, false) == 0);
 
@@ -580,7 +552,7 @@
     jb.ReleaseFrame(frameOut);
 
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     TEST(CheckOutFrame(frameOut, size*2, false) == 0);
 
@@ -619,14 +591,8 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameDelta);
-
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     // it should not be complete
     TEST(frameOut == 0);
@@ -646,7 +612,7 @@
     TEST(kCompleteSession == jb.InsertPacket(frameIn, packet));
 
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     TEST(CheckOutFrame(frameOut, size*2, false) == 0);
 
@@ -681,14 +647,8 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameDelta);
-
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     // it should not be complete
     TEST(frameOut == 0);
@@ -705,7 +665,7 @@
     TEST(kCompleteSession == jb.InsertPacket(frameIn, packet));
 
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     TEST(CheckOutFrame(frameOut, size * 2 + 4 * 2, true) == 0);
 
@@ -763,22 +723,8 @@
       // Insert a packet into a frame
       TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-      // Get packet notification
-      TEST(timeStamp - 33 * 90 == jb.NextTimestamp(10, &incomingFrameType,
-                                                   &renderTimeMs));
-
-      // Check incoming frame type
-      if (i == 0)
-      {
-          TEST(incomingFrameType == kVideoFrameKey);
-      }
-      else
-      {
-          TEST(incomingFrameType == frametype);
-      }
-
       // Get the frame
-      frameOut = jb.GetCompleteFrameForDecoding(10);
+      frameOut = DecodeCompleteFrame(10);
 
       // Should not be complete
       TEST(frameOut == 0);
@@ -811,7 +757,7 @@
       TEST(kIncomplete == jb.InsertPacket(frameIn, packet));
 
       // Get the frame
-      frameOut = jb.MaybeGetIncompleteFrameForDecoding();
+      frameOut = DecodeIncompleteFrame();
 
       // One of the packets has been discarded by the jitter buffer.
       // Last frame can't be extracted yet.
@@ -881,7 +827,7 @@
     // insert first packet
     timeStamp += 33*90;
     seqNum = 0xfff0;
-    packet.frameType = kVideoFrameDelta;
+    packet.frameType = kVideoFrameKey;
     packet.isFirstPacket = true;
     packet.markerBit = false;
     packet.seqNum = seqNum;
@@ -893,19 +839,13 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameDelta);
-
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     // it should not be complete
     TEST(frameOut == 0);
 
-    // insert 98 packets
+    // Insert 98 packets.
     loop = 0;
     do
     {
@@ -920,15 +860,8 @@
         // Insert a packet into a frame
         TEST(kIncomplete == jb.InsertPacket(frameIn, packet));
 
-        // get packet notification
-        TEST(timeStamp == jb.NextTimestamp(2, &incomingFrameType,
-                                           &renderTimeMs));
-
-        // check incoming frame type
-        TEST(incomingFrameType == kVideoFrameDelta);
-
         // get the frame
-        frameOut = jb.GetCompleteFrameForDecoding(2);
+        frameOut = DecodeCompleteFrame(2);
 
         // it should not be complete
         TEST(frameOut == 0);
@@ -949,12 +882,12 @@
     TEST(kCompleteSession == jb.InsertPacket(frameIn, packet));
 
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     TEST(CheckOutFrame(frameOut, size*100, false) == 0);
 
     // check the frame type
-    TEST(frameOut->FrameType() == kVideoFrameDelta);
+    TEST(frameOut->FrameType() == kVideoFrameKey);
 
     // Release frame (when done with decoding)
     jb.ReleaseFrame(frameOut);
@@ -975,7 +908,7 @@
     // insert "first" packet last seqnum
     timeStamp += 33*90;
     seqNum = 10;
-    packet.frameType = kVideoFrameDelta;
+    packet.frameType = kVideoFrameKey;
     packet.isFirstPacket = false;
     packet.markerBit = true;
     packet.seqNum = seqNum;
@@ -987,14 +920,8 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameDelta);
-
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeIncompleteFrame();
 
     // it should not be complete
     TEST(frameOut == 0);
@@ -1014,15 +941,8 @@
         // Insert a packet into a frame
         TEST(kIncomplete == jb.InsertPacket(frameIn, packet));
 
-        // get packet notification
-        TEST(timeStamp == jb.NextTimestamp(2, &incomingFrameType,
-                                           &renderTimeMs));
-
-        // check incoming frame type
-        TEST(incomingFrameType == kVideoFrameDelta);
-
         // get the frame
-        frameOut = jb.GetCompleteFrameForDecoding(2);
+        frameOut = DecodeCompleteFrame(2);
 
         // it should not be complete
         TEST(frameOut == 0);
@@ -1043,7 +963,7 @@
     TEST(kCompleteSession == jb.InsertPacket(frameIn, packet));
 
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeIncompleteFrame();
 
     TEST(CheckOutFrame(frameOut, size*100, false) == 0);
 
@@ -1068,7 +988,7 @@
     // insert "first" packet last seqnum
     timeStamp += 33*90;
     seqNum = 1;
-    packet.frameType = kVideoFrameDelta;
+    packet.frameType = kVideoFrameKey;
     packet.isFirstPacket = false;
     packet.markerBit = true;
     packet.seqNum = seqNum;
@@ -1080,14 +1000,8 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameDelta);
-
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     // it should not be complete
     TEST(frameOut == 0);
@@ -1104,14 +1018,8 @@
     // Insert a packet into a frame
     TEST(kIncomplete == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameDelta);
-
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeIncompleteFrame();
 
     // it should not be complete
     TEST(frameOut == 0);
@@ -1128,7 +1036,7 @@
     TEST(kCompleteSession == jb.InsertPacket(frameIn, packet));
 
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeIncompleteFrame();
 
     TEST(CheckOutFrame(frameOut, size*3, false) == 0);
 
@@ -1165,12 +1073,8 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(3000 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-    TEST(kVideoFrameDelta == incomingFrameType);
-
     // Get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
     TEST(3000 == frameOut->TimeStamp());
 
     TEST(CheckOutFrame(frameOut, size, false) == 0);
@@ -1219,12 +1123,8 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-    TEST(kVideoFrameDelta == incomingFrameType);
-
     // Get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeIncompleteFrame();
     TEST(timeStamp == frameOut->TimeStamp());
 
     TEST(CheckOutFrame(frameOut, size, false) == 0);
@@ -1270,14 +1170,8 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameDelta);
-
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     // it should not be complete
     TEST(frameOut == 0);
@@ -1293,7 +1187,7 @@
     // Insert a packet into a frame
     TEST(kCompleteSession == jb.InsertPacket(frameIn, packet));
 
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeIncompleteFrame();
 
     TEST(CheckOutFrame(frameOut, size*2, false) == 0);
 
@@ -1313,14 +1207,8 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameDelta);
-
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
 
     // it should not be complete
     TEST(frameOut == 0);
@@ -1337,7 +1225,7 @@
     TEST(kCompleteSession == jb.InsertPacket(frameIn, packet));
 
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeIncompleteFrame();
 
     TEST(CheckOutFrame(frameOut, size*2, false) == 0);
 
@@ -1373,10 +1261,6 @@
     // Insert first frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // Get packet notification
-    TEST(0xffffff00 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-    TEST(kVideoFrameDelta == incomingFrameType);
-
     // Insert next frame
     seqNum++;
     timeStamp = 2700;
@@ -1392,12 +1276,8 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // Get packet notification
-    TEST(0xffffff00 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-    TEST(kVideoFrameDelta == incomingFrameType);
-
     // Get frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
     TEST(0xffffff00 == frameOut->TimeStamp());
 
     TEST(CheckOutFrame(frameOut, size, false) == 0);
@@ -1405,12 +1285,8 @@
     // check the frame type
     TEST(frameOut->FrameType() == kVideoFrameDelta);
 
-    // Get packet notification
-    TEST(2700 == jb.NextTimestamp(0, &incomingFrameType, &renderTimeMs));
-    TEST(kVideoFrameDelta == incomingFrameType);
-
     // Get frame
-    VCMEncodedFrame* frameOut2 = jb.GetCompleteFrameForDecoding(10);
+    VCMEncodedFrame* frameOut2 = DecodeIncompleteFrame();
     TEST(2700 == frameOut2->TimeStamp());
 
     TEST(CheckOutFrame(frameOut2, size, false) == 0);
@@ -1448,10 +1324,6 @@
     // Insert first frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // Get packet notification
-    TEST(2700 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-    TEST(kVideoFrameDelta == incomingFrameType);
-
     // Insert second frame
     seqNum--;
     timeStamp = 0xffffff00;
@@ -1467,12 +1339,8 @@
     // Insert a packet into a frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // Get packet notification
-    TEST(0xffffff00 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
-    TEST(kVideoFrameDelta == incomingFrameType);
-
     // Get frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
     TEST(0xffffff00 == frameOut->TimeStamp());
 
     TEST(CheckOutFrame(frameOut, size, false) == 0);
@@ -1480,12 +1348,8 @@
     // check the frame type
     TEST(frameOut->FrameType() == kVideoFrameDelta);
 
-    // get packet notification
-    TEST(2700 == jb.NextTimestamp(0, &incomingFrameType, &renderTimeMs));
-    TEST(kVideoFrameDelta == incomingFrameType);
-
     // Get frame
-    frameOut2 = jb.GetCompleteFrameForDecoding(10);
+    frameOut2 = DecodeIncompleteFrame();
     TEST(2700 == frameOut2->TimeStamp());
 
     TEST(CheckOutFrame(frameOut2, size, false) == 0);
@@ -1530,13 +1394,6 @@
             TEST(kIncomplete == jb.InsertPacket(frameIn, packet));
         }
 
-        // get packet notification
-        TEST(packet.timestamp == jb.NextTimestamp(10, &incomingFrameType,
-                                                  &renderTimeMs));
-
-        // check incoming frame type
-        TEST(incomingFrameType == kVideoFrameDelta);
-
         loop++;
     } while (loop < kMaxPacketsInSession);
 
@@ -1554,7 +1411,7 @@
     // Insert the packet -> frame recycled
     TEST(kSizeError == jb.InsertPacket(frameIn, packet));
 
-    TEST(0 == jb.GetCompleteFrameForDecoding(10));
+    TEST(0 == DecodeIncompleteFrame());
 
     //printf("DONE fill frame - packets > max number of packets\n");
 
@@ -1571,8 +1428,6 @@
 
     loop = 0;
     seqNum = 65485;
-    uint32_t timeStampStart = timeStamp +  33*90;
-    uint32_t timeStampFirstKey = 0;
     VCMEncodedFrame* ptrLastDeltaFrame = NULL;
     VCMEncodedFrame* ptrFirstKeyFrame = NULL;
     // insert MAX_NUMBER_OF_FRAMES frames
@@ -1596,19 +1451,11 @@
         {
             ptrFirstKeyFrame = frameIn;
             packet.frameType = kVideoFrameKey;
-            timeStampFirstKey = packet.timestamp;
         }
 
         // Insert frame
         TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-        // Get packet notification, should be first inserted frame
-        TEST(timeStampStart == jb.NextTimestamp(10, &incomingFrameType,
-                                                &renderTimeMs));
-
-        // check incoming frame type
-        TEST(incomingFrameType == kVideoFrameDelta);
-
         loop++;
     } while (loop < kMaxNumberOfFrames);
 
@@ -1630,15 +1477,8 @@
     // Insert frame
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // First inserted key frame should be oldest in buffer
-    TEST(timeStampFirstKey == jb.NextTimestamp(10, &incomingFrameType,
-                                               &renderTimeMs));
-
-    // check incoming frame type
-    TEST(incomingFrameType == kVideoFrameKey);
-
     // get the first key frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
     TEST(ptrFirstKeyFrame == frameOut);
 
     TEST(CheckOutFrame(frameOut, size, false) == 0);
@@ -1744,9 +1584,6 @@
     frameIn = jb.GetFrame(packet);
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
-    // Get packet notification
-    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType,
-                                       &renderTimeMs));
     frameOut = jb.MaybeGetIncompleteFrameForDecoding();
 
     // We can decode everything from a NALU until a packet has been lost.
@@ -1863,7 +1700,7 @@
     TEST(kCompleteSession == jb.InsertPacket(frameIn, emptypacket));
 
     // get the frame
-    frameOut = jb.GetCompleteFrameForDecoding(10);
+    frameOut = DecodeCompleteFrame(10);
     // Only last NALU is complete
     TEST(CheckOutFrame(frameOut, packet.sizeBytes, false) == 0);
 
diff --git a/webrtc/modules/video_coding/main/test/tester_main.cc b/webrtc/modules/video_coding/main/test/tester_main.cc
index c928a80..3a5c4a9 100644
--- a/webrtc/modules/video_coding/main/test/tester_main.cc
+++ b/webrtc/modules/video_coding/main/test/tester_main.cc
@@ -98,7 +98,6 @@
       ret = NormalTest::RunTest(args);
       ret |= CodecDataBaseTest::RunTest(args);
       ret |= ReceiverTimingTests(args);
-      ret |= JitterBufferTest(args);
       break;
     case 1:
       ret = NormalTest::RunTest(args);
@@ -126,12 +125,9 @@
       ret = RtpPlayMT(args);
       break;
     case 9:
-      ret = JitterBufferTest(args);
-      break;
-    case 10:
       ret = DecodeFromStorageTest(args);
       break;
-    case 11:
+    case 10:
       qualityModeTest(args);
       break;
     default: