Add framerate to VideoSinkWants and ability to signal on overuse

In ViEEncoder, try to reduce framerate instead of resolution if the
current degradation preference is maintain-resolution rather than
balanced.

BUG=webrtc:4172

Review-Url: https://codereview.webrtc.org/2716643002
Cr-Commit-Position: refs/heads/master@{#17327}
diff --git a/webrtc/call/bitrate_estimator_tests.cc b/webrtc/call/bitrate_estimator_tests.cc
index bb4960f..24874eb7 100644
--- a/webrtc/call/bitrate_estimator_tests.cc
+++ b/webrtc/call/bitrate_estimator_tests.cc
@@ -172,7 +172,7 @@
           Clock::GetRealTimeClock()));
       send_stream_->SetSource(
           frame_generator_capturer_.get(),
-          VideoSendStream::DegradationPreference::kBalanced);
+          VideoSendStream::DegradationPreference::kMaintainFramerate);
       send_stream_->Start();
       frame_generator_capturer_->Start();
 
diff --git a/webrtc/call/call_perf_tests.cc b/webrtc/call/call_perf_tests.cc
index a421c24..d406750 100644
--- a/webrtc/call/call_perf_tests.cc
+++ b/webrtc/call/call_perf_tests.cc
@@ -494,14 +494,13 @@
       // First expect CPU overuse. Then expect CPU underuse when the encoder
       // delay has been decreased.
       if (wants.target_pixel_count &&
-          *wants.target_pixel_count <
-              wants.max_pixel_count.value_or(std::numeric_limits<int>::max())) {
+          *wants.target_pixel_count < wants.max_pixel_count) {
         // On adapting up, ViEEncoder::VideoSourceProxy will set the target
         // pixel count to a step up from the current and the max value to
         // something higher than the target.
         EXPECT_FALSE(expect_lower_resolution_wants_);
         observation_complete_.Set();
-      } else if (wants.max_pixel_count) {
+      } else if (wants.max_pixel_count < std::numeric_limits<int>::max()) {
         // On adapting down, ViEEncoder::VideoSourceProxy will set only the max
         // pixel count, leaving the target unset.
         EXPECT_TRUE(expect_lower_resolution_wants_);
diff --git a/webrtc/media/base/adaptedvideotracksource.cc b/webrtc/media/base/adaptedvideotracksource.cc
index 236c4a5..7a3e9f2 100644
--- a/webrtc/media/base/adaptedvideotracksource.cc
+++ b/webrtc/media/base/adaptedvideotracksource.cc
@@ -81,8 +81,8 @@
 void AdaptedVideoTrackSource::OnSinkWantsChanged(
     const rtc::VideoSinkWants& wants) {
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  video_adapter_.OnResolutionRequest(wants.target_pixel_count,
-                                     wants.max_pixel_count);
+  video_adapter_.OnResolutionFramerateRequest(
+      wants.target_pixel_count, wants.max_pixel_count, wants.max_framerate_fps);
 }
 
 bool AdaptedVideoTrackSource::AdaptFrame(int width,
diff --git a/webrtc/media/base/fakevideocapturer.h b/webrtc/media/base/fakevideocapturer.h
index b5bfd34..34f9dbf 100644
--- a/webrtc/media/base/fakevideocapturer.h
+++ b/webrtc/media/base/fakevideocapturer.h
@@ -66,8 +66,9 @@
                               GetCaptureFormat()->fourcc);
   }
   bool CaptureCustomFrame(int width, int height, uint32_t fourcc) {
-    // default to 30fps
-    return CaptureCustomFrame(width, height, 33333333, fourcc);
+    // Default to 30fps.
+    return CaptureCustomFrame(width, height, rtc::kNumNanosecsPerSec / 30,
+                              fourcc);
   }
   bool CaptureCustomFrame(int width,
                           int height,
@@ -92,8 +93,11 @@
     // AdaptFrame, and the test case
     // VideoCapturerTest.SinkWantsMaxPixelAndMaxPixelCountStepUp
     // depends on this.
-    if (AdaptFrame(width, height, 0, 0, &adapted_width, &adapted_height,
-                   &crop_width, &crop_height, &crop_x, &crop_y, nullptr)) {
+    if (AdaptFrame(width, height,
+                   next_timestamp_ / rtc::kNumNanosecsPerMicrosec,
+                   next_timestamp_ / rtc::kNumNanosecsPerMicrosec,
+                   &adapted_width, &adapted_height, &crop_width, &crop_height,
+                   &crop_x, &crop_y, nullptr)) {
       rtc::scoped_refptr<webrtc::I420Buffer> buffer(
           webrtc::I420Buffer::Create(adapted_width, adapted_height));
       buffer->InitializeData();
diff --git a/webrtc/media/base/fakevideorenderer.h b/webrtc/media/base/fakevideorenderer.h
index 7255c05..f08e213 100644
--- a/webrtc/media/base/fakevideorenderer.h
+++ b/webrtc/media/base/fakevideorenderer.h
@@ -12,6 +12,7 @@
 #define WEBRTC_MEDIA_BASE_FAKEVIDEORENDERER_H_
 
 #include "webrtc/api/video/video_frame.h"
+#include "webrtc/base/criticalsection.h"
 #include "webrtc/base/logging.h"
 #include "webrtc/media/base/videosinkinterface.h"
 
diff --git a/webrtc/media/base/videoadapter.cc b/webrtc/media/base/videoadapter.cc
index 522d175..bee2a57 100644
--- a/webrtc/media/base/videoadapter.cc
+++ b/webrtc/media/base/videoadapter.cc
@@ -106,7 +106,8 @@
       previous_height_(0),
       required_resolution_alignment_(required_resolution_alignment),
       resolution_request_target_pixel_count_(std::numeric_limits<int>::max()),
-      resolution_request_max_pixel_count_(std::numeric_limits<int>::max()) {}
+      resolution_request_max_pixel_count_(std::numeric_limits<int>::max()),
+      max_framerate_request_(std::numeric_limits<int>::max()) {}
 
 VideoAdapter::VideoAdapter() : VideoAdapter(1) {}
 
@@ -114,21 +115,34 @@
 
 bool VideoAdapter::KeepFrame(int64_t in_timestamp_ns) {
   rtc::CritScope cs(&critical_section_);
-  if (!requested_format_ || requested_format_->interval == 0)
+  if (max_framerate_request_ <= 0)
+    return false;
+
+  int64_t frame_interval_ns =
+      requested_format_ ? requested_format_->interval : 0;
+
+  // If |max_framerate_request_| is unset it defaults to maxint, so the
+  // division kNumNanosecsPerSec / max_framerate_request_ rounds down to 0
+  // and imposes no extra cap on the frame interval.
+  frame_interval_ns = std::max<int64_t>(
+      frame_interval_ns, rtc::kNumNanosecsPerSec / max_framerate_request_);
+
+  if (frame_interval_ns <= 0) {
+    // Frame rate throttling not enabled.
     return true;
+  }
 
   if (next_frame_timestamp_ns_) {
     // Time until next frame should be outputted.
     const int64_t time_until_next_frame_ns =
         (*next_frame_timestamp_ns_ - in_timestamp_ns);
 
-    // Continue if timestamp is withing expected range.
-    if (std::abs(time_until_next_frame_ns) < 2 * requested_format_->interval) {
+    // Continue if timestamp is within expected range.
+    if (std::abs(time_until_next_frame_ns) < 2 * frame_interval_ns) {
       // Drop if a frame shouldn't be outputted yet.
       if (time_until_next_frame_ns > 0)
         return false;
       // Time to output new frame.
-      *next_frame_timestamp_ns_ += requested_format_->interval;
+      *next_frame_timestamp_ns_ += frame_interval_ns;
       return true;
     }
   }
@@ -137,7 +151,7 @@
   // reset. Set first timestamp target to just half the interval to prefer
   // keeping frames in case of jitter.
   next_frame_timestamp_ns_ =
-      rtc::Optional<int64_t>(in_timestamp_ns + requested_format_->interval / 2);
+      rtc::Optional<int64_t>(in_timestamp_ns + frame_interval_ns / 2);
   return true;
 }
 
@@ -249,14 +263,15 @@
   next_frame_timestamp_ns_ = rtc::Optional<int64_t>();
 }
 
-void VideoAdapter::OnResolutionRequest(
+void VideoAdapter::OnResolutionFramerateRequest(
     const rtc::Optional<int>& target_pixel_count,
-    const rtc::Optional<int>& max_pixel_count) {
+    int max_pixel_count,
+    int max_framerate_fps) {
   rtc::CritScope cs(&critical_section_);
-  resolution_request_max_pixel_count_ =
-      max_pixel_count.value_or(std::numeric_limits<int>::max());
+  resolution_request_max_pixel_count_ = max_pixel_count;
   resolution_request_target_pixel_count_ =
       target_pixel_count.value_or(resolution_request_max_pixel_count_);
+  max_framerate_request_ = max_framerate_fps;
 }
 
 }  // namespace cricket
diff --git a/webrtc/media/base/videoadapter.h b/webrtc/media/base/videoadapter.h
index caaab3c..eb2257b 100644
--- a/webrtc/media/base/videoadapter.h
+++ b/webrtc/media/base/videoadapter.h
@@ -25,7 +25,9 @@
 class VideoAdapter {
  public:
   VideoAdapter();
-  VideoAdapter(int required_resolution_alignment);
+  // The output frames will have height and width that is divisible by
+  // |required_resolution_alignment|.
+  explicit VideoAdapter(int required_resolution_alignment);
   virtual ~VideoAdapter();
 
   // Return the adapted resolution and cropping parameters given the
@@ -49,12 +51,16 @@
   void OnOutputFormatRequest(const VideoFormat& format);
 
   // Requests the output frame size from |AdaptFrameResolution| to have as close
-  // as possible to |target_pixel_count|, but no more than |max_pixel_count|
-  // pixels. If |target_pixel_count| is not set, treat it as being equal to
-  // |max_pixel_count|. If |max_pixel_count| is not set, treat is as being the
-  // highest resolution available.
-  void OnResolutionRequest(const rtc::Optional<int>& target_pixel_count,
-                           const rtc::Optional<int>& max_pixel_count);
+  // as possible to |target_pixel_count| pixels (if set) but no more than
+  // |max_pixel_count|.
+  // |max_framerate_fps| is essentially analogous to |max_pixel_count|, but for
+  // framerate rather than resolution.
+  // Set |max_pixel_count| and/or |max_framerate_fps| to
+  // std::numeric_limits<int>::max() if no upper limit is desired.
+  void OnResolutionFramerateRequest(
+      const rtc::Optional<int>& target_pixel_count,
+      int max_pixel_count,
+      int max_framerate_fps);
 
  private:
   // Determine if frame should be dropped based on input fps and requested fps.
@@ -77,6 +83,7 @@
   rtc::Optional<VideoFormat> requested_format_ GUARDED_BY(critical_section_);
   int resolution_request_target_pixel_count_ GUARDED_BY(critical_section_);
   int resolution_request_max_pixel_count_ GUARDED_BY(critical_section_);
+  int max_framerate_request_ GUARDED_BY(critical_section_);
 
   // The critical section to protect the above variables.
   rtc::CriticalSection critical_section_;
diff --git a/webrtc/media/base/videoadapter_unittest.cc b/webrtc/media/base/videoadapter_unittest.cc
index 6ec90d8..cd9df98 100644
--- a/webrtc/media/base/videoadapter_unittest.cc
+++ b/webrtc/media/base/videoadapter_unittest.cc
@@ -22,13 +22,16 @@
 #include "webrtc/media/base/videoadapter.h"
 
 namespace cricket {
+namespace {
+const int kDefaultFps = 30;
+}  // namespace
 
 class VideoAdapterTest : public testing::Test {
  public:
   virtual void SetUp() {
     capturer_.reset(new FakeVideoCapturer);
     capture_format_ = capturer_->GetSupportedFormats()->at(0);
-    capture_format_.interval = VideoFormat::FpsToInterval(30);
+    capture_format_.interval = VideoFormat::FpsToInterval(kDefaultFps);
 
     listener_.reset(new VideoCapturerListener(&adapter_));
     capturer_->AddOrUpdateSink(listener_.get(), rtc::VideoSinkWants());
@@ -290,7 +293,7 @@
 // the adapter is conservative and resets to the new offset and does not drop
 // any frame.
 TEST_F(VideoAdapterTest, AdaptFramerateTimestampOffset) {
-  const int64_t capture_interval = VideoFormat::FpsToInterval(30);
+  const int64_t capture_interval = VideoFormat::FpsToInterval(kDefaultFps);
   adapter_.OnOutputFormatRequest(
       VideoFormat(640, 480, capture_interval, cricket::FOURCC_ANY));
 
@@ -319,7 +322,7 @@
 
 // Request 30 fps and send 30 fps with jitter. Expect that no frame is dropped.
 TEST_F(VideoAdapterTest, AdaptFramerateTimestampJitter) {
-  const int64_t capture_interval = VideoFormat::FpsToInterval(30);
+  const int64_t capture_interval = VideoFormat::FpsToInterval(kDefaultFps);
   adapter_.OnOutputFormatRequest(
       VideoFormat(640, 480, capture_interval, cricket::FOURCC_ANY));
 
@@ -384,6 +387,56 @@
   EXPECT_GT(listener_->GetStats().dropped_frames, 0);
 }
 
+// Do not adapt the frame rate or the resolution. Expect no frame drop, no
+// cropping, and no resolution change.
+TEST_F(VideoAdapterTest, OnFramerateRequestMax) {
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(),
+                                        std::numeric_limits<int>::max(),
+                                        std::numeric_limits<int>::max());
+
+  EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
+  for (int i = 0; i < 10; ++i)
+    capturer_->CaptureFrame();
+
+  // Verify no frame drop and no resolution change.
+  VideoCapturerListener::Stats stats = listener_->GetStats();
+  EXPECT_GE(stats.captured_frames, 10);
+  EXPECT_EQ(0, stats.dropped_frames);
+  VerifyAdaptedResolution(stats, capture_format_.width, capture_format_.height,
+                          capture_format_.width, capture_format_.height);
+  EXPECT_TRUE(stats.last_adapt_was_no_op);
+}
+
+TEST_F(VideoAdapterTest, OnFramerateRequestZero) {
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(),
+                                        std::numeric_limits<int>::max(), 0);
+  EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
+  for (int i = 0; i < 10; ++i)
+    capturer_->CaptureFrame();
+
+  // Verify no crash and that all frames are dropped (max framerate is zero).
+  VideoCapturerListener::Stats stats = listener_->GetStats();
+  EXPECT_GE(stats.captured_frames, 10);
+  EXPECT_EQ(10, stats.dropped_frames);
+}
+
+// Adapt the frame rate to be half of the capture rate at the beginning. Expect
+// the number of dropped frames to be half the number of captured frames.
+TEST_F(VideoAdapterTest, OnFramerateRequestHalf) {
+  adapter_.OnResolutionFramerateRequest(
+      rtc::Optional<int>(), std::numeric_limits<int>::max(), kDefaultFps / 2);
+  EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
+  for (int i = 0; i < 10; ++i)
+    capturer_->CaptureFrame();
+
+  // Verify no crash and that every other frame is dropped (framerate halved).
+  VideoCapturerListener::Stats stats = listener_->GetStats();
+  EXPECT_GE(stats.captured_frames, 10);
+  EXPECT_EQ(5, stats.dropped_frames);
+  VerifyAdaptedResolution(stats, capture_format_.width, capture_format_.height,
+                          capture_format_.width, capture_format_.height);
+}
+
 // Set a very high output pixel resolution. Expect no cropping or resolution
 // change.
 TEST_F(VideoAdapterTest, AdaptFrameResolutionHighLimit) {
@@ -696,8 +749,8 @@
   EXPECT_EQ(720, out_height_);
 
   // Adapt down one step.
-  adapter_.OnResolutionRequest(rtc::Optional<int>(),
-                               rtc::Optional<int>(1280 * 720 - 1));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(), 1280 * 720 - 1,
+                                        std::numeric_limits<int>::max());
   EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0,
                                             &cropped_width_, &cropped_height_,
                                             &out_width_, &out_height_));
@@ -707,8 +760,8 @@
   EXPECT_EQ(540, out_height_);
 
   // Adapt down one step more.
-  adapter_.OnResolutionRequest(rtc::Optional<int>(),
-                               rtc::Optional<int>(960 * 540 - 1));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(), 960 * 540 - 1,
+                                        std::numeric_limits<int>::max());
   EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0,
                                             &cropped_width_, &cropped_height_,
                                             &out_width_, &out_height_));
@@ -718,8 +771,8 @@
   EXPECT_EQ(360, out_height_);
 
   // Adapt down one step more.
-  adapter_.OnResolutionRequest(rtc::Optional<int>(),
-                               rtc::Optional<int>(640 * 360 - 1));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(), 640 * 360 - 1,
+                                        std::numeric_limits<int>::max());
   EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0,
                                             &cropped_width_, &cropped_height_,
                                             &out_width_, &out_height_));
@@ -729,8 +782,9 @@
   EXPECT_EQ(270, out_height_);
 
   // Adapt up one step.
-  adapter_.OnResolutionRequest(rtc::Optional<int>(640 * 360),
-                               rtc::Optional<int>(960 * 540));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(640 * 360),
+                                        960 * 540,
+                                        std::numeric_limits<int>::max());
   EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0,
                                             &cropped_width_, &cropped_height_,
                                             &out_width_, &out_height_));
@@ -740,8 +794,9 @@
   EXPECT_EQ(360, out_height_);
 
   // Adapt up one step more.
-  adapter_.OnResolutionRequest(rtc::Optional<int>(960 * 540),
-                               rtc::Optional<int>(1280 * 720));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(960 * 540),
+                                        1280 * 720,
+                                        std::numeric_limits<int>::max());
   EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0,
                                             &cropped_width_, &cropped_height_,
                                             &out_width_, &out_height_));
@@ -751,8 +806,9 @@
   EXPECT_EQ(540, out_height_);
 
   // Adapt up one step more.
-  adapter_.OnResolutionRequest(rtc::Optional<int>(1280 * 720),
-                               rtc::Optional<int>(1920 * 1080));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(1280 * 720),
+                                        1920 * 1080,
+                                        std::numeric_limits<int>::max());
   EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0,
                                             &cropped_width_, &cropped_height_,
                                             &out_width_, &out_height_));
@@ -771,7 +827,8 @@
   EXPECT_EQ(1280, out_width_);
   EXPECT_EQ(720, out_height_);
 
-  adapter_.OnResolutionRequest(rtc::Optional<int>(), rtc::Optional<int>(0));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(), 0,
+                                        std::numeric_limits<int>::max());
   EXPECT_FALSE(adapter_.AdaptFrameResolution(1280, 720, 0,
                                              &cropped_width_, &cropped_height_,
                                              &out_width_, &out_height_));
@@ -779,8 +836,8 @@
 
 TEST_F(VideoAdapterTest, TestOnResolutionRequestInLargeSteps) {
   // Large step down.
-  adapter_.OnResolutionRequest(rtc::Optional<int>(),
-                               rtc::Optional<int>(640 * 360 - 1));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(), 640 * 360 - 1,
+                                        std::numeric_limits<int>::max());
   EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0,
                                             &cropped_width_, &cropped_height_,
                                             &out_width_, &out_height_));
@@ -790,8 +847,9 @@
   EXPECT_EQ(270, out_height_);
 
   // Large step up.
-  adapter_.OnResolutionRequest(rtc::Optional<int>(1280 * 720),
-                               rtc::Optional<int>(1920 * 1080));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(1280 * 720),
+                                        1920 * 1080,
+                                        std::numeric_limits<int>::max());
   EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0,
                                             &cropped_width_, &cropped_height_,
                                             &out_width_, &out_height_));
@@ -802,8 +860,8 @@
 }
 
 TEST_F(VideoAdapterTest, TestOnOutputFormatRequestCapsMaxResolution) {
-  adapter_.OnResolutionRequest(rtc::Optional<int>(),
-                               rtc::Optional<int>(640 * 360 - 1));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(), 640 * 360 - 1,
+                                        std::numeric_limits<int>::max());
   EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0,
                                             &cropped_width_, &cropped_height_,
                                             &out_width_, &out_height_));
@@ -822,8 +880,8 @@
   EXPECT_EQ(480, out_width_);
   EXPECT_EQ(270, out_height_);
 
-  adapter_.OnResolutionRequest(rtc::Optional<int>(),
-                               rtc::Optional<int>(960 * 720));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(), 960 * 720,
+                                        std::numeric_limits<int>::max());
   EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0,
                                             &cropped_width_, &cropped_height_,
                                             &out_width_, &out_height_));
@@ -842,8 +900,8 @@
   EXPECT_EQ(1280, out_width_);
   EXPECT_EQ(720, out_height_);
 
-  adapter_.OnResolutionRequest(rtc::Optional<int>(),
-                               rtc::Optional<int>(640 * 360 - 1));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(), 640 * 360 - 1,
+                                        std::numeric_limits<int>::max());
   EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0,
                                             &cropped_width_, &cropped_height_,
                                             &out_width_, &out_height_));
@@ -852,7 +910,9 @@
   EXPECT_EQ(480, out_width_);
   EXPECT_EQ(270, out_height_);
 
-  adapter_.OnResolutionRequest(rtc::Optional<int>(), rtc::Optional<int>());
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(),
+                                        std::numeric_limits<int>::max(),
+                                        std::numeric_limits<int>::max());
   EXPECT_TRUE(adapter_.AdaptFrameResolution(1280, 720, 0,
                                             &cropped_width_, &cropped_height_,
                                             &out_width_, &out_height_));
@@ -876,8 +936,8 @@
   EXPECT_EQ(360, out_height_);
 
   // Adapt down one step.
-  adapter_.OnResolutionRequest(rtc::Optional<int>(),
-                               rtc::Optional<int>(640 * 360 - 1));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(), 640 * 360 - 1,
+                                        std::numeric_limits<int>::max());
   // Expect cropping to 16:9 format and 3/4 scaling.
   EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 480, 0,
                                             &cropped_width_, &cropped_height_,
@@ -888,8 +948,8 @@
   EXPECT_EQ(270, out_height_);
 
   // Adapt down one step more.
-  adapter_.OnResolutionRequest(rtc::Optional<int>(),
-                               rtc::Optional<int>(480 * 270 - 1));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(), 480 * 270 - 1,
+                                        std::numeric_limits<int>::max());
   // Expect cropping to 16:9 format and 1/2 scaling.
   EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 480, 0,
                                             &cropped_width_, &cropped_height_,
@@ -900,8 +960,9 @@
   EXPECT_EQ(180, out_height_);
 
   // Adapt up one step.
-  adapter_.OnResolutionRequest(rtc::Optional<int>(480 * 270),
-                               rtc::Optional<int>(640 * 360));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(480 * 270),
+                                        640 * 360,
+                                        std::numeric_limits<int>::max());
   // Expect cropping to 16:9 format and 3/4 scaling.
   EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 480, 0,
                                             &cropped_width_, &cropped_height_,
@@ -912,8 +973,9 @@
   EXPECT_EQ(270, out_height_);
 
   // Adapt up one step more.
-  adapter_.OnResolutionRequest(rtc::Optional<int>(640 * 360),
-                               rtc::Optional<int>(960 * 540));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(640 * 360),
+                                        960 * 540,
+                                        std::numeric_limits<int>::max());
   // Expect cropping to 16:9 format and no scaling.
   EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 480, 0,
                                             &cropped_width_, &cropped_height_,
@@ -924,8 +986,9 @@
   EXPECT_EQ(360, out_height_);
 
   // Try to adapt up one step more.
-  adapter_.OnResolutionRequest(rtc::Optional<int>(960 * 540),
-                               rtc::Optional<int>(1280 * 720));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(960 * 540),
+                                        1280 * 720,
+                                        std::numeric_limits<int>::max());
   // Expect cropping to 16:9 format and no scaling.
   EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 480, 0,
                                             &cropped_width_, &cropped_height_,
@@ -940,8 +1003,9 @@
   // Ask for 640x360 (16:9 aspect), with 3/16 scaling.
   adapter_.OnOutputFormatRequest(
       VideoFormat(640, 360, 0, FOURCC_I420));
-  adapter_.OnResolutionRequest(rtc::Optional<int>(),
-                               rtc::Optional<int>(640 * 360 * 3 / 16 * 3 / 16));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(),
+                                        640 * 360 * 3 / 16 * 3 / 16,
+                                        std::numeric_limits<int>::max());
 
   // Send 640x480 (4:3 aspect).
   EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 480, 0,
@@ -961,8 +1025,9 @@
   const int w = 1920;
   const int h = 1080;
   adapter_.OnOutputFormatRequest(VideoFormat(w, h, 0, FOURCC_I420));
-  adapter_.OnResolutionRequest(rtc::Optional<int>(),
-                               rtc::Optional<int>(w * h * 1 / 16 * 1 / 16));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(),
+                                        w * h * 1 / 16 * 1 / 16,
+                                        std::numeric_limits<int>::max());
 
   // Send 1920x1080 (16:9 aspect).
   EXPECT_TRUE(adapter_.AdaptFrameResolution(
@@ -976,8 +1041,9 @@
   EXPECT_EQ(67, out_height_);
 
   // Adapt back up one step to 3/32.
-  adapter_.OnResolutionRequest(rtc::Optional<int>(w * h * 3 / 32 * 3 / 32),
-                               rtc::Optional<int>(w * h * 1 / 8 * 1 / 8));
+  adapter_.OnResolutionFramerateRequest(
+      rtc::Optional<int>(w * h * 3 / 32 * 3 / 32), w * h * 1 / 8 * 1 / 8,
+      std::numeric_limits<int>::max());
 
   // Send 1920x1080 (16:9 aspect).
   EXPECT_TRUE(adapter_.AdaptFrameResolution(
@@ -997,8 +1063,9 @@
       &cropped_width_, &cropped_height_,
       &out_width_, &out_height_));
 
-  adapter_.OnResolutionRequest(rtc::Optional<int>(960 * 540),
-                               rtc::Optional<int>());
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(960 * 540),
+                                        std::numeric_limits<int>::max(),
+                                        std::numeric_limits<int>::max());
 
   // Still expect all frames to be dropped
   EXPECT_FALSE(adapter_.AdaptFrameResolution(
@@ -1006,8 +1073,8 @@
       &cropped_width_, &cropped_height_,
       &out_width_, &out_height_));
 
-  adapter_.OnResolutionRequest(rtc::Optional<int>(),
-                               rtc::Optional<int>(640 * 480 - 1));
+  adapter_.OnResolutionFramerateRequest(rtc::Optional<int>(), 640 * 480 - 1,
+                                        std::numeric_limits<int>::max());
 
   // Still expect all frames to be dropped
   EXPECT_FALSE(adapter_.AdaptFrameResolution(
@@ -1019,8 +1086,9 @@
 // Test that we will adapt to max given a target pixel count close to max.
 TEST_F(VideoAdapterTest, TestAdaptToMax) {
   adapter_.OnOutputFormatRequest(VideoFormat(640, 360, 0, FOURCC_I420));
-  adapter_.OnResolutionRequest(rtc::Optional<int>(640 * 360 - 1) /* target */,
-                               rtc::Optional<int>());
+  adapter_.OnResolutionFramerateRequest(
+      rtc::Optional<int>(640 * 360 - 1) /* target */,
+      std::numeric_limits<int>::max(), std::numeric_limits<int>::max());
 
   EXPECT_TRUE(adapter_.AdaptFrameResolution(640, 360, 0, &cropped_width_,
                                             &cropped_height_, &out_width_,
@@ -1028,5 +1096,4 @@
   EXPECT_EQ(640, out_width_);
   EXPECT_EQ(360, out_height_);
 }
-
 }  // namespace cricket
diff --git a/webrtc/media/base/videobroadcaster.cc b/webrtc/media/base/videobroadcaster.cc
index 5d0edeb..c5b1484 100644
--- a/webrtc/media/base/videobroadcaster.cc
+++ b/webrtc/media/base/videobroadcaster.cc
@@ -84,9 +84,7 @@
       wants.rotation_applied = true;
     }
     // wants.max_pixel_count == MIN(sink.wants.max_pixel_count)
-    if (sink.wants.max_pixel_count &&
-        (!wants.max_pixel_count ||
-         (*sink.wants.max_pixel_count < *wants.max_pixel_count))) {
+    if (sink.wants.max_pixel_count < wants.max_pixel_count) {
       wants.max_pixel_count = sink.wants.max_pixel_count;
     }
     // Select the minimum requested target_pixel_count, if any, of all sinks so
@@ -98,11 +96,15 @@
          (*sink.wants.target_pixel_count < *wants.target_pixel_count))) {
       wants.target_pixel_count = sink.wants.target_pixel_count;
     }
+    // Select the minimum for the requested max framerates.
+    if (sink.wants.max_framerate_fps < wants.max_framerate_fps) {
+      wants.max_framerate_fps = sink.wants.max_framerate_fps;
+    }
   }
 
-  if (wants.max_pixel_count && wants.target_pixel_count &&
-      *wants.target_pixel_count >= *wants.max_pixel_count) {
-    wants.target_pixel_count = wants.max_pixel_count;
+  if (wants.target_pixel_count &&
+      *wants.target_pixel_count >= wants.max_pixel_count) {
+    wants.target_pixel_count.emplace(wants.max_pixel_count);
   }
   current_wants_ = wants;
 }
diff --git a/webrtc/media/base/videobroadcaster_unittest.cc b/webrtc/media/base/videobroadcaster_unittest.cc
index 5274868..ad9ccb7 100644
--- a/webrtc/media/base/videobroadcaster_unittest.cc
+++ b/webrtc/media/base/videobroadcaster_unittest.cc
@@ -87,23 +87,24 @@
 
 TEST(VideoBroadcasterTest, AppliesMinOfSinkWantsMaxPixelCount) {
   VideoBroadcaster broadcaster;
-  EXPECT_TRUE(!broadcaster.wants().max_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            broadcaster.wants().max_pixel_count);
 
   FakeVideoRenderer sink1;
   VideoSinkWants wants1;
-  wants1.max_pixel_count = rtc::Optional<int>(1280 * 720);
+  wants1.max_pixel_count = 1280 * 720;
 
   broadcaster.AddOrUpdateSink(&sink1, wants1);
-  EXPECT_EQ(1280 * 720, *broadcaster.wants().max_pixel_count);
+  EXPECT_EQ(1280 * 720, broadcaster.wants().max_pixel_count);
 
   FakeVideoRenderer sink2;
   VideoSinkWants wants2;
-  wants2.max_pixel_count = rtc::Optional<int>(640 * 360);
+  wants2.max_pixel_count = 640 * 360;
   broadcaster.AddOrUpdateSink(&sink2, wants2);
-  EXPECT_EQ(640 * 360, *broadcaster.wants().max_pixel_count);
+  EXPECT_EQ(640 * 360, broadcaster.wants().max_pixel_count);
 
   broadcaster.RemoveSink(&sink2);
-  EXPECT_EQ(1280 * 720, *broadcaster.wants().max_pixel_count);
+  EXPECT_EQ(1280 * 720, broadcaster.wants().max_pixel_count);
 }
 
 TEST(VideoBroadcasterTest, AppliesMinOfSinkWantsMaxAndTargetPixelCount) {
@@ -127,6 +128,28 @@
   EXPECT_EQ(1280 * 720, *broadcaster.wants().target_pixel_count);
 }
 
+TEST(VideoBroadcasterTest, AppliesMinOfSinkWantsMaxFramerate) {
+  VideoBroadcaster broadcaster;
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            broadcaster.wants().max_framerate_fps);
+
+  FakeVideoRenderer sink1;
+  VideoSinkWants wants1;
+  wants1.max_framerate_fps = 30;
+
+  broadcaster.AddOrUpdateSink(&sink1, wants1);
+  EXPECT_EQ(30, broadcaster.wants().max_framerate_fps);
+
+  FakeVideoRenderer sink2;
+  VideoSinkWants wants2;
+  wants2.max_framerate_fps = 15;
+  broadcaster.AddOrUpdateSink(&sink2, wants2);
+  EXPECT_EQ(15, broadcaster.wants().max_framerate_fps);
+
+  broadcaster.RemoveSink(&sink2);
+  EXPECT_EQ(30, broadcaster.wants().max_framerate_fps);
+}
+
 TEST(VideoBroadcasterTest, SinkWantsBlackFrames) {
   VideoBroadcaster broadcaster;
   EXPECT_TRUE(!broadcaster.wants().black_frames);
diff --git a/webrtc/media/base/videocapturer.cc b/webrtc/media/base/videocapturer.cc
index efd075d..704840b 100644
--- a/webrtc/media/base/videocapturer.cc
+++ b/webrtc/media/base/videocapturer.cc
@@ -149,8 +149,9 @@
   apply_rotation_ = wants.rotation_applied;
 
   if (video_adapter()) {
-    video_adapter()->OnResolutionRequest(wants.target_pixel_count,
-                                         wants.max_pixel_count);
+    video_adapter()->OnResolutionFramerateRequest(wants.target_pixel_count,
+                                                  wants.max_pixel_count,
+                                                  wants.max_framerate_fps);
   }
 }
 
diff --git a/webrtc/media/base/videocapturer_unittest.cc b/webrtc/media/base/videocapturer_unittest.cc
index 0e6f992..2dc5cec 100644
--- a/webrtc/media/base/videocapturer_unittest.cc
+++ b/webrtc/media/base/videocapturer_unittest.cc
@@ -266,7 +266,7 @@
   // with less than or equal to |wants.max_pixel_count| depending on how the
   // capturer can scale the input frame size.
   rtc::VideoSinkWants wants;
-  wants.max_pixel_count = rtc::Optional<int>(1280 * 720 * 3 / 5);
+  wants.max_pixel_count = 1280 * 720 * 3 / 5;
   capturer_->AddOrUpdateSink(&renderer_, wants);
   EXPECT_TRUE(capturer_->CaptureFrame());
   EXPECT_EQ(2, renderer_.num_rendered_frames());
@@ -274,8 +274,7 @@
   EXPECT_EQ(540, renderer_.height());
 
   // Request a lower resolution.
-  wants.max_pixel_count =
-      rtc::Optional<int>((renderer_.width() * renderer_.height() * 3) / 5);
+  wants.max_pixel_count = (renderer_.width() * renderer_.height() * 3) / 5;
   capturer_->AddOrUpdateSink(&renderer_, wants);
   EXPECT_TRUE(capturer_->CaptureFrame());
   EXPECT_EQ(3, renderer_.num_rendered_frames());
@@ -294,8 +293,8 @@
   EXPECT_EQ(360, renderer2.height());
 
   // Request higher resolution.
-  wants.target_pixel_count.emplace((*wants.max_pixel_count * 5) / 3);
-  wants.max_pixel_count.emplace(*wants.max_pixel_count * 4);
+  wants.target_pixel_count.emplace((wants.max_pixel_count * 5) / 3);
+  wants.max_pixel_count = wants.max_pixel_count * 4;
   capturer_->AddOrUpdateSink(&renderer_, wants);
   EXPECT_TRUE(capturer_->CaptureFrame());
   EXPECT_EQ(5, renderer_.num_rendered_frames());
diff --git a/webrtc/media/base/videosourceinterface.h b/webrtc/media/base/videosourceinterface.h
index 0ea1c60..e7c0d38 100644
--- a/webrtc/media/base/videosourceinterface.h
+++ b/webrtc/media/base/videosourceinterface.h
@@ -27,13 +27,15 @@
   bool black_frames = false;
 
   // Tells the source the maximum number of pixels the sink wants.
-  rtc::Optional<int> max_pixel_count;
+  int max_pixel_count = std::numeric_limits<int>::max();
   // Tells the source the desired number of pixels the sinks wants. This will
   // typically be used when stepping the resolution up again when conditions
   // have improved after an earlier downgrade. The source should select the
   // closest resolution to this pixel count, but if max_pixel_count is set, it
   // still sets the absolute upper bound.
   rtc::Optional<int> target_pixel_count;
+  // Tells the source the maximum framerate the sink wants.
+  int max_framerate_fps = std::numeric_limits<int>::max();
 };
 
 template <typename VideoFrameT>
diff --git a/webrtc/media/engine/fakewebrtccall.cc b/webrtc/media/engine/fakewebrtccall.cc
index 9a05ae6..9ada1b6 100644
--- a/webrtc/media/engine/fakewebrtccall.cc
+++ b/webrtc/media/engine/fakewebrtccall.cc
@@ -108,6 +108,7 @@
       config_(std::move(config)),
       codec_settings_set_(false),
       resolution_scaling_enabled_(false),
+      framerate_scaling_enabled_(false),
       source_(nullptr),
       num_swapped_frames_(0) {
   RTC_DCHECK(config.encoder_settings.encoder != NULL);
@@ -252,9 +253,24 @@
   if (source_)
     source_->RemoveSink(this);
   source_ = source;
-  resolution_scaling_enabled_ =
-      degradation_preference !=
-      webrtc::VideoSendStream::DegradationPreference::kMaintainResolution;
+  switch (degradation_preference) {
+    case DegradationPreference::kMaintainFramerate:
+      resolution_scaling_enabled_ = true;
+      framerate_scaling_enabled_ = false;
+      break;
+    case DegradationPreference::kMaintainResolution:
+      resolution_scaling_enabled_ = false;
+      framerate_scaling_enabled_ = true;
+      break;
+    case DegradationPreference::kBalanced:
+      resolution_scaling_enabled_ = true;
+      framerate_scaling_enabled_ = true;
+      break;
+    case DegradationPreference::kDegradationDisabled:
+      resolution_scaling_enabled_ = false;
+      framerate_scaling_enabled_ = false;
+      break;
+  }
   if (source)
     source->AddOrUpdateSink(this, resolution_scaling_enabled_
                                       ? sink_wants_
@@ -333,7 +349,9 @@
       audio_network_state_(webrtc::kNetworkUp),
       video_network_state_(webrtc::kNetworkUp),
       num_created_send_streams_(0),
-      num_created_receive_streams_(0) {}
+      num_created_receive_streams_(0),
+      audio_transport_overhead_(0),
+      video_transport_overhead_(0) {}
 
 FakeCall::~FakeCall() {
   EXPECT_EQ(0u, video_send_streams_.size());
diff --git a/webrtc/media/engine/fakewebrtccall.h b/webrtc/media/engine/fakewebrtccall.h
index 1c9212d..6b25422 100644
--- a/webrtc/media/engine/fakewebrtccall.h
+++ b/webrtc/media/engine/fakewebrtccall.h
@@ -138,6 +138,7 @@
   bool resolution_scaling_enabled() const {
     return resolution_scaling_enabled_;
   }
+  bool framerate_scaling_enabled() const { return framerate_scaling_enabled_; }
   void InjectVideoSinkWants(const rtc::VideoSinkWants& wants);
 
   rtc::VideoSourceInterface<webrtc::VideoFrame>* source() const {
@@ -169,6 +170,7 @@
     webrtc::VideoCodecVP9 vp9;
   } vpx_settings_;
   bool resolution_scaling_enabled_;
+  bool framerate_scaling_enabled_;
   rtc::VideoSourceInterface<webrtc::VideoFrame>* source_;
   int num_swapped_frames_;
   rtc::Optional<webrtc::VideoFrame> last_frame_;
diff --git a/webrtc/media/engine/webrtcvideoengine2.cc b/webrtc/media/engine/webrtcvideoengine2.cc
index f5682edf..fceff89 100644
--- a/webrtc/media/engine/webrtcvideoengine2.cc
+++ b/webrtc/media/engine/webrtcvideoengine2.cc
@@ -38,9 +38,10 @@
 #include "webrtc/video_decoder.h"
 #include "webrtc/video_encoder.h"
 
+using DegradationPreference = webrtc::VideoSendStream::DegradationPreference;
+
 namespace cricket {
 namespace {
-
 // If this field trial is enabled, we will enable sending FlexFEC and disable
 // sending ULPFEC whenever the former has been negotiated. Receiving FlexFEC
 // is enabled whenever FlexFEC has been negotiated.
@@ -1637,26 +1638,35 @@
   }
 
   if (source_ && stream_) {
-    stream_->SetSource(
-        nullptr, webrtc::VideoSendStream::DegradationPreference::kBalanced);
+    stream_->SetSource(nullptr, DegradationPreference::kDegradationDisabled);
   }
   // Switch to the new source.
   source_ = source;
   if (source && stream_) {
-    // Do not adapt resolution for screen content as this will likely
-    // result in blurry and unreadable text.
-    // |this| acts like a VideoSource to make sure SinkWants are handled on the
-    // correct thread.
-    stream_->SetSource(
-        this, enable_cpu_overuse_detection_ &&
-                      !parameters_.options.is_screencast.value_or(false)
-                  ? webrtc::VideoSendStream::DegradationPreference::kBalanced
-                  : webrtc::VideoSendStream::DegradationPreference::
-                        kMaintainResolution);
+    stream_->SetSource(this, GetDegradationPreference());
   }
   return true;
 }
 
+webrtc::VideoSendStream::DegradationPreference
+WebRtcVideoChannel2::WebRtcVideoSendStream::GetDegradationPreference() const {
+  // Do not adapt resolution for screen content as this will likely
+  // result in blurry and unreadable text.
+  // |this| acts like a VideoSource to make sure SinkWants are handled on the
+  // correct thread.
+  DegradationPreference degradation_preference;
+  if (!enable_cpu_overuse_detection_) {
+    degradation_preference = DegradationPreference::kDegradationDisabled;
+  } else {
+    if (parameters_.options.is_screencast.value_or(false)) {
+      degradation_preference = DegradationPreference::kMaintainResolution;
+    } else {
+      degradation_preference = DegradationPreference::kMaintainFramerate;
+    }
+  }
+  return degradation_preference;
+}
+
 const std::vector<uint32_t>&
 WebRtcVideoChannel2::WebRtcVideoSendStream::GetSsrcs() const {
   return ssrcs_;
@@ -2094,16 +2104,7 @@
   parameters_.encoder_config.encoder_specific_settings = NULL;
 
   if (source_) {
-    // Do not adapt resolution for screen content as this will likely result in
-    // blurry and unreadable text.
-    // |this| acts like a VideoSource to make sure SinkWants are handled on the
-    // correct thread.
-    stream_->SetSource(
-        this, enable_cpu_overuse_detection_ &&
-                      !parameters_.options.is_screencast.value_or(false)
-                  ? webrtc::VideoSendStream::DegradationPreference::kBalanced
-                  : webrtc::VideoSendStream::DegradationPreference::
-                        kMaintainResolution);
+    stream_->SetSource(this, GetDegradationPreference());
   }
 
   // Call stream_->Start() if necessary conditions are met.
diff --git a/webrtc/media/engine/webrtcvideoengine2.h b/webrtc/media/engine/webrtcvideoengine2.h
index 309a84d..0a2f4a5 100644
--- a/webrtc/media/engine/webrtcvideoengine2.h
+++ b/webrtc/media/engine/webrtcvideoengine2.h
@@ -324,6 +324,9 @@
     // and whether or not the encoding in |rtp_parameters_| is active.
     void UpdateSendState();
 
+    webrtc::VideoSendStream::DegradationPreference GetDegradationPreference()
+        const EXCLUSIVE_LOCKS_REQUIRED(&thread_checker_);
+
     rtc::ThreadChecker thread_checker_;
     rtc::AsyncInvoker invoker_;
     rtc::Thread* worker_thread_;
diff --git a/webrtc/media/engine/webrtcvideoengine2_unittest.cc b/webrtc/media/engine/webrtcvideoengine2_unittest.cc
index 47f912b..a80aa12 100644
--- a/webrtc/media/engine/webrtcvideoengine2_unittest.cc
+++ b/webrtc/media/engine/webrtcvideoengine2_unittest.cc
@@ -2115,8 +2115,8 @@
 
   // Trigger overuse.
   rtc::VideoSinkWants wants;
-  wants.max_pixel_count = rtc::Optional<int>(
-      send_stream->GetLastWidth() * send_stream->GetLastHeight() - 1);
+  wants.max_pixel_count =
+      send_stream->GetLastWidth() * send_stream->GetLastHeight() - 1;
   send_stream->InjectVideoSinkWants(wants);
   EXPECT_TRUE(capturer.CaptureCustomFrame(1280, 720, cricket::FOURCC_I420));
   EXPECT_EQ(2, send_stream->GetNumberOfSwappedFrames());
@@ -2124,8 +2124,8 @@
   EXPECT_EQ(720 * 3 / 4, send_stream->GetLastHeight());
 
   // Trigger overuse again.
-  wants.max_pixel_count = rtc::Optional<int>(
-      send_stream->GetLastWidth() * send_stream->GetLastHeight() - 1);
+  wants.max_pixel_count =
+      send_stream->GetLastWidth() * send_stream->GetLastHeight() - 1;
   send_stream->InjectVideoSinkWants(wants);
   EXPECT_TRUE(capturer.CaptureCustomFrame(1280, 720, cricket::FOURCC_I420));
   EXPECT_EQ(3, send_stream->GetNumberOfSwappedFrames());
@@ -2143,7 +2143,7 @@
       send_stream->GetLastWidth() * send_stream->GetLastHeight();
   // Cap the max to 4x the pixel count (assuming max 1/2 x 1/2 scale downs)
   // of the current stream, so we don't take too large steps.
-  wants.max_pixel_count = rtc::Optional<int>(current_pixel_count * 4);
+  wants.max_pixel_count = current_pixel_count * 4;
   // Default step down is 3/5 pixel count, so go up by 5/3.
   wants.target_pixel_count = rtc::Optional<int>((current_pixel_count * 5) / 3);
   send_stream->InjectVideoSinkWants(wants);
@@ -2155,7 +2155,7 @@
   // Trigger underuse again, should go back up to full resolution.
   current_pixel_count =
       send_stream->GetLastWidth() * send_stream->GetLastHeight();
-  wants.max_pixel_count = rtc::Optional<int>(current_pixel_count * 4);
+  wants.max_pixel_count = current_pixel_count * 4;
   wants.target_pixel_count = rtc::Optional<int>((current_pixel_count * 5) / 3);
   send_stream->InjectVideoSinkWants(wants);
   EXPECT_TRUE(capturer.CaptureCustomFrame(1284, 724, cricket::FOURCC_I420));
@@ -2199,8 +2199,8 @@
 
   // Trigger overuse.
   rtc::VideoSinkWants wants;
-  wants.max_pixel_count = rtc::Optional<int>(
-      send_stream->GetLastWidth() * send_stream->GetLastHeight() - 1);
+  wants.max_pixel_count =
+      send_stream->GetLastWidth() * send_stream->GetLastHeight() - 1;
   send_stream->InjectVideoSinkWants(wants);
   EXPECT_TRUE(capturer.CaptureCustomFrame(1280, 720, cricket::FOURCC_I420));
   EXPECT_EQ(2, send_stream->GetNumberOfSwappedFrames());
@@ -2242,6 +2242,7 @@
 
 void WebRtcVideoChannel2Test::TestCpuAdaptation(bool enable_overuse,
                                                 bool is_screenshare) {
+  const int kDefaultFps = 30;
   cricket::VideoCodec codec = GetEngineCodec("VP8");
   cricket::VideoSendParameters parameters;
   parameters.codecs.push_back(codec);
@@ -2263,14 +2264,17 @@
   options.is_screencast = rtc::Optional<bool>(is_screenshare);
   EXPECT_TRUE(channel_->SetVideoSend(last_ssrc_, true, &options, &capturer));
   cricket::VideoFormat capture_format = capturer.GetSupportedFormats()->front();
+  capture_format.interval = rtc::kNumNanosecsPerSec / kDefaultFps;
   EXPECT_EQ(cricket::CS_RUNNING, capturer.Start(capture_format));
 
   EXPECT_TRUE(channel_->SetSend(true));
 
   FakeVideoSendStream* send_stream = fake_call_->GetVideoSendStreams().front();
 
-  if (!enable_overuse || is_screenshare) {
+  if (!enable_overuse) {
     EXPECT_FALSE(send_stream->resolution_scaling_enabled());
+    EXPECT_FALSE(send_stream->framerate_scaling_enabled());
+    // Neither resolution nor framerate scaling when overuse detection is off.
 
     EXPECT_TRUE(capturer.CaptureFrame());
     EXPECT_EQ(1, send_stream->GetNumberOfSwappedFrames());
@@ -2282,33 +2286,59 @@
     return;
   }
 
-  EXPECT_TRUE(send_stream->resolution_scaling_enabled());
+  if (is_screenshare) {
+    EXPECT_FALSE(send_stream->resolution_scaling_enabled());
+    EXPECT_TRUE(send_stream->framerate_scaling_enabled());
+  } else {
+    EXPECT_TRUE(send_stream->resolution_scaling_enabled());
+    EXPECT_FALSE(send_stream->framerate_scaling_enabled());
+  }
+
   // Trigger overuse.
   ASSERT_EQ(1u, fake_call_->GetVideoSendStreams().size());
 
   rtc::VideoSinkWants wants;
-  wants.max_pixel_count =
-      rtc::Optional<int>(capture_format.width * capture_format.height - 1);
+  if (is_screenshare) {
+    wants.max_framerate_fps = (kDefaultFps * 2) / 3;
+  } else {
+    wants.max_pixel_count = capture_format.width * capture_format.height - 1;
+  }
   send_stream->InjectVideoSinkWants(wants);
 
-  EXPECT_TRUE(capturer.CaptureFrame());
-  EXPECT_EQ(1, send_stream->GetNumberOfSwappedFrames());
+  for (int i = 0; i < kDefaultFps; ++i)
+    EXPECT_TRUE(capturer.CaptureFrame());
 
-  EXPECT_TRUE(capturer.CaptureFrame());
-  EXPECT_EQ(2, send_stream->GetNumberOfSwappedFrames());
-
-  EXPECT_LT(send_stream->GetLastWidth(), capture_format.width);
-  EXPECT_LT(send_stream->GetLastHeight(), capture_format.height);
+  if (is_screenshare) {
+    // Drops every third frame.
+    EXPECT_EQ(kDefaultFps * 2 / 3, send_stream->GetNumberOfSwappedFrames());
+    EXPECT_EQ(send_stream->GetLastWidth(), capture_format.width);
+    EXPECT_EQ(send_stream->GetLastHeight(), capture_format.height);
+  } else {
+    EXPECT_EQ(kDefaultFps, send_stream->GetNumberOfSwappedFrames());
+    EXPECT_LT(send_stream->GetLastWidth(), capture_format.width);
+    EXPECT_LT(send_stream->GetLastHeight(), capture_format.height);
+  }
 
   // Trigger underuse which should go back to normal resolution.
   int last_pixel_count =
       send_stream->GetLastWidth() * send_stream->GetLastHeight();
-  wants.max_pixel_count = rtc::Optional<int>(last_pixel_count * 4);
-  wants.target_pixel_count = rtc::Optional<int>((last_pixel_count * 5) / 3);
+  if (is_screenshare) {
+    wants.max_framerate_fps = kDefaultFps;
+  } else {
+    wants.max_pixel_count = last_pixel_count * 4;
+    wants.target_pixel_count.emplace((last_pixel_count * 5) / 3);
+  }
   send_stream->InjectVideoSinkWants(wants);
 
-  EXPECT_TRUE(capturer.CaptureFrame());
-  EXPECT_EQ(3, send_stream->GetNumberOfSwappedFrames());
+  for (int i = 0; i < kDefaultFps; ++i)
+    EXPECT_TRUE(capturer.CaptureFrame());
+
+  if (is_screenshare) {
+    EXPECT_EQ(kDefaultFps + (kDefaultFps * 2 / 3),
+              send_stream->GetNumberOfSwappedFrames());
+  } else {
+    EXPECT_EQ(kDefaultFps * 2, send_stream->GetNumberOfSwappedFrames());
+  }
 
   EXPECT_EQ(capture_format.width, send_stream->GetLastWidth());
   EXPECT_EQ(capture_format.height, send_stream->GetLastHeight());
diff --git a/webrtc/test/BUILD.gn b/webrtc/test/BUILD.gn
index 322f839..51ff9bc 100644
--- a/webrtc/test/BUILD.gn
+++ b/webrtc/test/BUILD.gn
@@ -43,6 +43,7 @@
     "frame_utils.h",
     "vcm_capturer.cc",
     "vcm_capturer.h",
+    "video_capturer.cc",
     "video_capturer.h",
   ]
 
@@ -53,6 +54,7 @@
 
   deps = [
     "../common_video",
+    "../media:rtc_media_base",
     "../modules/video_capture:video_capture_module",
   ]
 }
diff --git a/webrtc/test/call_test.cc b/webrtc/test/call_test.cc
index 10c6438..352b7fc 100644
--- a/webrtc/test/call_test.cc
+++ b/webrtc/test/call_test.cc
@@ -285,7 +285,7 @@
       width, height, framerate * speed, clock));
   video_send_stream_->SetSource(
       frame_generator_capturer_.get(),
-      VideoSendStream::DegradationPreference::kBalanced);
+      VideoSendStream::DegradationPreference::kMaintainFramerate);
 }
 
 void CallTest::CreateFrameGeneratorCapturer(int framerate,
@@ -295,7 +295,7 @@
       test::FrameGeneratorCapturer::Create(width, height, framerate, clock_));
   video_send_stream_->SetSource(
       frame_generator_capturer_.get(),
-      VideoSendStream::DegradationPreference::kBalanced);
+      VideoSendStream::DegradationPreference::kMaintainFramerate);
 }
 
 void CallTest::CreateFakeAudioDevices() {
diff --git a/webrtc/test/frame_generator_capturer.cc b/webrtc/test/frame_generator_capturer.cc
index 9e74e40..d16d8c8 100644
--- a/webrtc/test/frame_generator_capturer.cc
+++ b/webrtc/test/frame_generator_capturer.cc
@@ -37,30 +37,47 @@
 
  private:
   bool Run() override {
+    bool task_completed = true;
     if (repeat_interval_ms_ > 0) {
-      int64_t delay_ms;
-      int64_t time_now_ms = rtc::TimeMillis();
-      if (intended_run_time_ms_ > 0) {
-        delay_ms = time_now_ms - intended_run_time_ms_;
-      } else {
-        delay_ms = 0;
-        intended_run_time_ms_ = time_now_ms;
-      }
-      intended_run_time_ms_ += repeat_interval_ms_;
-      if (delay_ms < repeat_interval_ms_) {
+      // This is not a one-off frame. Check if the frame interval for this
+      // task queue is the same as the currently configured frame rate.
+      uint32_t current_interval_ms =
+          1000 / frame_generator_capturer_->GetCurrentConfiguredFramerate();
+      if (repeat_interval_ms_ != current_interval_ms) {
+        // Frame rate has changed since task was started, create a new instance.
         rtc::TaskQueue::Current()->PostDelayedTask(
-            std::unique_ptr<rtc::QueuedTask>(this),
-            repeat_interval_ms_ - delay_ms);
+            std::unique_ptr<rtc::QueuedTask>(new InsertFrameTask(
+                frame_generator_capturer_, current_interval_ms)),
+            current_interval_ms);
       } else {
-        rtc::TaskQueue::Current()->PostDelayedTask(
-            std::unique_ptr<rtc::QueuedTask>(this), 0);
-        LOG(LS_ERROR)
-            << "Frame Generator Capturer can't keep up with requested fps";
+        // Schedule the next frame capture event to happen at approximately the
+        // correct absolute time point.
+        int64_t delay_ms;
+        int64_t time_now_ms = rtc::TimeMillis();
+        if (intended_run_time_ms_ > 0) {
+          delay_ms = time_now_ms - intended_run_time_ms_;
+        } else {
+          delay_ms = 0;
+          intended_run_time_ms_ = time_now_ms;
+        }
+        intended_run_time_ms_ += repeat_interval_ms_;
+        if (delay_ms < repeat_interval_ms_) {
+          rtc::TaskQueue::Current()->PostDelayedTask(
+              std::unique_ptr<rtc::QueuedTask>(this),
+              repeat_interval_ms_ - delay_ms);
+        } else {
+          rtc::TaskQueue::Current()->PostDelayedTask(
+              std::unique_ptr<rtc::QueuedTask>(this), 0);
+          LOG(LS_ERROR)
+              << "Frame Generator Capturer can't keep up with requested fps";
+        }
+        // Repost of this instance, make sure it is not deleted.
+        task_completed = false;
       }
     }
     frame_generator_capturer_->InsertFrame();
     // Task should be deleted only if it's not repeating.
-    return repeat_interval_ms_ == 0;
+    return task_completed;
   }
 
   webrtc::test::FrameGeneratorCapturer* const frame_generator_capturer_;
@@ -72,14 +89,12 @@
                                                        int height,
                                                        int target_fps,
                                                        Clock* clock) {
-  FrameGeneratorCapturer* capturer = new FrameGeneratorCapturer(
-      clock, FrameGenerator::CreateSquareGenerator(width, height), target_fps);
-  if (!capturer->Init()) {
-    delete capturer;
-    return NULL;
-  }
+  std::unique_ptr<FrameGeneratorCapturer> capturer(new FrameGeneratorCapturer(
+      clock, FrameGenerator::CreateSquareGenerator(width, height), target_fps));
+  if (!capturer->Init())
+    return nullptr;
 
-  return capturer;
+  return capturer.release();
 }
 
 FrameGeneratorCapturer* FrameGeneratorCapturer::CreateFromYuvFile(
@@ -88,16 +103,15 @@
     size_t height,
     int target_fps,
     Clock* clock) {
-  FrameGeneratorCapturer* capturer = new FrameGeneratorCapturer(
-      clock, FrameGenerator::CreateFromYuvFile(
-                 std::vector<std::string>(1, file_name), width, height, 1),
-      target_fps);
-  if (!capturer->Init()) {
-    delete capturer;
-    return NULL;
-  }
+  std::unique_ptr<FrameGeneratorCapturer> capturer(new FrameGeneratorCapturer(
+      clock,
+      FrameGenerator::CreateFromYuvFile(std::vector<std::string>(1, file_name),
+                                        width, height, 1),
+      target_fps));
+  if (!capturer->Init())
+    return nullptr;
 
-  return capturer;
+  return capturer.release();
 }
 
 FrameGeneratorCapturer::FrameGeneratorCapturer(
@@ -129,29 +143,32 @@
 bool FrameGeneratorCapturer::Init() {
   // This check is added because frame_generator_ might be file based and should
   // not crash because a file moved.
-  if (frame_generator_.get() == NULL)
+  if (frame_generator_.get() == nullptr)
     return false;
 
+  int framerate_fps = GetCurrentConfiguredFramerate();
   task_queue_.PostDelayedTask(
       std::unique_ptr<rtc::QueuedTask>(
-          new InsertFrameTask(this, 1000 / target_fps_)),
-      1000 / target_fps_);
+          new InsertFrameTask(this, 1000 / framerate_fps)),
+      1000 / framerate_fps);
 
   return true;
 }
 
 void FrameGeneratorCapturer::InsertFrame() {
-  {
-    rtc::CritScope cs(&lock_);
-    if (sending_) {
-      VideoFrame* frame = frame_generator_->NextFrame();
-      frame->set_ntp_time_ms(clock_->CurrentNtpInMilliseconds());
-      frame->set_rotation(fake_rotation_);
-      if (first_frame_capture_time_ == -1) {
-        first_frame_capture_time_ = frame->ntp_time_ms();
-      }
-      if (sink_)
-        sink_->OnFrame(*frame);
+  rtc::CritScope cs(&lock_);
+  if (sending_) {
+    VideoFrame* frame = frame_generator_->NextFrame();
+    frame->set_ntp_time_ms(clock_->CurrentNtpInMilliseconds());
+    frame->set_rotation(fake_rotation_);
+    if (first_frame_capture_time_ == -1) {
+      first_frame_capture_time_ = frame->ntp_time_ms();
+    }
+
+    if (sink_) {
+      rtc::Optional<VideoFrame> out_frame = AdaptFrame(*frame);
+      if (out_frame)
+        sink_->OnFrame(*out_frame);
     }
   }
 }
@@ -185,6 +202,19 @@
   sink_ = sink;
   if (sink_wants_observer_)
     sink_wants_observer_->OnSinkWantsChanged(sink, wants);
+
+  // Handle framerate within this class, just pass on resolution for possible
+  // adaptation.
+  rtc::VideoSinkWants resolution_wants = wants;
+  resolution_wants.max_framerate_fps = std::numeric_limits<int>::max();
+  VideoCapturer::AddOrUpdateSink(sink, resolution_wants);
+
+  // Ignore any requests for framerate higher than initially configured.
+  if (wants.max_framerate_fps < target_fps_) {
+    wanted_fps_.emplace(wants.max_framerate_fps);
+  } else {
+    wanted_fps_.reset();
+  }
 }
 
 void FrameGeneratorCapturer::RemoveSink(
@@ -201,5 +231,12 @@
       std::unique_ptr<rtc::QueuedTask>(new InsertFrameTask(this, 0)));
 }
 
+int FrameGeneratorCapturer::GetCurrentConfiguredFramerate() {
+  rtc::CritScope cs(&lock_);
+  if (wanted_fps_ && *wanted_fps_ < target_fps_)
+    return *wanted_fps_;
+  return target_fps_;
+}
+
 }  // namespace test
 }  // namespace webrtc
diff --git a/webrtc/test/frame_generator_capturer.h b/webrtc/test/frame_generator_capturer.h
index 75e4849..c1eca16 100644
--- a/webrtc/test/frame_generator_capturer.h
+++ b/webrtc/test/frame_generator_capturer.h
@@ -78,6 +78,7 @@
 
   void InsertFrame();
   static bool Run(void* obj);
+  int GetCurrentConfiguredFramerate();
 
   Clock* const clock_;
   bool sending_;
@@ -87,7 +88,8 @@
   rtc::CriticalSection lock_;
   std::unique_ptr<FrameGenerator> frame_generator_;
 
-  int target_fps_;
+  int target_fps_ GUARDED_BY(&lock_);
+  rtc::Optional<int> wanted_fps_ GUARDED_BY(&lock_);
   VideoRotation fake_rotation_ = kVideoRotation_0;
 
   int64_t first_frame_capture_time_;
diff --git a/webrtc/test/vcm_capturer.cc b/webrtc/test/vcm_capturer.cc
index 535e9bf..d66cf23 100644
--- a/webrtc/test/vcm_capturer.cc
+++ b/webrtc/test/vcm_capturer.cc
@@ -10,17 +10,18 @@
 
 #include "webrtc/test/vcm_capturer.h"
 
+#include "webrtc/base/logging.h"
 #include "webrtc/modules/video_capture/video_capture_factory.h"
 #include "webrtc/video_send_stream.h"
 
 namespace webrtc {
 namespace test {
 
-VcmCapturer::VcmCapturer() : started_(false), sink_(nullptr), vcm_(NULL) {}
+VcmCapturer::VcmCapturer() : started_(false), sink_(nullptr), vcm_(nullptr) {}
 
 bool VcmCapturer::Init(size_t width, size_t height, size_t target_fps) {
-  VideoCaptureModule::DeviceInfo* device_info =
-      VideoCaptureFactory::CreateDeviceInfo();
+  std::unique_ptr<VideoCaptureModule::DeviceInfo> device_info(
+      VideoCaptureFactory::CreateDeviceInfo());
 
   char device_name[256];
   char unique_name[256];
@@ -35,7 +36,6 @@
   vcm_->RegisterCaptureDataCallback(this);
 
   device_info->GetCapability(vcm_->CurrentDeviceName(), 0, capability_);
-  delete device_info;
 
   capability_.width = static_cast<int32_t>(width);
   capability_.height = static_cast<int32_t>(height);
@@ -47,7 +47,7 @@
     return false;
   }
 
-  assert(vcm_->CaptureStarted());
+  RTC_CHECK(vcm_->CaptureStarted());
 
   return true;
 }
@@ -55,13 +55,13 @@
 VcmCapturer* VcmCapturer::Create(size_t width,
                                  size_t height,
                                  size_t target_fps) {
-  VcmCapturer* vcm_capturer = new VcmCapturer();
+  std::unique_ptr<VcmCapturer> vcm_capturer(new VcmCapturer());
   if (!vcm_capturer->Init(width, height, target_fps)) {
-    // TODO(pbos): Log a warning that this failed.
-    delete vcm_capturer;
-    return NULL;
+    LOG(LS_WARNING) << "Failed to create VcmCapturer(w = " << width
+                    << ", h = " << height << ", fps = " << target_fps << ")";
+    return nullptr;
   }
-  return vcm_capturer;
+  return vcm_capturer.release();
 }
 
 
@@ -80,6 +80,7 @@
   rtc::CritScope lock(&crit_);
   RTC_CHECK(!sink_ || sink_ == sink);
   sink_ = sink;
+  VideoCapturer::AddOrUpdateSink(sink, wants);
 }
 
 void VcmCapturer::RemoveSink(rtc::VideoSinkInterface<VideoFrame>* sink) {
@@ -102,8 +103,11 @@
 
 void VcmCapturer::OnFrame(const VideoFrame& frame) {
   rtc::CritScope lock(&crit_);
-  if (started_ && sink_)
-    sink_->OnFrame(frame);
+  if (started_ && sink_) {
+    rtc::Optional<VideoFrame> out_frame = AdaptFrame(frame);
+    if (out_frame)
+      sink_->OnFrame(*out_frame);
+  }
 }
 
 }  // test
diff --git a/webrtc/test/vcm_capturer.h b/webrtc/test/vcm_capturer.h
index c4dae65..5e40c2b 100644
--- a/webrtc/test/vcm_capturer.h
+++ b/webrtc/test/vcm_capturer.h
@@ -10,6 +10,8 @@
 #ifndef WEBRTC_TEST_VCM_CAPTURER_H_
 #define WEBRTC_TEST_VCM_CAPTURER_H_
 
+#include <memory>
+
 #include "webrtc/base/criticalsection.h"
 #include "webrtc/base/scoped_ref_ptr.h"
 #include "webrtc/common_types.h"
diff --git a/webrtc/test/video_capturer.cc b/webrtc/test/video_capturer.cc
new file mode 100644
index 0000000..c8b3826
--- /dev/null
+++ b/webrtc/test/video_capturer.cc
@@ -0,0 +1,58 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/test/video_capturer.h"
+
+#include "webrtc/base/basictypes.h"
+#include "webrtc/base/constructormagic.h"
+
+namespace webrtc {
+namespace test {
+VideoCapturer::VideoCapturer() : video_adapter_(new cricket::VideoAdapter()) {}
+VideoCapturer::~VideoCapturer() {}
+
+rtc::Optional<VideoFrame> VideoCapturer::AdaptFrame(const VideoFrame& frame) {
+  int cropped_width = 0;
+  int cropped_height = 0;
+  int out_width = 0;
+  int out_height = 0;
+
+  if (!video_adapter_->AdaptFrameResolution(
+          frame.width(), frame.height(), frame.timestamp_us() * 1000,
+          &cropped_width, &cropped_height, &out_width, &out_height)) {
+    // Drop frame in order to respect frame rate constraint.
+    return rtc::Optional<VideoFrame>();
+  }
+
+  rtc::Optional<VideoFrame> out_frame;
+  if (out_height != frame.height() || out_width != frame.width()) {
+    // Video adapter has requested a down-scale. Allocate a new buffer and
+    // return scaled version.
+    rtc::scoped_refptr<I420Buffer> scaled_buffer =
+        I420Buffer::Create(out_width, out_height);
+    scaled_buffer->ScaleFrom(*frame.video_frame_buffer().get());
+    out_frame.emplace(
+        VideoFrame(scaled_buffer, kVideoRotation_0, frame.timestamp_us()));
+  } else {
+    // No adaptations needed, just return the frame as is.
+    out_frame.emplace(frame);
+  }
+
+  return out_frame;
+}
+
+void VideoCapturer::AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
+                                    const rtc::VideoSinkWants& wants) {
+  video_adapter_->OnResolutionFramerateRequest(
+      wants.target_pixel_count, wants.max_pixel_count, wants.max_framerate_fps);
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/webrtc/test/video_capturer.h b/webrtc/test/video_capturer.h
index 111f986..667d89c8 100644
--- a/webrtc/test/video_capturer.h
+++ b/webrtc/test/video_capturer.h
@@ -12,23 +12,42 @@
 
 #include <stddef.h>
 
+#include <memory>
+
+#include "webrtc/api/video/i420_buffer.h"
 #include "webrtc/api/video/video_frame.h"
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/optional.h"
+#include "webrtc/media/base/videoadapter.h"
 #include "webrtc/media/base/videosourceinterface.h"
 
+namespace cricket {
+class VideoAdapter;
+}  // namespace cricket
+
 namespace webrtc {
-
 class Clock;
-
 namespace test {
 
 class VideoCapturer : public rtc::VideoSourceInterface<VideoFrame> {
  public:
-  virtual ~VideoCapturer() {}
+  VideoCapturer();
+  virtual ~VideoCapturer();
 
   virtual void Start() = 0;
   virtual void Stop() = 0;
+
+  void AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
+                       const rtc::VideoSinkWants& wants) override;
+
+ protected:
+  rtc::Optional<VideoFrame> AdaptFrame(const VideoFrame& frame);
+  rtc::VideoSinkWants GetSinkWants();
+
+ private:
+  const std::unique_ptr<cricket::VideoAdapter> video_adapter_;
 };
-}  // test
-}  // webrtc
+}  // namespace test
+}  // namespace webrtc
 
 #endif  // WEBRTC_TEST_VIDEO_CAPTURER_H_
diff --git a/webrtc/video/end_to_end_tests.cc b/webrtc/video/end_to_end_tests.cc
index baf7161..cc8fbb6 100644
--- a/webrtc/video/end_to_end_tests.cc
+++ b/webrtc/video/end_to_end_tests.cc
@@ -221,7 +221,8 @@
       test::FrameGenerator::CreateSquareGenerator(kWidth, kHeight));
   test::FrameForwarder frame_forwarder;
   video_send_stream_->SetSource(
-      &frame_forwarder, VideoSendStream::DegradationPreference::kBalanced);
+      &frame_forwarder,
+      VideoSendStream::DegradationPreference::kMaintainFramerate);
 
   frame_forwarder.IncomingCapturedFrame(*frame_generator->NextFrame());
   EXPECT_TRUE(renderer.Wait())
@@ -266,7 +267,8 @@
                                                   kDefaultHeight));
   test::FrameForwarder frame_forwarder;
   video_send_stream_->SetSource(
-      &frame_forwarder, VideoSendStream::DegradationPreference::kBalanced);
+      &frame_forwarder,
+      VideoSendStream::DegradationPreference::kMaintainFramerate);
   frame_forwarder.IncomingCapturedFrame(*frame_generator->NextFrame());
 
   EXPECT_TRUE(renderer.Wait())
@@ -1500,7 +1502,7 @@
           width, height, 30, Clock::GetRealTimeClock());
       send_streams[i]->SetSource(
           frame_generators[i],
-          VideoSendStream::DegradationPreference::kBalanced);
+          VideoSendStream::DegradationPreference::kMaintainFramerate);
       frame_generators[i]->Start();
     }
 
@@ -1947,7 +1949,7 @@
                                                   kDefaultHeight));
   test::FrameForwarder forwarder;
   video_send_stream_->SetSource(
-      &forwarder, VideoSendStream::DegradationPreference::kBalanced);
+      &forwarder, VideoSendStream::DegradationPreference::kMaintainFramerate);
   forwarder.IncomingCapturedFrame(*frame_generator->NextFrame());
 
   EXPECT_TRUE(post_encode_observer.Wait())
diff --git a/webrtc/video/overuse_frame_detector.cc b/webrtc/video/overuse_frame_detector.cc
index 68b98a4..cfcbb61 100644
--- a/webrtc/video/overuse_frame_detector.cc
+++ b/webrtc/video/overuse_frame_detector.cc
@@ -16,12 +16,15 @@
 #include <algorithm>
 #include <list>
 #include <map>
+#include <string>
+#include <utility>
 
 #include "webrtc/api/video/video_frame.h"
 #include "webrtc/base/checks.h"
 #include "webrtc/base/logging.h"
 #include "webrtc/base/numerics/exp_filter.h"
 #include "webrtc/common_video/include/frame_callback.h"
+#include "webrtc/system_wrappers/include/field_trial.h"
 
 #if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
 #include <mach/mach.h>
@@ -116,7 +119,7 @@
         filtered_frame_diff_ms_(new rtc::ExpFilter(kWeightFactorFrameDiff)) {
     Reset();
   }
-  ~SendProcessingUsage() {}
+  virtual ~SendProcessingUsage() {}
 
   void Reset() {
     count_ = 0;
@@ -139,7 +142,7 @@
     filtered_processing_ms_->Apply(exp, processing_ms);
   }
 
-  int Value() const {
+  virtual int Value() {
     if (count_ < static_cast<uint32_t>(options_.min_frame_samples)) {
       return static_cast<int>(InitialUsageInPercent() + 0.5f);
     }
@@ -171,6 +174,90 @@
   std::unique_ptr<rtc::ExpFilter> filtered_frame_diff_ms_;
 };
 
+// Class used for manual testing of overuse, enabled via field trial flag.
+class OveruseFrameDetector::OverdoseInjector
+    : public OveruseFrameDetector::SendProcessingUsage {
+ public:
+  OverdoseInjector(const CpuOveruseOptions& options,
+                   int64_t overuse_period_ms,
+                   int64_t normal_period_ms)
+      : OveruseFrameDetector::SendProcessingUsage(options),
+        overuse_period_ms_(overuse_period_ms),
+        normal_period_ms_(normal_period_ms),
+        is_overusing_(false),
+        last_toggling_ms_(-1) {
+    RTC_DCHECK_GT(overuse_period_ms, 0);
+    RTC_DCHECK_GT(normal_period_ms, 0);
+    LOG(LS_INFO) << "Simulating overuse with intervals " << normal_period_ms
+                 << "ms normal mode, " << overuse_period_ms
+                 << "ms overuse mode.";
+  }
+
+  ~OverdoseInjector() override {}
+
+  int Value() override {
+    int64_t now_ms = rtc::TimeMillis();
+    if (last_toggling_ms_ == -1) {
+      last_toggling_ms_ = now_ms;
+    } else {
+      int64_t toggle_time_ms =
+          last_toggling_ms_ +
+          (is_overusing_ ? overuse_period_ms_ : normal_period_ms_);
+      if (now_ms > toggle_time_ms) {
+        is_overusing_ = !is_overusing_;
+        last_toggling_ms_ = now_ms;
+        if (is_overusing_) {
+          LOG(LS_INFO) << "Simulating CPU overuse.";
+        } else {
+          LOG(LS_INFO) << "Disabling CPU overuse simulation.";
+        }
+      }
+    }
+
+    if (is_overusing_)
+      return 250;  // 250% should be enough for anyone.
+
+    return SendProcessingUsage::Value();
+  }
+
+ private:
+  const int64_t overuse_period_ms_;
+  const int64_t normal_period_ms_;
+  bool is_overusing_;
+  int64_t last_toggling_ms_;
+};
+
+std::unique_ptr<OveruseFrameDetector::SendProcessingUsage>
+OveruseFrameDetector::CreateSendProcessingUsage(
+    const CpuOveruseOptions& options) {
+  std::unique_ptr<SendProcessingUsage> instance;
+  std::string toggling_interval =
+      field_trial::FindFullName("WebRTC-ForceSimulatedOveruseIntervalMs");
+  if (!toggling_interval.empty()) {
+    int overuse_period_ms = 0;
+    int normal_period_ms = 0;
+    if (sscanf(toggling_interval.c_str(), "%d-%d", &overuse_period_ms,
+               &normal_period_ms) == 2) {
+      if (overuse_period_ms > 0 && normal_period_ms > 0) {
+        instance.reset(
+            new OverdoseInjector(options, overuse_period_ms, normal_period_ms));
+      } else {
+        LOG(LS_WARNING) << "Invalid (non-positive) overuse / normal periods: "
+                        << overuse_period_ms << " / " << normal_period_ms;
+      }
+    } else {
+      LOG(LS_WARNING) << "Malformed toggling interval: " << toggling_interval;
+    }
+  }
+
+  if (!instance) {
+    // No valid overuse simulation parameters set, use normal usage class.
+    instance.reset(new SendProcessingUsage(options));
+  }
+
+  return instance;
+}
+
 class OveruseFrameDetector::CheckOveruseTask : public rtc::QueuedTask {
  public:
   explicit CheckOveruseTask(OveruseFrameDetector* overuse_detector)
@@ -222,7 +309,7 @@
       last_rampup_time_ms_(-1),
       in_quick_rampup_(false),
       current_rampup_delay_ms_(kStandardRampUpDelayMs),
-      usage_(new SendProcessingUsage(options)) {
+      usage_(CreateSendProcessingUsage(options)) {
   task_checker_.Detach();
 }
 
@@ -320,8 +407,9 @@
   while (!frame_timing_.empty()) {
     FrameTiming timing = frame_timing_.front();
     if (time_sent_in_us - timing.capture_us <
-        kEncodingTimeMeasureWindowMs * rtc::kNumMicrosecsPerMillisec)
+        kEncodingTimeMeasureWindowMs * rtc::kNumMicrosecsPerMillisec) {
       break;
+    }
     if (timing.last_send_us != -1) {
       int encode_duration_us =
           static_cast<int>(timing.last_send_us - timing.capture_us);
@@ -396,6 +484,7 @@
 
 bool OveruseFrameDetector::IsOverusing(const CpuOveruseMetrics& metrics) {
   RTC_DCHECK_CALLED_SEQUENTIALLY(&task_checker_);
+
   if (metrics.encode_usage_percent >=
       options_.high_encode_usage_threshold_percent) {
     ++checks_above_threshold_;
diff --git a/webrtc/video/overuse_frame_detector.h b/webrtc/video/overuse_frame_detector.h
index 752642b..2fb3104 100644
--- a/webrtc/video/overuse_frame_detector.h
+++ b/webrtc/video/overuse_frame_detector.h
@@ -87,6 +87,7 @@
   void CheckForOveruse();  // Protected for test purposes.
 
  private:
+  class OverdoseInjector;
   class SendProcessingUsage;
   class CheckOveruseTask;
   struct FrameTiming {
@@ -110,6 +111,9 @@
 
   void ResetAll(int num_pixels);
 
+  static std::unique_ptr<SendProcessingUsage> CreateSendProcessingUsage(
+      const CpuOveruseOptions& options);
+
   rtc::SequencedTaskChecker task_checker_;
   // Owned by the task queue from where StartCheckForOveruse is called.
   CheckOveruseTask* check_overuse_task_;
diff --git a/webrtc/video/send_statistics_proxy.h b/webrtc/video/send_statistics_proxy.h
index 37db3b0..b3226b6 100644
--- a/webrtc/video/send_statistics_proxy.h
+++ b/webrtc/video/send_statistics_proxy.h
@@ -50,7 +50,7 @@
                       VideoEncoderConfig::ContentType content_type);
   virtual ~SendStatisticsProxy();
 
-  VideoSendStream::Stats GetStats();
+  virtual VideoSendStream::Stats GetStats();
 
   virtual void OnSendEncodedImage(const EncodedImage& encoded_image,
                                   const CodecSpecificInfo* codec_info);
diff --git a/webrtc/video/video_quality_test.h b/webrtc/video/video_quality_test.h
index fa76c91..33a1a8a 100644
--- a/webrtc/video/video_quality_test.h
+++ b/webrtc/video/video_quality_test.h
@@ -146,7 +146,7 @@
   int send_logs_;
 
   VideoSendStream::DegradationPreference degradation_preference_ =
-      VideoSendStream::DegradationPreference::kBalanced;
+      VideoSendStream::DegradationPreference::kMaintainFramerate;
   Params params_;
 };
 
diff --git a/webrtc/video/video_send_stream_tests.cc b/webrtc/video/video_send_stream_tests.cc
index 2cd542a..8fcc9ef 100644
--- a/webrtc/video/video_send_stream_tests.cc
+++ b/webrtc/video/video_send_stream_tests.cc
@@ -1940,7 +1940,7 @@
   video_send_stream_->Start();
   test::FrameForwarder forwarder;
   video_send_stream_->SetSource(
-      &forwarder, VideoSendStream::DegradationPreference::kBalanced);
+      &forwarder, VideoSendStream::DegradationPreference::kMaintainFramerate);
   for (size_t i = 0; i < input_frames.size(); i++) {
     forwarder.IncomingCapturedFrame(input_frames[i]);
     // Wait until the output frame is received before sending the next input
@@ -1949,7 +1949,7 @@
   }
   video_send_stream_->Stop();
   video_send_stream_->SetSource(
-      nullptr, VideoSendStream::DegradationPreference::kBalanced);
+      nullptr, VideoSendStream::DegradationPreference::kMaintainFramerate);
 
   // Test if the input and output frames are the same. render_time_ms and
   // timestamp are not compared because capturer sets those values.
@@ -3192,7 +3192,7 @@
   CreateVideoStreams();
   test::FrameForwarder forwarder;
   video_send_stream_->SetSource(
-      &forwarder, VideoSendStream::DegradationPreference::kBalanced);
+      &forwarder, VideoSendStream::DegradationPreference::kMaintainFramerate);
 
   EXPECT_TRUE(forwarder.sink_wants().rotation_applied !=
               support_orientation_ext);
diff --git a/webrtc/video/vie_encoder.cc b/webrtc/video/vie_encoder.cc
index 2cf08b9..5d2caa4 100644
--- a/webrtc/video/vie_encoder.cc
+++ b/webrtc/video/vie_encoder.cc
@@ -12,6 +12,7 @@
 
 #include <algorithm>
 #include <limits>
+#include <numeric>
 #include <utility>
 
 #include "webrtc/base/arraysize.h"
@@ -42,6 +43,7 @@
 // on MediaCodec and fallback implementations are in place.
 // See https://bugs.chromium.org/p/webrtc/issues/detail?id=7206
 const int kMinPixelsPerFrame = 320 * 180;
+const int kMinFramerateFps = 2;
 
 // The maximum number of frames to drop at beginning of stream
 // to try and achieve desired bitrate.
@@ -150,7 +152,7 @@
  public:
   explicit VideoSourceProxy(ViEEncoder* vie_encoder)
       : vie_encoder_(vie_encoder),
-        degradation_preference_(DegradationPreference::kMaintainResolution),
+        degradation_preference_(DegradationPreference::kDegradationDisabled),
         source_(nullptr) {}
 
   void SetSource(rtc::VideoSourceInterface<VideoFrame>* source,
@@ -161,10 +163,10 @@
     rtc::VideoSinkWants wants;
     {
       rtc::CritScope lock(&crit_);
+      degradation_preference_ = degradation_preference;
       old_source = source_;
       source_ = source;
-      degradation_preference_ = degradation_preference;
-      wants = current_wants();
+      wants = GetActiveSinkWants();
     }
 
     if (old_source != source && old_source != nullptr) {
@@ -181,10 +183,30 @@
   void SetWantsRotationApplied(bool rotation_applied) {
     rtc::CritScope lock(&crit_);
     sink_wants_.rotation_applied = rotation_applied;
-    disabled_scaling_sink_wants_.rotation_applied = rotation_applied;
-    if (source_) {
-      source_->AddOrUpdateSink(vie_encoder_, current_wants());
+    if (source_)
+      source_->AddOrUpdateSink(vie_encoder_, sink_wants_);
+  }
+
+  rtc::VideoSinkWants GetActiveSinkWants() EXCLUSIVE_LOCKS_REQUIRED(&crit_) {
+    rtc::VideoSinkWants wants = sink_wants_;
+    // Clear any constraints from the current sink wants that don't apply to
+    // the used degradation_preference.
+    switch (degradation_preference_) {
+      case DegradationPreference::kBalanced:
+        FALLTHROUGH();
+      case DegradationPreference::kMaintainFramerate:
+        wants.max_framerate_fps = std::numeric_limits<int>::max();
+        break;
+      case DegradationPreference::kMaintainResolution:
+        wants.max_pixel_count = std::numeric_limits<int>::max();
+        wants.target_pixel_count.reset();
+        break;
+      case DegradationPreference::kDegradationDisabled:
+        wants.max_pixel_count = std::numeric_limits<int>::max();
+        wants.target_pixel_count.reset();
+        wants.max_framerate_fps = std::numeric_limits<int>::max();
     }
+    return wants;
   }
 
   void RequestResolutionLowerThan(int pixel_count) {
@@ -202,10 +224,28 @@
     const int pixels_wanted = (pixel_count * 3) / 5;
     if (pixels_wanted < kMinPixelsPerFrame)
       return;
-    sink_wants_.max_pixel_count = rtc::Optional<int>(pixels_wanted);
+    sink_wants_.max_pixel_count = pixels_wanted;
     sink_wants_.target_pixel_count = rtc::Optional<int>();
     if (source_)
-      source_->AddOrUpdateSink(vie_encoder_, sink_wants_);
+      source_->AddOrUpdateSink(vie_encoder_, GetActiveSinkWants());
+  }
+
+  void RequestFramerateLowerThan(int framerate_fps) {
+    // Called on the encoder task queue.
+    rtc::CritScope lock(&crit_);
+    if (!IsFramerateScalingEnabledLocked()) {
+      // This can happen since |degradation_preference_| is set on
+      // libjingle's worker thread but the adaptation is done on the encoder
+      // task queue.
+      return;
+    }
+    // The input video frame rate will be scaled down to 2/3 of input fps,
+    // rounding down.
+    const int framerate_wanted =
+        std::max(kMinFramerateFps, (framerate_fps * 2) / 3);
+    sink_wants_.max_framerate_fps = framerate_wanted;
+    if (source_)
+      source_->AddOrUpdateSink(vie_encoder_, GetActiveSinkWants());
   }
 
   void RequestHigherResolutionThan(int pixel_count) {
@@ -216,16 +256,46 @@
       // task queue.
       return;
     }
-    // On step down we request at most 3/5 the pixel count of the previous
-    // resolution, so in order to take "one step up" we request a resolution as
-    // close as possible to 5/3 of the current resolution. The actual pixel
-    // count selected depends on the capabilities of the source. In order to not
-    // take a too large step up, we cap the requested pixel count to be at most
-    // four time the current number of pixels.
-    sink_wants_.target_pixel_count = rtc::Optional<int>((pixel_count * 5) / 3);
-    sink_wants_.max_pixel_count = rtc::Optional<int>(pixel_count * 4);
+
+    if (pixel_count == std::numeric_limits<int>::max()) {
+      // Remove any constraints.
+      sink_wants_.target_pixel_count.reset();
+      sink_wants_.max_pixel_count = std::numeric_limits<int>::max();
+    } else {
+      // On step down we request at most 3/5 the pixel count of the previous
+      // resolution, so in order to take "one step up" we request a resolution
+      // as close as possible to 5/3 of the current resolution. The actual pixel
+      // count selected depends on the capabilities of the source. In order to
+      // not take a too large step up, we cap the requested pixel count to be at
+      // most four times the current number of pixels.
+      sink_wants_.target_pixel_count =
+          rtc::Optional<int>((pixel_count * 5) / 3);
+      sink_wants_.max_pixel_count = pixel_count * 4;
+    }
     if (source_)
-      source_->AddOrUpdateSink(vie_encoder_, sink_wants_);
+      source_->AddOrUpdateSink(vie_encoder_, GetActiveSinkWants());
+  }
+
+  void RequestHigherFramerateThan(int framerate_fps) {
+    // Called on the encoder task queue.
+    rtc::CritScope lock(&crit_);
+    if (!IsFramerateScalingEnabledLocked()) {
+      // This can happen since |degradation_preference_| is set on
+      // libjingle's worker thread but the adaptation is done on the encoder
+      // task queue.
+      return;
+    }
+    if (framerate_fps == std::numeric_limits<int>::max()) {
+      // Remove any constraints.
+      sink_wants_.max_framerate_fps = std::numeric_limits<int>::max();
+    } else {
+      // The input video frame rate will be scaled up to the last step, with
+      // rounding.
+      const int framerate_wanted = (framerate_fps * 3) / 2;
+      sink_wants_.max_framerate_fps = framerate_wanted;
+    }
+    if (source_)
+      source_->AddOrUpdateSink(vie_encoder_, GetActiveSinkWants());
   }
 
  private:
@@ -235,17 +305,16 @@
            DegradationPreference::kMaintainResolution;
   }
 
-  const rtc::VideoSinkWants& current_wants() const
+  bool IsFramerateScalingEnabledLocked() const
       EXCLUSIVE_LOCKS_REQUIRED(&crit_) {
-    return IsResolutionScalingEnabledLocked() ? sink_wants_
-                                              : disabled_scaling_sink_wants_;
+    return degradation_preference_ ==
+           DegradationPreference::kMaintainResolution;
   }
 
   rtc::CriticalSection crit_;
   rtc::SequencedTaskChecker main_checker_;
   ViEEncoder* const vie_encoder_;
   rtc::VideoSinkWants sink_wants_ GUARDED_BY(&crit_);
-  rtc::VideoSinkWants disabled_scaling_sink_wants_ GUARDED_BY(&crit_);
   DegradationPreference degradation_preference_ GUARDED_BY(&crit_);
   rtc::VideoSourceInterface<VideoFrame>* source_ GUARDED_BY(&crit_);
 
@@ -284,8 +353,7 @@
       has_received_rpsi_(false),
       picture_id_rpsi_(0),
       clock_(Clock::GetRealTimeClock()),
-      scale_counter_(kScaleReasonSize, 0),
-      degradation_preference_(DegradationPreference::kMaintainResolution),
+      degradation_preference_(DegradationPreference::kDegradationDisabled),
       last_captured_timestamp_(0),
       delta_ntp_internal_ms_(clock_->CurrentNtpInMilliseconds() -
                              clock_->TimeInMilliseconds()),
@@ -356,7 +424,11 @@
   source_proxy_->SetSource(source, degradation_preference);
   encoder_queue_.PostTask([this, degradation_preference] {
     RTC_DCHECK_RUN_ON(&encoder_queue_);
-
+    if (degradation_preference_ != degradation_preference) {
+      // Reset adaptation state, so that we're not tricked into thinking there's
+      // an already pending request of the same type.
+      last_adaptation_request_.reset();
+    }
     degradation_preference_ = degradation_preference;
     initial_rampup_ =
         degradation_preference_ != DegradationPreference::kMaintainResolution
@@ -475,9 +547,10 @@
     quality_scaler_.reset(nullptr);
     initial_rampup_ = kMaxInitialFramedrop;
   }
+  const std::vector<int>& scale_counters = GetScaleCounters();
   stats_proxy_->SetResolutionRestrictionStats(
-      degradation_preference_allows_scaling, scale_counter_[kCpu] > 0,
-      scale_counter_[kQuality]);
+      degradation_preference_allows_scaling, scale_counters[kCpu] > 0,
+      scale_counters[kQuality]);
 }
 
 void ViEEncoder::OnFrame(const VideoFrame& video_frame) {
@@ -728,79 +801,191 @@
 
 void ViEEncoder::AdaptDown(AdaptReason reason) {
   RTC_DCHECK_RUN_ON(&encoder_queue_);
-  if (degradation_preference_ != DegradationPreference::kBalanced)
-    return;
-  RTC_DCHECK(static_cast<bool>(last_frame_info_));
-  int current_pixel_count = last_frame_info_->pixel_count();
-  if (last_adaptation_request_ &&
-      last_adaptation_request_->mode_ == AdaptationRequest::Mode::kAdaptDown &&
-      current_pixel_count >= last_adaptation_request_->input_pixel_count_) {
-    // Don't request lower resolution if the current resolution is not lower
-    // than the last time we asked for the resolution to be lowered.
-    return;
+  AdaptationRequest adaptation_request = {
+      last_frame_info_->pixel_count(),
+      stats_proxy_->GetStats().input_frame_rate,
+      AdaptationRequest::Mode::kAdaptDown};
+  bool downgrade_requested =
+      last_adaptation_request_ &&
+      last_adaptation_request_->mode_ == AdaptationRequest::Mode::kAdaptDown;
+
+  int max_downgrades = 0;
+  switch (degradation_preference_) {
+    case DegradationPreference::kBalanced:
+      FALLTHROUGH();
+    case DegradationPreference::kMaintainFramerate:
+      max_downgrades = kMaxCpuResolutionDowngrades;
+      if (downgrade_requested &&
+          adaptation_request.input_pixel_count_ >=
+              last_adaptation_request_->input_pixel_count_) {
+        // Don't request lower resolution if the current resolution is not
+        // lower than the last time we asked for the resolution to be lowered.
+        return;
+      }
+      break;
+    case DegradationPreference::kMaintainResolution:
+      max_downgrades = kMaxCpuFramerateDowngrades;
+      if (adaptation_request.framerate_fps_ <= 0 ||
+          (downgrade_requested &&
+           adaptation_request.framerate_fps_ < kMinFramerateFps)) {
+        // If no input fps estimate available, can't determine how to scale down
+        // framerate. Otherwise, don't request lower framerate if we don't have
+        // a valid frame rate. Since framerate, unlike resolution, is a measure
+        // we have to estimate, and can fluctuate naturally over time, don't
+        // make the same kind of limitations as for resolution, but trust the
+        // overuse detector to not trigger too often.
+        return;
+      }
+      break;
+    case DegradationPreference::kDegradationDisabled:
+      return;
   }
-  last_adaptation_request_.emplace(AdaptationRequest{
-      current_pixel_count, AdaptationRequest::Mode::kAdaptDown});
+
+  last_adaptation_request_.emplace(adaptation_request);
+  const std::vector<int>& scale_counter = GetScaleCounters();
 
   switch (reason) {
     case kQuality:
-      stats_proxy_->OnQualityRestrictedResolutionChanged(
-          scale_counter_[reason] + 1);
+      stats_proxy_->OnQualityRestrictedResolutionChanged(scale_counter[reason] +
+                                                         1);
       break;
     case kCpu:
-      if (scale_counter_[reason] >= kMaxCpuDowngrades)
+      if (scale_counter[reason] >= max_downgrades)
         return;
       // Update stats accordingly.
       stats_proxy_->OnCpuRestrictedResolutionChanged(true);
       break;
   }
-  ++scale_counter_[reason];
-  source_proxy_->RequestResolutionLowerThan(current_pixel_count);
-  LOG(LS_INFO) << "Scaling down resolution.";
+
+  IncrementScaleCounter(reason, 1);
+
+  switch (degradation_preference_) {
+    case DegradationPreference::kBalanced:
+      FALLTHROUGH();
+    case DegradationPreference::kMaintainFramerate:
+      source_proxy_->RequestResolutionLowerThan(
+          adaptation_request.input_pixel_count_);
+      LOG(LS_INFO) << "Scaling down resolution.";
+      break;
+    case DegradationPreference::kMaintainResolution:
+      source_proxy_->RequestFramerateLowerThan(
+          adaptation_request.framerate_fps_);
+      LOG(LS_INFO) << "Scaling down framerate.";
+      break;
+    case DegradationPreference::kDegradationDisabled:
+      RTC_NOTREACHED();
+  }
+
   for (size_t i = 0; i < kScaleReasonSize; ++i) {
-    LOG(LS_INFO) << "Scaled " << scale_counter_[i]
+    LOG(LS_INFO) << "Scaled " << GetScaleCounters()[i]
                  << " times for reason: " << (i ? "cpu" : "quality");
   }
 }
 
 void ViEEncoder::AdaptUp(AdaptReason reason) {
   RTC_DCHECK_RUN_ON(&encoder_queue_);
-  if (scale_counter_[reason] == 0 ||
-      degradation_preference_ != DegradationPreference::kBalanced) {
+  int scale_counter = GetScaleCounters()[reason];
+  if (scale_counter == 0)
     return;
+  RTC_DCHECK_GT(scale_counter, 0);
+  AdaptationRequest adaptation_request = {
+      last_frame_info_->pixel_count(),
+      stats_proxy_->GetStats().input_frame_rate,
+      AdaptationRequest::Mode::kAdaptUp};
+
+  bool adapt_up_requested =
+      last_adaptation_request_ &&
+      last_adaptation_request_->mode_ == AdaptationRequest::Mode::kAdaptUp;
+  switch (degradation_preference_) {
+    case DegradationPreference::kBalanced:
+      FALLTHROUGH();
+    case DegradationPreference::kMaintainFramerate:
+      if (adapt_up_requested &&
+          adaptation_request.input_pixel_count_ <=
+              last_adaptation_request_->input_pixel_count_) {
+        // Don't request higher resolution if the current resolution is not
+        // higher than the last time we asked for the resolution to be higher.
+        return;
+      }
+      break;
+    case DegradationPreference::kMaintainResolution:
+      // TODO(sprang): Don't request higher framerate if we are already at
+      // max requested fps?
+      break;
+    case DegradationPreference::kDegradationDisabled:
+      return;
   }
-  // Only scale if resolution is higher than last time we requested higher
-  // resolution.
-  RTC_DCHECK(static_cast<bool>(last_frame_info_));
-  int current_pixel_count = last_frame_info_->pixel_count();
-  if (last_adaptation_request_ &&
-      last_adaptation_request_->mode_ == AdaptationRequest::Mode::kAdaptUp &&
-      current_pixel_count <= last_adaptation_request_->input_pixel_count_) {
-    // Don't request higher resolution if the current resolution is not higher
-    // than the last time we asked for the resolution to be higher.
-    return;
-  }
-  last_adaptation_request_.emplace(AdaptationRequest{
-      current_pixel_count, AdaptationRequest::Mode::kAdaptUp});
 
   switch (reason) {
     case kQuality:
-      stats_proxy_->OnQualityRestrictedResolutionChanged(
-          scale_counter_[reason] - 1);
+      stats_proxy_->OnQualityRestrictedResolutionChanged(scale_counter - 1);
       break;
     case kCpu:
       // Update stats accordingly.
-      stats_proxy_->OnCpuRestrictedResolutionChanged(scale_counter_[reason] >
-                                                     1);
+      stats_proxy_->OnCpuRestrictedResolutionChanged(scale_counter > 1);
       break;
   }
-  --scale_counter_[reason];
-  source_proxy_->RequestHigherResolutionThan(current_pixel_count);
-  LOG(LS_INFO) << "Scaling up resolution.";
+
+  // Decrease counter of how many times we have scaled down, for this
+  // degradation preference mode and reason.
+  IncrementScaleCounter(reason, -1);
+
+  // Get a sum of how many times we have scaled down, in total, for this
+  // degradation preference mode. If it is 0, remove any restraints.
+  const std::vector<int>& current_scale_counters = GetScaleCounters();
+  const int scale_sum = std::accumulate(current_scale_counters.begin(),
+                                        current_scale_counters.end(), 0);
+  switch (degradation_preference_) {
+    case DegradationPreference::kBalanced:
+      FALLTHROUGH();
+    case DegradationPreference::kMaintainFramerate:
+      if (scale_sum == 0) {
+        LOG(LS_INFO) << "Removing resolution down-scaling setting.";
+        source_proxy_->RequestHigherResolutionThan(
+            std::numeric_limits<int>::max());
+      } else {
+        source_proxy_->RequestHigherResolutionThan(
+            adaptation_request.input_pixel_count_);
+        LOG(LS_INFO) << "Scaling up resolution.";
+      }
+      break;
+    case DegradationPreference::kMaintainResolution:
+      if (scale_sum == 0) {
+        LOG(LS_INFO) << "Removing framerate down-scaling setting.";
+        source_proxy_->RequestHigherFramerateThan(
+            std::numeric_limits<int>::max());
+      } else {
+        source_proxy_->RequestHigherFramerateThan(
+            adaptation_request.framerate_fps_);
+        LOG(LS_INFO) << "Scaling up framerate.";
+      }
+      break;
+    case DegradationPreference::kDegradationDisabled:
+      RTC_NOTREACHED();
+  }
+
   for (size_t i = 0; i < kScaleReasonSize; ++i) {
-    LOG(LS_INFO) << "Scaled " << scale_counter_[i]
+    LOG(LS_INFO) << "Scaled " << current_scale_counters[i]
                  << " times for reason: " << (i ? "cpu" : "quality");
   }
 }
 
+const std::vector<int>& ViEEncoder::GetScaleCounters() {
+  auto it = scale_counters_.find(degradation_preference_);
+  if (it == scale_counters_.end()) {
+    scale_counters_[degradation_preference_].resize(kScaleReasonSize);
+    return scale_counters_[degradation_preference_];
+  }
+  return it->second;
+}
+
+void ViEEncoder::IncrementScaleCounter(int reason, int delta) {
+  // Get the counters and validate. This may also lazily initialize the state.
+  const std::vector<int>& counter = GetScaleCounters();
+  if (delta < 0) {
+    RTC_DCHECK_GE(counter[reason], delta);
+  }
+  scale_counters_[degradation_preference_][reason] += delta;
+}
+
 }  // namespace webrtc
diff --git a/webrtc/video/vie_encoder.h b/webrtc/video/vie_encoder.h
index c0b80ba..d0325fe 100644
--- a/webrtc/video/vie_encoder.h
+++ b/webrtc/video/vie_encoder.h
@@ -11,6 +11,7 @@
 #ifndef WEBRTC_VIDEO_VIE_ENCODER_H_
 #define WEBRTC_VIDEO_VIE_ENCODER_H_
 
+#include <map>
 #include <memory>
 #include <string>
 #include <vector>
@@ -62,7 +63,9 @@
   };
 
   // Downscale resolution at most 2 times for CPU reasons.
-  static const int kMaxCpuDowngrades = 2;
+  static const int kMaxCpuResolutionDowngrades = 2;
+  // Downscale framerate at most 4 times.
+  static const int kMaxCpuFramerateDowngrades = 4;
 
   ViEEncoder(uint32_t number_of_cores,
              SendStatisticsProxy* stats_proxy,
@@ -174,6 +177,11 @@
   void TraceFrameDropStart();
   void TraceFrameDropEnd();
 
+  const std::vector<int>& GetScaleCounters()
+      EXCLUSIVE_LOCKS_REQUIRED(&encoder_queue_);
+  void IncrementScaleCounter(int reason, int delta)
+      EXCLUSIVE_LOCKS_REQUIRED(&encoder_queue_);
+
   rtc::Event shutdown_event_;
 
   const uint32_t number_of_cores_;
@@ -216,8 +224,11 @@
   uint64_t picture_id_rpsi_ ACCESS_ON(&encoder_queue_);
   Clock* const clock_;
   // Counters used for deciding if the video resolution is currently
-  // restricted, and if so, why.
-  std::vector<int> scale_counter_ ACCESS_ON(&encoder_queue_);
+  // restricted, and if so, why, on a per degradation preference basis.
+  // TODO(sprang): Replace this with a state holding a relative overuse measure
+  // instead, that can be translated into suitable down-scale or fps limit.
+  std::map<const VideoSendStream::DegradationPreference, std::vector<int>>
+      scale_counters_ ACCESS_ON(&encoder_queue_);
   // Set depending on degradation preferences
   VideoSendStream::DegradationPreference degradation_preference_
       ACCESS_ON(&encoder_queue_);
@@ -225,6 +236,8 @@
   struct AdaptationRequest {
     // The pixel count produced by the source at the time of the adaptation.
     int input_pixel_count_;
+    // Framerate received from the source at the time of the adaptation.
+    int framerate_fps_;
     // Indicates if request was to adapt up or down.
     enum class Mode { kAdaptUp, kAdaptDown } mode_;
   };
diff --git a/webrtc/video/vie_encoder_unittest.cc b/webrtc/video/vie_encoder_unittest.cc
index eedf771..d7bddeb 100644
--- a/webrtc/video/vie_encoder_unittest.cc
+++ b/webrtc/video/vie_encoder_unittest.cc
@@ -12,6 +12,7 @@
 #include <utility>
 
 #include "webrtc/api/video/i420_buffer.h"
+#include "webrtc/base/fakeclock.h"
 #include "webrtc/base/logging.h"
 #include "webrtc/media/base/videoadapter.h"
 #include "webrtc/modules/video_coding/utility/default_video_bitrate_allocator.h"
@@ -33,7 +34,9 @@
 #else
 const int kMinPixelsPerFrame = 120 * 90;
 #endif
-}
+const int kMinFramerateFps = 2;
+const int64_t kFrameTimeoutMs = 100;
+}  // namespace
 
 namespace webrtc {
 
@@ -143,16 +146,17 @@
     int cropped_height = 0;
     int out_width = 0;
     int out_height = 0;
-    if (adaption_enabled() &&
-        adapter_.AdaptFrameResolution(video_frame.width(), video_frame.height(),
-                                      video_frame.timestamp_us() * 1000,
-                                      &cropped_width, &cropped_height,
-                                      &out_width, &out_height)) {
-      VideoFrame adapted_frame(
-          new rtc::RefCountedObject<TestBuffer>(nullptr, out_width, out_height),
-          99, 99, kVideoRotation_0);
-      adapted_frame.set_ntp_time_ms(video_frame.ntp_time_ms());
-      test::FrameForwarder::IncomingCapturedFrame(adapted_frame);
+    if (adaption_enabled()) {
+      if (adapter_.AdaptFrameResolution(
+              video_frame.width(), video_frame.height(),
+              video_frame.timestamp_us() * 1000, &cropped_width,
+              &cropped_height, &out_width, &out_height)) {
+        VideoFrame adapted_frame(new rtc::RefCountedObject<TestBuffer>(
+                                     nullptr, out_width, out_height),
+                                 99, 99, kVideoRotation_0);
+        adapted_frame.set_ntp_time_ms(video_frame.ntp_time_ms());
+        test::FrameForwarder::IncomingCapturedFrame(adapted_frame);
+      }
     } else {
       test::FrameForwarder::IncomingCapturedFrame(video_frame);
     }
@@ -161,14 +165,45 @@
   void AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
                        const rtc::VideoSinkWants& wants) override {
     rtc::CritScope cs(&crit_);
-    adapter_.OnResolutionRequest(wants.target_pixel_count,
-                                 wants.max_pixel_count);
+    adapter_.OnResolutionFramerateRequest(wants.target_pixel_count,
+                                          wants.max_pixel_count,
+                                          wants.max_framerate_fps);
     test::FrameForwarder::AddOrUpdateSink(sink, wants);
   }
 
   cricket::VideoAdapter adapter_;
   bool adaptation_enabled_ GUARDED_BY(crit_);
 };
+
+class MockableSendStatisticsProxy : public SendStatisticsProxy {
+ public:
+  MockableSendStatisticsProxy(Clock* clock,
+                              const VideoSendStream::Config& config,
+                              VideoEncoderConfig::ContentType content_type)
+      : SendStatisticsProxy(clock, config, content_type) {}
+
+  VideoSendStream::Stats GetStats() override {
+    rtc::CritScope cs(&lock_);
+    if (mock_stats_)
+      return *mock_stats_;
+    return SendStatisticsProxy::GetStats();
+  }
+
+  void SetMockStats(const VideoSendStream::Stats& stats) {
+    rtc::CritScope cs(&lock_);
+    mock_stats_.emplace(stats);
+  }
+
+  void ResetMockStats() {
+    rtc::CritScope cs(&lock_);
+    mock_stats_.reset();
+  }
+
+ private:
+  rtc::CriticalSection lock_;
+  rtc::Optional<VideoSendStream::Stats> mock_stats_ GUARDED_BY(lock_);
+};
+
 }  // namespace
 
 class ViEEncoderTest : public ::testing::Test {
@@ -180,7 +215,7 @@
         codec_width_(320),
         codec_height_(240),
         fake_encoder_(),
-        stats_proxy_(new SendStatisticsProxy(
+        stats_proxy_(new MockableSendStatisticsProxy(
             Clock::GetRealTimeClock(),
             video_send_config_,
             webrtc::VideoEncoderConfig::ContentType::kRealtimeVideo)),
@@ -206,8 +241,9 @@
     vie_encoder_.reset(new ViEEncoderUnderTest(
         stats_proxy_.get(), video_send_config_.encoder_settings));
     vie_encoder_->SetSink(&sink_, false /* rotation_applied */);
-    vie_encoder_->SetSource(&video_source_,
-                            VideoSendStream::DegradationPreference::kBalanced);
+    vie_encoder_->SetSource(
+        &video_source_,
+        VideoSendStream::DegradationPreference::kMaintainFramerate);
     vie_encoder_->SetStartBitrate(kTargetBitrateBps);
     vie_encoder_->ConfigureEncoder(std::move(video_encoder_config),
                                    kMaxPayloadLength, nack_enabled);
@@ -242,6 +278,7 @@
         new rtc::RefCountedObject<TestBuffer>(nullptr, width, height), 99, 99,
         kVideoRotation_0);
     frame.set_ntp_time_ms(ntp_time_ms);
+    frame.set_timestamp_us(ntp_time_ms * 1000);
     return frame;
   }
 
@@ -334,9 +371,14 @@
 
     void WaitForEncodedFrame(uint32_t expected_width,
                              uint32_t expected_height) {
+      EXPECT_TRUE(encoded_frame_event_.Wait(kDefaultTimeoutMs));
+      CheckLastFrameSizeMathces(expected_width, expected_height);
+    }
+
+    void CheckLastFrameSizeMathces(uint32_t expected_width,
+                                   uint32_t expected_height) {
       uint32_t width = 0;
       uint32_t height = 0;
-      EXPECT_TRUE(encoded_frame_event_.Wait(kDefaultTimeoutMs));
       {
         rtc::CritScope lock(&crit_);
         width = last_width_;
@@ -348,6 +390,10 @@
 
     void ExpectDroppedFrame() { EXPECT_FALSE(encoded_frame_event_.Wait(100)); }
 
+    bool WaitForFrame(int64_t timeout_ms) {
+      return encoded_frame_event_.Wait(timeout_ms);
+    }
+
     void SetExpectNoFrames() {
       rtc::CritScope lock(&crit_);
       expect_frames_ = false;
@@ -400,7 +446,7 @@
   int codec_width_;
   int codec_height_;
   TestEncoder fake_encoder_;
-  std::unique_ptr<SendStatisticsProxy> stats_proxy_;
+  std::unique_ptr<MockableSendStatisticsProxy> stats_proxy_;
   TestSink sink_;
   AdaptingFrameForwarder video_source_;
   std::unique_ptr<ViEEncoderUnderTest> vie_encoder_;
@@ -618,8 +664,9 @@
 TEST_F(ViEEncoderTest, SwitchSourceDeregisterEncoderAsSink) {
   EXPECT_TRUE(video_source_.has_sinks());
   test::FrameForwarder new_video_source;
-  vie_encoder_->SetSource(&new_video_source,
-                          VideoSendStream::DegradationPreference::kBalanced);
+  vie_encoder_->SetSource(
+      &new_video_source,
+      VideoSendStream::DegradationPreference::kMaintainFramerate);
   EXPECT_FALSE(video_source_.has_sinks());
   EXPECT_TRUE(new_video_source.has_sinks());
 
@@ -637,14 +684,15 @@
   vie_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
 
   EXPECT_FALSE(video_source_.sink_wants().target_pixel_count);
-  EXPECT_FALSE(video_source_.sink_wants().max_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            video_source_.sink_wants().max_pixel_count);
 
   int frame_width = 1280;
   int frame_height = 720;
 
   // Trigger CPU overuse kMaxCpuDowngrades times. Every time, ViEEncoder should
   // request lower resolution.
-  for (int i = 1; i <= ViEEncoder::kMaxCpuDowngrades; ++i) {
+  for (int i = 1; i <= ViEEncoder::kMaxCpuResolutionDowngrades; ++i) {
     video_source_.IncomingCapturedFrame(
         CreateFrame(i, frame_width, frame_height));
     sink_.WaitForEncodedFrame(i);
@@ -652,8 +700,7 @@
     vie_encoder_->TriggerCpuOveruse();
 
     EXPECT_FALSE(video_source_.sink_wants().target_pixel_count);
-    EXPECT_LT(video_source_.sink_wants().max_pixel_count.value_or(
-                  std::numeric_limits<int>::max()),
+    EXPECT_LT(video_source_.sink_wants().max_pixel_count,
               frame_width * frame_height);
 
     frame_width /= 2;
@@ -664,8 +711,8 @@
   // lower resolution.
   rtc::VideoSinkWants current_wants = video_source_.sink_wants();
   video_source_.IncomingCapturedFrame(CreateFrame(
-      ViEEncoder::kMaxCpuDowngrades + 1, frame_width, frame_height));
-  sink_.WaitForEncodedFrame(ViEEncoder::kMaxCpuDowngrades + 1);
+      ViEEncoder::kMaxCpuResolutionDowngrades + 1, frame_width, frame_height));
+  sink_.WaitForEncodedFrame(ViEEncoder::kMaxCpuResolutionDowngrades + 1);
   vie_encoder_->TriggerCpuOveruse();
   EXPECT_EQ(video_source_.sink_wants().target_pixel_count,
             current_wants.target_pixel_count);
@@ -677,57 +724,120 @@
   EXPECT_EQ(frame_width * frame_height * 5 / 3,
             video_source_.sink_wants().target_pixel_count.value_or(0));
   EXPECT_EQ(frame_width * frame_height * 4,
-            video_source_.sink_wants().max_pixel_count.value_or(0));
+            video_source_.sink_wants().max_pixel_count);
 
   vie_encoder_->Stop();
 }
 
-TEST_F(ViEEncoderTest,
-       ResolutionSinkWantsResetOnSetSourceWithDisabledResolutionScaling) {
+TEST_F(ViEEncoderTest, SinkWantsStoredByDegradationPreference) {
   vie_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
 
   EXPECT_FALSE(video_source_.sink_wants().target_pixel_count);
-  EXPECT_FALSE(video_source_.sink_wants().max_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            video_source_.sink_wants().max_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            video_source_.sink_wants().max_framerate_fps);
 
-  int frame_width = 1280;
-  int frame_height = 720;
+  const int kFrameWidth = 1280;
+  const int kFrameHeight = 720;
+  const int kFrameIntervalMs = 1000 / 30;
+
+  int frame_timestamp = 1;
 
   video_source_.IncomingCapturedFrame(
-      CreateFrame(1, frame_width, frame_height));
-  sink_.WaitForEncodedFrame(1);
+      CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+  sink_.WaitForEncodedFrame(frame_timestamp);
+  frame_timestamp += kFrameIntervalMs;
+
   // Trigger CPU overuse.
   vie_encoder_->TriggerCpuOveruse();
-
   video_source_.IncomingCapturedFrame(
-      CreateFrame(2, frame_width, frame_height));
-  sink_.WaitForEncodedFrame(2);
-  EXPECT_FALSE(video_source_.sink_wants().target_pixel_count);
-  EXPECT_LT(video_source_.sink_wants().max_pixel_count.value_or(
-                std::numeric_limits<int>::max()),
-            frame_width * frame_height);
+      CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+  sink_.WaitForEncodedFrame(frame_timestamp);
+  frame_timestamp += kFrameIntervalMs;
 
-  // Set new source.
+  // Default degradation preference is maintain-framerate, so will lower max
+  // wanted resolution.
+  EXPECT_FALSE(video_source_.sink_wants().target_pixel_count);
+  EXPECT_LT(video_source_.sink_wants().max_pixel_count,
+            kFrameWidth * kFrameHeight);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            video_source_.sink_wants().max_framerate_fps);
+
+  // Set new source, switch to maintain-resolution.
   test::FrameForwarder new_video_source;
   vie_encoder_->SetSource(
       &new_video_source,
       VideoSendStream::DegradationPreference::kMaintainResolution);
 
+  // Initially no degradation registered.
   EXPECT_FALSE(new_video_source.sink_wants().target_pixel_count);
-  EXPECT_FALSE(new_video_source.sink_wants().max_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            new_video_source.sink_wants().max_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            new_video_source.sink_wants().max_framerate_fps);
 
+  // Force an input frame rate to be available, or the adaptation call won't
+  // know what framerate to adapt from.
+  VideoSendStream::Stats stats = stats_proxy_->GetStats();
+  stats.input_frame_rate = 30;
+  stats_proxy_->SetMockStats(stats);
+
+  vie_encoder_->TriggerCpuOveruse();
   new_video_source.IncomingCapturedFrame(
-      CreateFrame(3, frame_width, frame_height));
-  sink_.WaitForEncodedFrame(3);
+      CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+  sink_.WaitForEncodedFrame(frame_timestamp);
+  frame_timestamp += kFrameIntervalMs;
+
+  // Some framerate constraint should be set.
   EXPECT_FALSE(new_video_source.sink_wants().target_pixel_count);
-  EXPECT_FALSE(new_video_source.sink_wants().max_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            new_video_source.sink_wants().max_pixel_count);
+  EXPECT_TRUE(new_video_source.sink_wants().max_framerate_fps);
+
+  // Turn off degradation completely.
+  vie_encoder_->SetSource(
+      &new_video_source,
+      VideoSendStream::DegradationPreference::kDegradationDisabled);
+
+  // Initially no degradation registered.
+  EXPECT_FALSE(new_video_source.sink_wants().target_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            new_video_source.sink_wants().max_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            new_video_source.sink_wants().max_framerate_fps);
+
+  vie_encoder_->TriggerCpuOveruse();
+  new_video_source.IncomingCapturedFrame(
+      CreateFrame(frame_timestamp, kFrameWidth, kFrameHeight));
+  sink_.WaitForEncodedFrame(frame_timestamp);
+  frame_timestamp += kFrameIntervalMs;
+
+  // Still no degradation.
+  EXPECT_FALSE(new_video_source.sink_wants().target_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            new_video_source.sink_wants().max_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            new_video_source.sink_wants().max_framerate_fps);
 
   // Calling SetSource with resolution scaling enabled apply the old SinkWants.
-  vie_encoder_->SetSource(&new_video_source,
-                          VideoSendStream::DegradationPreference::kBalanced);
-  EXPECT_LT(new_video_source.sink_wants().max_pixel_count.value_or(
-                std::numeric_limits<int>::max()),
-            frame_width * frame_height);
+  vie_encoder_->SetSource(
+      &new_video_source,
+      VideoSendStream::DegradationPreference::kMaintainFramerate);
+  EXPECT_LT(new_video_source.sink_wants().max_pixel_count,
+            kFrameWidth * kFrameHeight);
   EXPECT_FALSE(new_video_source.sink_wants().target_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            new_video_source.sink_wants().max_framerate_fps);
+
+  // Calling SetSource with framerate scaling enabled applies the old SinkWants.
+  vie_encoder_->SetSource(
+      &new_video_source,
+      VideoSendStream::DegradationPreference::kMaintainResolution);
+  EXPECT_FALSE(new_video_source.sink_wants().target_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            new_video_source.sink_wants().max_pixel_count);
+  EXPECT_TRUE(new_video_source.sink_wants().max_framerate_fps);
 
   vie_encoder_->Stop();
 }
@@ -792,8 +902,9 @@
 
   // Set new source with adaptation still enabled.
   test::FrameForwarder new_video_source;
-  vie_encoder_->SetSource(&new_video_source,
-                          VideoSendStream::DegradationPreference::kBalanced);
+  vie_encoder_->SetSource(
+      &new_video_source,
+      VideoSendStream::DegradationPreference::kMaintainFramerate);
 
   new_video_source.IncomingCapturedFrame(
       CreateFrame(3, frame_width, frame_height));
@@ -805,7 +916,7 @@
   // Set adaptation disabled.
   vie_encoder_->SetSource(
       &new_video_source,
-      VideoSendStream::DegradationPreference::kMaintainResolution);
+      VideoSendStream::DegradationPreference::kDegradationDisabled);
 
   new_video_source.IncomingCapturedFrame(
       CreateFrame(4, frame_width, frame_height));
@@ -815,8 +926,9 @@
   EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
 
   // Set adaptation back to enabled.
-  vie_encoder_->SetSource(&new_video_source,
-                          VideoSendStream::DegradationPreference::kBalanced);
+  vie_encoder_->SetSource(
+      &new_video_source,
+      VideoSendStream::DegradationPreference::kMaintainFramerate);
 
   new_video_source.IncomingCapturedFrame(
       CreateFrame(5, frame_width, frame_height));
@@ -925,8 +1037,9 @@
 
   // Set new source with adaptation still enabled.
   test::FrameForwarder new_video_source;
-  vie_encoder_->SetSource(&new_video_source,
-                          VideoSendStream::DegradationPreference::kBalanced);
+  vie_encoder_->SetSource(
+      &new_video_source,
+      VideoSendStream::DegradationPreference::kMaintainFramerate);
 
   new_video_source.IncomingCapturedFrame(
       CreateFrame(sequence, frame_width, frame_height));
@@ -935,7 +1048,7 @@
   EXPECT_TRUE(stats.cpu_limited_resolution);
   EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
 
-  // Set adaptation disabled.
+  // Set cpu adaptation by frame dropping.
   vie_encoder_->SetSource(
       &new_video_source,
       VideoSendStream::DegradationPreference::kMaintainResolution);
@@ -943,18 +1056,58 @@
       CreateFrame(sequence, frame_width, frame_height));
   sink_.WaitForEncodedFrame(sequence++);
   stats = stats_proxy_->GetStats();
+  // Not adapted at first.
   EXPECT_FALSE(stats.cpu_limited_resolution);
   EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
 
-  // Switch back the source with adaptation enabled.
-  vie_encoder_->SetSource(&video_source_,
-                          VideoSendStream::DegradationPreference::kBalanced);
+  // Force an input frame rate to be available, or the adaptation call won't
+  // know what framerate to adapt from.
+  VideoSendStream::Stats mock_stats = stats_proxy_->GetStats();
+  mock_stats.input_frame_rate = 30;
+  stats_proxy_->SetMockStats(mock_stats);
+  vie_encoder_->TriggerCpuOveruse();
+  stats_proxy_->ResetMockStats();
+
+  new_video_source.IncomingCapturedFrame(
+      CreateFrame(sequence, frame_width, frame_height));
+  sink_.WaitForEncodedFrame(sequence++);
+
+  // Framerate now adapted.
+  stats = stats_proxy_->GetStats();
+  EXPECT_TRUE(stats.cpu_limited_resolution);
+  EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+  // Disable CPU adaptation.
+  vie_encoder_->SetSource(
+      &new_video_source,
+      VideoSendStream::DegradationPreference::kDegradationDisabled);
+  new_video_source.IncomingCapturedFrame(
+      CreateFrame(sequence, frame_width, frame_height));
+  sink_.WaitForEncodedFrame(sequence++);
+
+  stats = stats_proxy_->GetStats();
+  EXPECT_FALSE(stats.cpu_limited_resolution);
+  EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+  // Try to trigger overuse. Should not succeed.
+  stats_proxy_->SetMockStats(mock_stats);
+  vie_encoder_->TriggerCpuOveruse();
+  stats_proxy_->ResetMockStats();
+
+  stats = stats_proxy_->GetStats();
+  EXPECT_FALSE(stats.cpu_limited_resolution);
+  EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+  // Switch back the source with resolution adaptation enabled.
+  vie_encoder_->SetSource(
+      &video_source_,
+      VideoSendStream::DegradationPreference::kMaintainFramerate);
   video_source_.IncomingCapturedFrame(
       CreateFrame(sequence, frame_width, frame_height));
   sink_.WaitForEncodedFrame(sequence++);
   stats = stats_proxy_->GetStats();
   EXPECT_TRUE(stats.cpu_limited_resolution);
-  EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+  EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
 
   // Trigger CPU normal usage.
   vie_encoder_->TriggerCpuNormalUsage();
@@ -963,7 +1116,28 @@
   sink_.WaitForEncodedFrame(sequence++);
   stats = stats_proxy_->GetStats();
   EXPECT_FALSE(stats.cpu_limited_resolution);
-  EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+  EXPECT_EQ(3, stats.number_of_cpu_adapt_changes);
+
+  // Back to the source with adaptation off, set it back to maintain-resolution.
+  vie_encoder_->SetSource(
+      &new_video_source,
+      VideoSendStream::DegradationPreference::kMaintainResolution);
+  new_video_source.IncomingCapturedFrame(
+      CreateFrame(sequence, frame_width, frame_height));
+  sink_.WaitForEncodedFrame(sequence++);
+  stats = stats_proxy_->GetStats();
+  // Disabled, since we previously switched the source to disabled.
+  EXPECT_FALSE(stats.cpu_limited_resolution);
+  EXPECT_EQ(3, stats.number_of_cpu_adapt_changes);
+
+  // Trigger CPU normal usage.
+  vie_encoder_->TriggerCpuNormalUsage();
+  new_video_source.IncomingCapturedFrame(
+      CreateFrame(sequence, frame_width, frame_height));
+  sink_.WaitForEncodedFrame(sequence++);
+  stats = stats_proxy_->GetStats();
+  EXPECT_FALSE(stats.cpu_limited_resolution);
+  EXPECT_EQ(4, stats.number_of_cpu_adapt_changes);
 
   vie_encoder_->Stop();
 }
@@ -988,7 +1162,8 @@
 
   // Expect no scaling to begin with
   EXPECT_FALSE(video_source_.sink_wants().target_pixel_count);
-  EXPECT_FALSE(video_source_.sink_wants().max_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            video_source_.sink_wants().max_pixel_count);
 
   video_source_.IncomingCapturedFrame(
       CreateFrame(1, frame_width, frame_height));
@@ -1003,7 +1178,7 @@
 
   // Expect a scale down.
   EXPECT_TRUE(video_source_.sink_wants().max_pixel_count);
-  EXPECT_LT(*video_source_.sink_wants().max_pixel_count,
+  EXPECT_LT(video_source_.sink_wants().max_pixel_count,
             frame_width * frame_height);
 
   // Set adaptation disabled.
@@ -1019,7 +1194,8 @@
   sink_.WaitForEncodedFrame(3);
 
   // Expect no scaling
-  EXPECT_FALSE(new_video_source.sink_wants().max_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            new_video_source.sink_wants().max_pixel_count);
 
   // Trigger scale up
   vie_encoder_->TriggerQualityHigh();
@@ -1028,7 +1204,8 @@
   sink_.WaitForEncodedFrame(4);
 
   // Expect nothing to change, still no scaling
-  EXPECT_FALSE(new_video_source.sink_wants().max_pixel_count);
+  EXPECT_EQ(std::numeric_limits<int>::max(),
+            new_video_source.sink_wants().max_pixel_count);
 
   vie_encoder_->Stop();
 }
@@ -1044,7 +1221,7 @@
     sink_.WaitForEncodedFrame(i);
     // Trigger scale down
     vie_encoder_->TriggerQualityLow();
-    EXPECT_GE(*video_source_.sink_wants().max_pixel_count, kMinPixelsPerFrame);
+    EXPECT_GE(video_source_.sink_wants().max_pixel_count, kMinPixelsPerFrame);
   }
 
   vie_encoder_->Stop();
@@ -1137,10 +1314,9 @@
   sink_.ExpectDroppedFrame();
 
   // Expect the sink_wants to specify a scaled frame.
-  EXPECT_TRUE(video_source_.sink_wants().max_pixel_count);
-  EXPECT_LT(*video_source_.sink_wants().max_pixel_count, 1000 * 1000);
+  EXPECT_LT(video_source_.sink_wants().max_pixel_count, 1000 * 1000);
 
-  int last_pixel_count = *video_source_.sink_wants().max_pixel_count;
+  int last_pixel_count = video_source_.sink_wants().max_pixel_count;
 
   // Next frame is scaled
   video_source_.IncomingCapturedFrame(
@@ -1149,7 +1325,7 @@
   // Expect to drop this frame, the wait should time out.
   sink_.ExpectDroppedFrame();
 
-  EXPECT_LT(*video_source_.sink_wants().max_pixel_count, last_pixel_count);
+  EXPECT_LT(video_source_.sink_wants().max_pixel_count, last_pixel_count);
 
   vie_encoder_->Stop();
 }
@@ -1173,8 +1349,7 @@
   sink_.WaitForEncodedFrame(i);
 
   // Expect the sink_wants to specify a scaled frame.
-  EXPECT_TRUE(video_source_.sink_wants().max_pixel_count);
-  EXPECT_LT(*video_source_.sink_wants().max_pixel_count, 1000 * 1000);
+  EXPECT_LT(video_source_.sink_wants().max_pixel_count, 1000 * 1000);
 
   vie_encoder_->Stop();
 }
@@ -1235,7 +1410,7 @@
       CreateFrame(2, kFrameWidth, kFrameHeight));
   sink_.WaitForEncodedFrame((kFrameWidth * 3) / 4, (kFrameHeight * 3) / 4);
 
-  // Trigger CPU normal use, return to original resoluton;
+  // Trigger CPU normal use, return to original resolution;
   vie_encoder_->TriggerCpuNormalUsage();
   video_source_.IncomingCapturedFrame(
       CreateFrame(3, kFrameWidth, kFrameHeight));
@@ -1243,4 +1418,159 @@
 
   vie_encoder_->Stop();
 }
+
+TEST_F(ViEEncoderTest, AdaptsFrameOnOveruseWithMaintainResolution) {
+  const int kDefaultFramerateFps = 30;
+  const int kFrameIntervalMs = rtc::kNumMillisecsPerSec / kDefaultFramerateFps;
+  const int kFrameWidth = 1280;
+  const int kFrameHeight = 720;
+  rtc::ScopedFakeClock fake_clock;
+
+  vie_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+  vie_encoder_->SetSource(
+      &video_source_,
+      VideoSendStream::DegradationPreference::kMaintainResolution);
+  video_source_.set_adaptation_enabled(true);
+
+  fake_clock.SetTimeMicros(kFrameIntervalMs * 1000);
+  int64_t timestamp_ms = kFrameIntervalMs;
+
+  video_source_.IncomingCapturedFrame(
+      CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+  sink_.WaitForEncodedFrame(timestamp_ms);
+
+  // Try to trigger overuse. No fps estimate available => no effect.
+  vie_encoder_->TriggerCpuOveruse();
+
+  // Insert frames for one second to get a stable estimate.
+  for (int i = 0; i < kDefaultFramerateFps; ++i) {
+    timestamp_ms += kFrameIntervalMs;
+    fake_clock.AdvanceTimeMicros(kFrameIntervalMs * 1000);
+    video_source_.IncomingCapturedFrame(
+        CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+    sink_.WaitForEncodedFrame(timestamp_ms);
+  }
+
+  // Trigger CPU overuse, reduce framerate by 2/3.
+  vie_encoder_->TriggerCpuOveruse();
+  int num_frames_dropped = 0;
+  for (int i = 0; i < kDefaultFramerateFps; ++i) {
+    timestamp_ms += kFrameIntervalMs;
+    fake_clock.AdvanceTimeMicros(kFrameIntervalMs * 1000);
+    video_source_.IncomingCapturedFrame(
+        CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+    if (!sink_.WaitForFrame(kFrameTimeoutMs)) {
+      ++num_frames_dropped;
+    } else {
+      sink_.CheckLastFrameSizeMathces(kFrameWidth, kFrameHeight);
+    }
+  }
+
+  // TODO(sprang): Find where there's rounding errors or stuff causing the
+  // margin here to be a little larger than we'd like (input fps estimate is
+  // off) and the frame dropping is a little too aggressive.
+  const int kErrorMargin = 5;
+  EXPECT_NEAR(num_frames_dropped,
+              kDefaultFramerateFps - (kDefaultFramerateFps * 2 / 3),
+              kErrorMargin);
+
+  // Trigger CPU overuse, reduce framerate by 2/3 again.
+  vie_encoder_->TriggerCpuOveruse();
+  num_frames_dropped = 0;
+  for (int i = 0; i < kDefaultFramerateFps; ++i) {
+    timestamp_ms += kFrameIntervalMs;
+    fake_clock.AdvanceTimeMicros(kFrameIntervalMs * 1000);
+    video_source_.IncomingCapturedFrame(
+        CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+    if (!sink_.WaitForFrame(kFrameTimeoutMs)) {
+      ++num_frames_dropped;
+    } else {
+      sink_.CheckLastFrameSizeMathces(kFrameWidth, kFrameHeight);
+    }
+  }
+  EXPECT_NEAR(num_frames_dropped,
+              kDefaultFramerateFps - (kDefaultFramerateFps * 4 / 9),
+              kErrorMargin);
+
+  // Go back up one step.
+  vie_encoder_->TriggerCpuNormalUsage();
+  num_frames_dropped = 0;
+  for (int i = 0; i < kDefaultFramerateFps; ++i) {
+    timestamp_ms += kFrameIntervalMs;
+    fake_clock.AdvanceTimeMicros(kFrameIntervalMs * 1000);
+    video_source_.IncomingCapturedFrame(
+        CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+    if (!sink_.WaitForFrame(kFrameTimeoutMs)) {
+      ++num_frames_dropped;
+    } else {
+      sink_.CheckLastFrameSizeMathces(kFrameWidth, kFrameHeight);
+    }
+  }
+  EXPECT_NEAR(num_frames_dropped,
+              kDefaultFramerateFps - (kDefaultFramerateFps * 2 / 3),
+              kErrorMargin);
+
+  // Go back up to original mode.
+  vie_encoder_->TriggerCpuNormalUsage();
+  num_frames_dropped = 0;
+  for (int i = 0; i < kDefaultFramerateFps; ++i) {
+    timestamp_ms += kFrameIntervalMs;
+    fake_clock.AdvanceTimeMicros(kFrameIntervalMs * 1000);
+    video_source_.IncomingCapturedFrame(
+        CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+    if (!sink_.WaitForFrame(kFrameTimeoutMs)) {
+      ++num_frames_dropped;
+    } else {
+      sink_.CheckLastFrameSizeMathces(kFrameWidth, kFrameHeight);
+    }
+  }
+  EXPECT_NEAR(num_frames_dropped, 0, kErrorMargin);
+
+  vie_encoder_->Stop();
+}
+
+TEST_F(ViEEncoderTest, DoesntAdaptDownPastMinFramerate) {
+  const int kFramerateFps = 5;
+  const int kFrameIntervalMs = rtc::kNumMillisecsPerSec / kFramerateFps;
+  const int kMinFpsFrameInterval = rtc::kNumMillisecsPerSec / kMinFramerateFps;
+  const int kFrameWidth = 1280;
+  const int kFrameHeight = 720;
+
+  rtc::ScopedFakeClock fake_clock;
+  vie_encoder_->OnBitrateUpdated(kTargetBitrateBps, 0, 0);
+  vie_encoder_->SetSource(
+      &video_source_,
+      VideoSendStream::DegradationPreference::kMaintainResolution);
+  video_source_.set_adaptation_enabled(true);
+
+  fake_clock.SetTimeMicros(kFrameIntervalMs * 1000);
+  int64_t timestamp_ms = kFrameIntervalMs;
+
+  // Trigger overuse as much as we can.
+  for (int i = 0; i < ViEEncoder::kMaxCpuResolutionDowngrades; ++i) {
+    // Insert frames to get a new fps estimate...
+    for (int j = 0; j < kFramerateFps; ++j) {
+      video_source_.IncomingCapturedFrame(
+          CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+      timestamp_ms += kFrameIntervalMs;
+      fake_clock.AdvanceTimeMicros(kFrameIntervalMs * 1000);
+    }
+    // ...and then try to adapt again.
+    vie_encoder_->TriggerCpuOveruse();
+  }
+
+  // Drain any frame in the pipeline.
+  sink_.WaitForFrame(kDefaultTimeoutMs);
+
+  // Insert frames at min fps, all should go through.
+  for (int i = 0; i < 10; ++i) {
+    timestamp_ms += kMinFpsFrameInterval;
+    fake_clock.AdvanceTimeMicros(kMinFpsFrameInterval * 1000);
+    video_source_.IncomingCapturedFrame(
+        CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
+    sink_.WaitForEncodedFrame(timestamp_ms);
+  }
+
+  vie_encoder_->Stop();
+}
 }  // namespace webrtc
diff --git a/webrtc/video_send_stream.h b/webrtc/video_send_stream.h
index 680eb02..8a82a6d 100644
--- a/webrtc/video_send_stream.h
+++ b/webrtc/video_send_stream.h
@@ -214,12 +214,21 @@
 
   // Based on the spec in
   // https://w3c.github.io/webrtc-pc/#idl-def-rtcdegradationpreference.
+  // These options are enforced on a best-effort basis. For instance, all of
+  // these options may suffer some frame drops in order to avoid queuing.
+  // TODO(sprang): Look into possibility of more strictly enforcing the
+  // maintain-framerate option.
   enum class DegradationPreference {
+    // Don't take any actions based on over-utilization signals.
+    kDegradationDisabled,
+    // On over-use, request lower frame rate, possibly causing frame drops.
     kMaintainResolution,
-    // TODO(perkj): Implement kMaintainFrameRate. kBalanced will drop frames
-    // if the encoder overshoots or the encoder can not encode fast enough.
+    // On over-use, request lower resolution, possibly causing down-scaling.
+    kMaintainFramerate,
+    // Try to strike a "pleasing" balance between frame rate and resolution.
     kBalanced,
   };
+
   virtual void SetSource(
       rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
       const DegradationPreference& degradation_preference) = 0;