Revert of Adding support for simulcast and spatial layers into VideoQualityTest (patchset #10 id:180001 of https://codereview.webrtc.org/1353263005/ )

Reason for revert:
Temporarily reverting, as this causes some issues with perf tests. In particular, tests with packet loss no longer work.

Original issue's description:
> Adding support for simulcast and spatial layers into VideoQualityTest
>
> The CL includes several changes:
> - Adding flags describing the streams and spatial layers.
> - Reorganizing the order of the flags, to make them easier to maintain.
> - Adding a member .params_ to VideoQualityAnalyzer.
>     (instead of passing it to every member function manually)
> - Updating VideoAnalyzer to support simulcast.
>     (select appropriate ssrc and fix timestamps which are sometimes increased by 1)
> - VP9EncoderImpl already had code for automatic calculation of bitrate for each layer.
>     Changing to first read bitrates and resolution ratios from the flags, if specified.
>     If not specified, reverting to the old code that sets the values automatically.
> - Changing the parameters in LayerFilteringTransport, replacing
>     xx_discard_thresholds with selected_xx, to make it easier to use for the end user.
>
> Committed: https://crrev.com/87f83a9a27d657731ccb54025bc04ccad0da136e
> Cr-Commit-Position: refs/heads/master@{#10215}

TBR=pbos@webrtc.org,mflodman@webrtc.org,ivica@webrtc.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true

Review URL: https://codereview.webrtc.org/1397363002

Cr-Commit-Position: refs/heads/master@{#10252}
diff --git a/webrtc/common_types.h b/webrtc/common_types.h
index dfbc706..c11c4d7 100644
--- a/webrtc/common_types.h
+++ b/webrtc/common_types.h
@@ -548,7 +548,6 @@
 enum { kConfigParameterSize = 128};
 enum { kPayloadNameSize = 32};
 enum { kMaxSimulcastStreams = 4};
-enum { kMaxSpatialLayers = 5};
 enum { kMaxTemporalStreams = 4};
 
 enum VideoCodecComplexity
@@ -678,13 +677,6 @@
   }
 };
 
-struct SpatialLayer {
-  int scaling_factor_num;
-  int scaling_factor_den;
-  int target_bitrate_bps;
-  // TODO(ivica): Add max_quantizer and min_quantizer?
-};
-
 enum VideoCodecMode {
   kRealtimeVideo,
   kScreensharing
@@ -711,7 +703,6 @@
   unsigned int        qpMax;
   unsigned char       numberOfSimulcastStreams;
   SimulcastStream     simulcastStream[kMaxSimulcastStreams];
-  SpatialLayer        spatialLayers[kMaxSpatialLayers];
 
   VideoCodecMode      mode;
 
diff --git a/webrtc/config.h b/webrtc/config.h
index 4b863c8..5271163 100644
--- a/webrtc/config.h
+++ b/webrtc/config.h
@@ -104,7 +104,6 @@
   std::string ToString() const;
 
   std::vector<VideoStream> streams;
-  std::vector<SpatialLayer> spatial_layers;
   ContentType content_type;
   void* encoder_specific_settings;
 
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
index 50ec305..deb3bca 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -112,72 +112,42 @@
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
-bool VP9EncoderImpl::ExplicitlyConfiguredSpatialLayers() const {
-  // We check target_bitrate_bps of the 0th layer to see if the spatial layers
-  // (i.e. bitrates) were explicitly configured.
-  return num_spatial_layers_ > 1 &&
-         codec_.spatialLayers[0].target_bitrate_bps > 0;
-}
-
 bool VP9EncoderImpl::SetSvcRates() {
+  float rate_ratio[VPX_MAX_LAYERS] = {0};
+  float total = 0;
   uint8_t i = 0;
 
-  if (ExplicitlyConfiguredSpatialLayers()) {
-    if (num_temporal_layers_ > 1) {
-      LOG(LS_ERROR) << "Multiple temporal layers when manually specifying "
-                       "spatial layers not implemented yet!";
+  for (i = 0; i < num_spatial_layers_; ++i) {
+    if (svc_internal_.svc_params.scaling_factor_num[i] <= 0 ||
+        svc_internal_.svc_params.scaling_factor_den[i] <= 0) {
       return false;
     }
-    int total_bitrate_bps = 0;
-    for (i = 0; i < num_spatial_layers_; ++i)
-      total_bitrate_bps += codec_.spatialLayers[i].target_bitrate_bps;
-    // If total bitrate differs now from what has been specified at the
-    // beginning, update the bitrates in the same ratio as before.
-    for (i = 0; i < num_spatial_layers_; ++i) {
-      config_->ss_target_bitrate[i] =
-          config_->layer_target_bitrate[i] = static_cast<int>(
-              static_cast<int64_t>(config_->rc_target_bitrate) *
-              codec_.spatialLayers[i].target_bitrate_bps / total_bitrate_bps);
-    }
-  } else {
-    float rate_ratio[VPX_MAX_LAYERS] = {0};
-    float total = 0;
+    rate_ratio[i] = static_cast<float>(
+        svc_internal_.svc_params.scaling_factor_num[i]) /
+        svc_internal_.svc_params.scaling_factor_den[i];
+    total += rate_ratio[i];
+  }
 
-    for (i = 0; i < num_spatial_layers_; ++i) {
-      if (svc_internal_.svc_params.scaling_factor_num[i] <= 0 ||
-          svc_internal_.svc_params.scaling_factor_den[i] <= 0) {
-        LOG(LS_ERROR) << "Scaling factors not specified!";
-        return false;
-      }
-      rate_ratio[i] = static_cast<float>(
-          svc_internal_.svc_params.scaling_factor_num[i]) /
-          svc_internal_.svc_params.scaling_factor_den[i];
-      total += rate_ratio[i];
-    }
-
-    for (i = 0; i < num_spatial_layers_; ++i) {
-      config_->ss_target_bitrate[i] = static_cast<unsigned int>(
-          config_->rc_target_bitrate * rate_ratio[i] / total);
-      if (num_temporal_layers_ == 1) {
-        config_->layer_target_bitrate[i] = config_->ss_target_bitrate[i];
-      } else if (num_temporal_layers_ == 2) {
-        config_->layer_target_bitrate[i * num_temporal_layers_] =
-            config_->ss_target_bitrate[i] * 2 / 3;
-        config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
-            config_->ss_target_bitrate[i];
-      } else if (num_temporal_layers_ == 3) {
-        config_->layer_target_bitrate[i * num_temporal_layers_] =
-            config_->ss_target_bitrate[i] / 2;
-        config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
-            config_->layer_target_bitrate[i * num_temporal_layers_] +
-            (config_->ss_target_bitrate[i] / 4);
-        config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
-            config_->ss_target_bitrate[i];
-      } else {
-        LOG(LS_ERROR) << "Unsupported number of temporal layers: "
-                      << num_temporal_layers_;
-        return false;
-      }
+  for (i = 0; i < num_spatial_layers_; ++i) {
+    config_->ss_target_bitrate[i] = static_cast<unsigned int>(
+        config_->rc_target_bitrate * rate_ratio[i] / total);
+    if (num_temporal_layers_ == 1) {
+      config_->layer_target_bitrate[i] = config_->ss_target_bitrate[i];
+    } else if (num_temporal_layers_ == 2) {
+      config_->layer_target_bitrate[i * num_temporal_layers_] =
+          config_->ss_target_bitrate[i] * 2 / 3;
+      config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+          config_->ss_target_bitrate[i];
+    } else if (num_temporal_layers_ == 3) {
+      config_->layer_target_bitrate[i * num_temporal_layers_] =
+          config_->ss_target_bitrate[i] / 2;
+      config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+          config_->layer_target_bitrate[i * num_temporal_layers_] +
+          (config_->ss_target_bitrate[i] / 4);
+      config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
+          config_->ss_target_bitrate[i];
+    } else {
+      return false;
     }
   }
 
@@ -379,24 +349,14 @@
 int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
   config_->ss_number_layers = num_spatial_layers_;
 
-  if (ExplicitlyConfiguredSpatialLayers()) {
-    for (int i = 0; i < num_spatial_layers_; ++i) {
-      const auto &layer = codec_.spatialLayers[i];
-      svc_internal_.svc_params.max_quantizers[i] = config_->rc_max_quantizer;
-      svc_internal_.svc_params.min_quantizers[i] = config_->rc_min_quantizer;
-      svc_internal_.svc_params.scaling_factor_num[i] = layer.scaling_factor_num;
-      svc_internal_.svc_params.scaling_factor_den[i] = layer.scaling_factor_den;
-    }
-  } else {
-    int scaling_factor_num = 256;
-    for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
-      svc_internal_.svc_params.max_quantizers[i] = config_->rc_max_quantizer;
-      svc_internal_.svc_params.min_quantizers[i] = config_->rc_min_quantizer;
-      // 1:2 scaling in each dimension.
-      svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num;
-      svc_internal_.svc_params.scaling_factor_den[i] = 256;
-      scaling_factor_num /= 2;
-    }
+  int scaling_factor_num = 256;
+  for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
+    svc_internal_.svc_params.max_quantizers[i] = config_->rc_max_quantizer;
+    svc_internal_.svc_params.min_quantizers[i] = config_->rc_min_quantizer;
+    // 1:2 scaling in each dimension.
+    svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num;
+    svc_internal_.svc_params.scaling_factor_den[i] = 256;
+    scaling_factor_num /= 2;
   }
 
   if (!SetSvcRates()) {
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
index 646b398..c164a63 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
@@ -56,7 +56,6 @@
                              const vpx_codec_cx_pkt& pkt,
                              uint32_t timestamp);
 
-  bool ExplicitlyConfiguredSpatialLayers() const;
   bool SetSvcRates();
 
   virtual int GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt);
diff --git a/webrtc/test/layer_filtering_transport.cc b/webrtc/test/layer_filtering_transport.cc
index e3bca86..b9bc24d 100644
--- a/webrtc/test/layer_filtering_transport.cc
+++ b/webrtc/test/layer_filtering_transport.cc
@@ -23,18 +23,13 @@
     const FakeNetworkPipe::Config& config,
     uint8_t vp8_video_payload_type,
     uint8_t vp9_video_payload_type,
-    int selected_tl,
-    int selected_sl)
+    uint8_t tl_discard_threshold,
+    uint8_t sl_discard_threshold)
     : test::DirectTransport(config),
       vp8_video_payload_type_(vp8_video_payload_type),
       vp9_video_payload_type_(vp9_video_payload_type),
-      selected_tl_(selected_tl),
-      selected_sl_(selected_sl),
-      discarded_last_packet_(false) {
-}
-
-bool LayerFilteringTransport::DiscardedLastPacket() const {
-  return discarded_last_packet_;
+      tl_discard_threshold_(tl_discard_threshold),
+      sl_discard_threshold_(sl_discard_threshold) {
 }
 
 uint16_t LayerFilteringTransport::NextSequenceNumber(uint32_t ssrc) {
@@ -47,7 +42,7 @@
 bool LayerFilteringTransport::SendRtp(const uint8_t* packet,
                                       size_t length,
                                       const PacketOptions& options) {
-  if (selected_tl_ == -1 && selected_sl_ == -1) {
+  if (tl_discard_threshold_ == 0 && sl_discard_threshold_ == 0) {
     // Nothing to change, forward the packet immediately.
     return test::DirectTransport::SendRtp(packet, length, options);
   }
@@ -70,24 +65,23 @@
         RtpDepacketizer::Create(is_vp8 ? kRtpVideoVp8 : kRtpVideoVp9));
     RtpDepacketizer::ParsedPayload parsed_payload;
     if (depacketizer->Parse(&parsed_payload, payload, payload_data_length)) {
-      const int temporal_idx = static_cast<int>(
+      const uint8_t temporalIdx =
           is_vp8 ? parsed_payload.type.Video.codecHeader.VP8.temporalIdx
-                 : parsed_payload.type.Video.codecHeader.VP9.temporal_idx);
-      const int spatial_idx = static_cast<int>(
+                 : parsed_payload.type.Video.codecHeader.VP9.temporal_idx;
+      const uint8_t spatialIdx =
           is_vp8 ? kNoSpatialIdx
-                 : parsed_payload.type.Video.codecHeader.VP9.spatial_idx);
-      if (selected_sl_ >= 0 &&
-          spatial_idx == selected_sl_ &&
+                 : parsed_payload.type.Video.codecHeader.VP9.spatial_idx;
+      if (sl_discard_threshold_ > 0 &&
+          spatialIdx == sl_discard_threshold_ - 1 &&
           parsed_payload.type.Video.codecHeader.VP9.end_of_frame) {
         // This layer is now the last in the superframe.
         set_marker_bit = true;
       }
-      if ((selected_tl_ >= 0 && temporal_idx != kNoTemporalIdx &&
-           temporal_idx > selected_tl_) ||
-          (selected_sl_ >= 0 && spatial_idx != kNoSpatialIdx &&
-           spatial_idx > selected_sl_)) {
-        discarded_last_packet_ = true;
-        return true;
+      if ((tl_discard_threshold_ > 0 && temporalIdx != kNoTemporalIdx &&
+           temporalIdx >= tl_discard_threshold_) ||
+          (sl_discard_threshold_ > 0 && spatialIdx != kNoSpatialIdx &&
+           spatialIdx >= sl_discard_threshold_)) {
+        return true;  // Discard the packet.
       }
     } else {
       RTC_NOTREACHED() << "Parse error";
diff --git a/webrtc/test/layer_filtering_transport.h b/webrtc/test/layer_filtering_transport.h
index dc3182a..58c2dd5 100644
--- a/webrtc/test/layer_filtering_transport.h
+++ b/webrtc/test/layer_filtering_transport.h
@@ -24,9 +24,8 @@
   LayerFilteringTransport(const FakeNetworkPipe::Config& config,
                           uint8_t vp8_video_payload_type,
                           uint8_t vp9_video_payload_type,
-                          int selected_tl,
-                          int selected_sl);
-  bool DiscardedLastPacket() const;
+                          uint8_t tl_discard_threshold,
+                          uint8_t sl_discard_threshold);
   bool SendRtp(const uint8_t* data,
                size_t length,
                const PacketOptions& options) override;
@@ -36,13 +35,12 @@
   // Used to distinguish between VP8 and VP9.
   const uint8_t vp8_video_payload_type_;
   const uint8_t vp9_video_payload_type_;
-  // Discard or invalidate all temporal/spatial layers with id greater than the
-  // selected one. -1 to disable filtering.
-  const int selected_tl_;
-  const int selected_sl_;
+  // Discard all temporal/spatial layers with id greater or equal the
+  // threshold. 0 to disable.
+  const uint8_t tl_discard_threshold_;
+  const uint8_t sl_discard_threshold_;
   // Current sequence number for each SSRC separately.
   std::map<uint32_t, uint16_t> current_seq_nums_;
-  bool discarded_last_packet_;
 };
 
 }  // namespace test
diff --git a/webrtc/video/full_stack.cc b/webrtc/video/full_stack.cc
index 2810cd6..8511b82 100644
--- a/webrtc/video/full_stack.cc
+++ b/webrtc/video/full_stack.cc
@@ -23,15 +23,6 @@
   }
 };
 
-// VideoQualityTest::Params params = {
-//   { ... },      // Common.
-//   { ... },      // Video-specific settings.
-//   { ... },      // Screenshare-specific settings.
-//   { ... },      // Analyzer settings.
-//   pipe,         // FakeNetworkPipe::Config
-//   { ... },      // Spatial scalability.
-//   logs          // bool
-// };
 
 TEST_F(FullStackTest, ParisQcifWithoutPacketLoss) {
   VideoQualityTest::Params paris_qcif = {
@@ -129,16 +120,16 @@
 
 TEST_F(FullStackTest, ScreenshareSlidesVP8_2TL) {
   VideoQualityTest::Params screenshare = {
-      {1850, 1110, 5, 50000, 200000, 2000000, "VP8", 2, 1, 400000},
-      {},
-      {true, 10},
+      {1850, 1110, 5, 50000, 200000, 2000000, "VP8", 2, 400000},
+      {},          // Video-specific.
+      {true, 10},  // Screenshare-specific.
       {"screenshare_slides", 0.0, 0.0, kFullStackTestDurationSecs}};
   RunTest(screenshare);
 }
 
 TEST_F(FullStackTest, ScreenshareSlidesVP8_2TL_Scroll) {
   VideoQualityTest::Params config = {
-      {1850, 1110 / 2, 5, 50000, 200000, 2000000, "VP8", 2, 1, 400000},
+      {1850, 1110 / 2, 5, 50000, 200000, 2000000, "VP8", 2, 400000},
       {},
       {true, 10, 2},
       {"screenshare_slides_scrolling", 0.0, 0.0, kFullStackTestDurationSecs}};
@@ -147,7 +138,7 @@
 
 TEST_F(FullStackTest, ScreenshareSlidesVP9_2TL) {
   VideoQualityTest::Params screenshare = {
-      {1850, 1110, 5, 50000, 200000, 2000000, "VP9", 2, 1, 400000},
+      {1850, 1110, 5, 50000, 200000, 2000000, "VP9", 2, 400000},
       {},
       {true, 10},
       {"screenshare_slides_vp9_2tl", 0.0, 0.0, kFullStackTestDurationSecs}};
diff --git a/webrtc/video/screenshare_loopback.cc b/webrtc/video/screenshare_loopback.cc
index 277052c..9897783 100644
--- a/webrtc/video/screenshare_loopback.cc
+++ b/webrtc/video/screenshare_loopback.cc
@@ -20,7 +20,6 @@
 namespace webrtc {
 namespace flags {
 
-// Flags common with video loopback, with different default values.
 DEFINE_int32(width, 1850, "Video width (crops source).");
 size_t Width() {
   return static_cast<size_t>(FLAGS_width);
@@ -36,6 +35,21 @@
   return static_cast<int>(FLAGS_fps);
 }
 
+DEFINE_int32(slide_change_interval,
+             10,
+             "Interval (in seconds) between simulated slide changes.");
+int SlideChangeInterval() {
+  return static_cast<int>(FLAGS_slide_change_interval);
+}
+
+DEFINE_int32(
+    scroll_duration,
+    0,
+    "Duration (in seconds) during which a slide will be scrolled into place.");
+int ScrollDuration() {
+  return static_cast<int>(FLAGS_scroll_duration);
+}
+
 DEFINE_int32(min_bitrate, 50, "Call and stream min bitrate in kbps.");
 int MinBitrateKbps() {
   return static_cast<int>(FLAGS_min_bitrate);
@@ -57,40 +71,26 @@
 }
 
 DEFINE_int32(num_temporal_layers, 2, "Number of temporal layers to use.");
-int NumTemporalLayers() {
-  return static_cast<int>(FLAGS_num_temporal_layers);
-}
-
-// Flags common with video loopback, with equal default values.
-DEFINE_string(codec, "VP8", "Video codec to use.");
-std::string Codec() {
-  return static_cast<std::string>(FLAGS_codec);
-}
-
-DEFINE_int32(selected_tl,
-             -1,
-             "Temporal layer to show or analyze. -1 to disable filtering.");
-int SelectedTL() {
-  return static_cast<int>(FLAGS_selected_tl);
+size_t NumTemporalLayers() {
+  return static_cast<size_t>(FLAGS_num_temporal_layers);
 }
 
 DEFINE_int32(
-    duration,
+    tl_discard_threshold,
     0,
-    "Duration of the test in seconds. If 0, rendered will be shown instead.");
-int DurationSecs() {
-  return static_cast<int>(FLAGS_duration);
+    "Discard TLs with id greater or equal the threshold. 0 to disable.");
+size_t TLDiscardThreshold() {
+  return static_cast<size_t>(FLAGS_tl_discard_threshold);
 }
 
-DEFINE_string(output_filename, "", "Target graph data filename.");
-std::string OutputFilename() {
-  return static_cast<std::string>(FLAGS_output_filename);
+DEFINE_int32(min_transmit_bitrate, 400, "Min transmit bitrate incl. padding.");
+int MinTransmitBitrateKbps() {
+  return FLAGS_min_transmit_bitrate;
 }
 
-DEFINE_string(
-    graph_title, "", "If empty, title will be generated automatically.");
-std::string GraphTitle() {
-  return static_cast<std::string>(FLAGS_graph_title);
+DEFINE_string(codec, "VP8", "Video codec to use.");
+std::string Codec() {
+  return static_cast<std::string>(FLAGS_codec);
 }
 
 DEFINE_int32(loss_percent, 0, "Percentage of packets randomly lost.");
@@ -124,51 +124,21 @@
   return static_cast<int>(FLAGS_std_propagation_delay_ms);
 }
 
-DEFINE_int32(selected_stream, 0, "ID of the stream to show or analyze.");
-int SelectedStream() {
-  return static_cast<int>(FLAGS_selected_stream);
-}
-
-DEFINE_int32(num_spatial_layers, 1, "Number of spatial layers to use.");
-int NumSpatialLayers() {
-  return static_cast<int>(FLAGS_num_spatial_layers);
-}
-
-DEFINE_int32(selected_sl,
-             -1,
-             "Spatial layer to show or analyze. -1 to disable filtering.");
-int SelectedSL() {
-  return static_cast<int>(FLAGS_selected_sl);
-}
-
-DEFINE_string(stream0,
-              "",
-              "Comma separated values describing VideoStream for stream #0.");
-std::string Stream0() {
-  return static_cast<std::string>(FLAGS_stream0);
-}
-
-DEFINE_string(stream1,
-              "",
-              "Comma separated values describing VideoStream for stream #1.");
-std::string Stream1() {
-  return static_cast<std::string>(FLAGS_stream1);
-}
-
-DEFINE_string(
-    sl0, "", "Comma separated values describing SpatialLayer for layer #0.");
-std::string SL0() {
-  return static_cast<std::string>(FLAGS_sl0);
-}
-
-DEFINE_string(
-    sl1, "", "Comma separated values describing SpatialLayer for layer #1.");
-std::string SL1() {
-  return static_cast<std::string>(FLAGS_sl1);
-}
-
 DEFINE_bool(logs, false, "print logs to stderr");
 
+DEFINE_string(
+    output_filename,
+    "",
+    "Name of a target graph data file. If set, no preview will be shown.");
+std::string OutputFilename() {
+  return static_cast<std::string>(FLAGS_output_filename);
+}
+
+DEFINE_int32(duration, 60, "Duration of the test in seconds.");
+int DurationSecs() {
+  return static_cast<int>(FLAGS_duration);
+}
+
 DEFINE_bool(send_side_bwe, true, "Use send-side bandwidth estimation");
 
 DEFINE_string(
@@ -178,28 +148,6 @@
     "E.g. running with --force_fieldtrials=WebRTC-FooFeature/Enable/"
     " will assign the group Enable to field trial WebRTC-FooFeature. Multiple "
     "trials are separated by \"/\"");
-
-// Screenshare-specific flags.
-DEFINE_int32(min_transmit_bitrate, 400, "Min transmit bitrate incl. padding.");
-int MinTransmitBitrateKbps() {
-  return FLAGS_min_transmit_bitrate;
-}
-
-DEFINE_int32(slide_change_interval,
-             10,
-             "Interval (in seconds) between simulated slide changes.");
-int SlideChangeInterval() {
-  return static_cast<int>(FLAGS_slide_change_interval);
-}
-
-DEFINE_int32(
-    scroll_duration,
-    0,
-    "Duration (in seconds) during which a slide will be scrolled into place.");
-int ScrollDuration() {
-  return static_cast<int>(FLAGS_scroll_duration);
-}
-
 }  // namespace flags
 
 void Loopback() {
@@ -219,31 +167,20 @@
       {flags::Width(), flags::Height(), flags::Fps(),
        flags::MinBitrateKbps() * 1000, flags::TargetBitrateKbps() * 1000,
        flags::MaxBitrateKbps() * 1000, flags::Codec(),
-       flags::NumTemporalLayers(), flags::SelectedTL(),
-       flags::MinTransmitBitrateKbps() * 1000, call_bitrate_config,
+       flags::NumTemporalLayers(), flags::MinTransmitBitrateKbps() * 1000,
+       call_bitrate_config, flags::TLDiscardThreshold(),
        flags::FLAGS_send_side_bwe},
       {},  // Video specific.
       {true, flags::SlideChangeInterval(), flags::ScrollDuration()},
-      {"screenshare", 0.0, 0.0, flags::DurationSecs(), flags::OutputFilename(),
-       flags::GraphTitle()},
+      {"screenshare", 0.0, 0.0, flags::DurationSecs(), flags::OutputFilename()},
       pipe_config,
       flags::FLAGS_logs};
 
-  std::vector<std::string> stream_descriptors;
-  stream_descriptors.push_back(flags::Stream0());
-  stream_descriptors.push_back(flags::Stream1());
-  std::vector<std::string> SL_descriptors;
-  SL_descriptors.push_back(flags::SL0());
-  SL_descriptors.push_back(flags::SL1());
-  VideoQualityTest::FillScalabilitySettings(&params, stream_descriptors,
-      flags::SelectedStream(), flags::NumSpatialLayers(), flags::SelectedSL(),
-      SL_descriptors);
-
   VideoQualityTest test;
-  if (flags::DurationSecs())
-    test.RunWithAnalyzer(params);
-  else
+  if (flags::OutputFilename().empty())
     test.RunWithVideoRenderer(params);
+  else
+    test.RunWithAnalyzer(params);
 }
 }  // namespace webrtc
 
diff --git a/webrtc/video/video_loopback.cc b/webrtc/video/video_loopback.cc
index 9656434..0c06f85 100644
--- a/webrtc/video/video_loopback.cc
+++ b/webrtc/video/video_loopback.cc
@@ -20,7 +20,6 @@
 namespace webrtc {
 namespace flags {
 
-// Flags common with screenshare loopback, with different default values.
 DEFINE_int32(width, 640, "Video width.");
 size_t Width() {
   return static_cast<size_t>(FLAGS_width);
@@ -56,45 +55,11 @@
   return static_cast<int>(FLAGS_max_bitrate);
 }
 
-DEFINE_int32(num_temporal_layers,
-             1,
-             "Number of temporal layers. Set to 1-4 to override.");
-int NumTemporalLayers() {
-  return static_cast<int>(FLAGS_num_temporal_layers);
-}
-
-// Flags common with screenshare loopback, with equal default values.
 DEFINE_string(codec, "VP8", "Video codec to use.");
 std::string Codec() {
   return static_cast<std::string>(FLAGS_codec);
 }
 
-DEFINE_int32(selected_tl,
-             -1,
-             "Temporal layer to show or analyze. -1 to disable filtering.");
-int SelectedTL() {
-  return static_cast<int>(FLAGS_selected_tl);
-}
-
-DEFINE_int32(
-    duration,
-    0,
-    "Duration of the test in seconds. If 0, rendered will be shown instead.");
-int DurationSecs() {
-  return static_cast<int>(FLAGS_duration);
-}
-
-DEFINE_string(output_filename, "", "Target graph data filename.");
-std::string OutputFilename() {
-  return static_cast<std::string>(FLAGS_output_filename);
-}
-
-DEFINE_string(
-    graph_title, "", "If empty, title will be generated automatically.");
-std::string GraphTitle() {
-  return static_cast<std::string>(FLAGS_graph_title);
-}
-
 DEFINE_int32(loss_percent, 0, "Percentage of packets randomly lost.");
 int LossPercent() {
   return static_cast<int>(FLAGS_loss_percent);
@@ -126,53 +91,8 @@
   return static_cast<int>(FLAGS_std_propagation_delay_ms);
 }
 
-DEFINE_int32(selected_stream, 0, "ID of the stream to show or analyze.");
-int SelectedStream() {
-  return static_cast<int>(FLAGS_selected_stream);
-}
-
-DEFINE_int32(num_spatial_layers, 1, "Number of spatial layers to use.");
-int NumSpatialLayers() {
-  return static_cast<int>(FLAGS_num_spatial_layers);
-}
-
-DEFINE_int32(selected_sl,
-             -1,
-             "Spatial layer to show or analyze. -1 to disable filtering.");
-int SelectedSL() {
-  return static_cast<int>(FLAGS_selected_sl);
-}
-
-DEFINE_string(stream0,
-              "",
-              "Comma separated values describing VideoStream for stream #0.");
-std::string Stream0() {
-  return static_cast<std::string>(FLAGS_stream0);
-}
-
-DEFINE_string(stream1,
-              "",
-              "Comma separated values describing VideoStream for stream #1.");
-std::string Stream1() {
-  return static_cast<std::string>(FLAGS_stream1);
-}
-
-DEFINE_string(
-    sl0, "", "Comma separated values describing SpatialLayer for layer #0.");
-std::string SL0() {
-  return static_cast<std::string>(FLAGS_sl0);
-}
-
-DEFINE_string(
-    sl1, "", "Comma separated values describing SpatialLayer for layer #1.");
-std::string SL1() {
-  return static_cast<std::string>(FLAGS_sl1);
-}
-
 DEFINE_bool(logs, false, "print logs to stderr");
 
-DEFINE_bool(send_side_bwe, true, "Use send-side bandwidth estimation");
-
 DEFINE_string(
     force_fieldtrials,
     "",
@@ -181,7 +101,21 @@
     " will assign the group Enable to field trial WebRTC-FooFeature. Multiple "
     "trials are separated by \"/\"");
 
-// Video-specific flags.
+DEFINE_int32(num_temporal_layers,
+             1,
+             "Number of temporal layers. Set to 1-4 to override.");
+size_t NumTemporalLayers() {
+  return static_cast<size_t>(FLAGS_num_temporal_layers);
+}
+
+DEFINE_int32(
+    tl_discard_threshold,
+    0,
+    "Discard TLs with id greater or equal the threshold. 0 to disable.");
+size_t TLDiscardThreshold() {
+  return static_cast<size_t>(FLAGS_tl_discard_threshold);
+}
+
 DEFINE_string(clip,
               "",
               "Name of the clip to show. If empty, using chroma generator.");
@@ -189,6 +123,21 @@
   return static_cast<std::string>(FLAGS_clip);
 }
 
+DEFINE_string(
+    output_filename,
+    "",
+    "Name of a target graph data file. If set, no preview will be shown.");
+std::string OutputFilename() {
+  return static_cast<std::string>(FLAGS_output_filename);
+}
+
+DEFINE_int32(duration, 60, "Duration of the test in seconds.");
+int DurationSecs() {
+  return static_cast<int>(FLAGS_duration);
+}
+
+DEFINE_bool(send_side_bwe, true, "Use send-side bandwidth estimation");
+
 }  // namespace flags
 
 void Loopback() {
@@ -204,35 +153,27 @@
   call_bitrate_config.start_bitrate_bps = flags::StartBitrateKbps() * 1000;
   call_bitrate_config.max_bitrate_bps = flags::MaxBitrateKbps() * 1000;
 
+  std::string clip = flags::Clip();
+  std::string graph_title = clip.empty() ? "" : "video " + clip;
   VideoQualityTest::Params params{
       {flags::Width(), flags::Height(), flags::Fps(),
        flags::MinBitrateKbps() * 1000, flags::TargetBitrateKbps() * 1000,
        flags::MaxBitrateKbps() * 1000, flags::Codec(),
-       flags::NumTemporalLayers(), flags::SelectedTL(),
+       flags::NumTemporalLayers(),
        0,  // No min transmit bitrate.
-       call_bitrate_config, flags::FLAGS_send_side_bwe},
-      {flags::Clip()},
+       call_bitrate_config, flags::TLDiscardThreshold(),
+       flags::FLAGS_send_side_bwe},
+      {clip},
       {},  // Screenshare specific.
-      {"video", 0.0, 0.0, flags::DurationSecs(), flags::OutputFilename(),
-       flags::GraphTitle()},
+      {graph_title, 0.0, 0.0, flags::DurationSecs(), flags::OutputFilename()},
       pipe_config,
       flags::FLAGS_logs};
 
-  std::vector<std::string> stream_descriptors;
-  stream_descriptors.push_back(flags::Stream0());
-  stream_descriptors.push_back(flags::Stream1());
-  std::vector<std::string> SL_descriptors;
-  SL_descriptors.push_back(flags::SL0());
-  SL_descriptors.push_back(flags::SL1());
-  VideoQualityTest::FillScalabilitySettings(&params, stream_descriptors,
-      flags::SelectedStream(), flags::NumSpatialLayers(), flags::SelectedSL(),
-      SL_descriptors);
-
   VideoQualityTest test;
-  if (flags::DurationSecs())
-    test.RunWithAnalyzer(params);
-  else
+  if (flags::OutputFilename().empty())
     test.RunWithVideoRenderer(params);
+  else
+    test.RunWithAnalyzer(params);
 }
 }  // namespace webrtc
 
diff --git a/webrtc/video/video_quality_test.cc b/webrtc/video/video_quality_test.cc
index 9feca16..1513b81 100644
--- a/webrtc/video/video_quality_test.cc
+++ b/webrtc/video/video_quality_test.cc
@@ -12,7 +12,6 @@
 #include <algorithm>
 #include <deque>
 #include <map>
-#include <sstream>
 #include <vector>
 
 #include "testing/gtest/include/gtest/gtest.h"
@@ -46,22 +45,18 @@
                       public EncodedFrameObserver,
                       public EncodingTimeObserver {
  public:
-  VideoAnalyzer(test::LayerFilteringTransport* transport,
+  VideoAnalyzer(Transport* transport,
                 const std::string& test_label,
                 double avg_psnr_threshold,
                 double avg_ssim_threshold,
                 int duration_frames,
-                FILE* graph_data_output_file,
-                const std::string &graph_title,
-                uint32_t ssrc_to_analyze)
+                FILE* graph_data_output_file)
       : input_(nullptr),
         transport_(transport),
         receiver_(nullptr),
         send_stream_(nullptr),
         test_label_(test_label),
         graph_data_output_file_(graph_data_output_file),
-        graph_title_(graph_title),
-        ssrc_to_analyze_(ssrc_to_analyze),
         frames_to_process_(duration_frames),
         frames_recorded_(0),
         frames_processed_(0),
@@ -156,9 +151,6 @@
     RTPHeader header;
     parser->Parse(packet, length, &header);
 
-    int64_t current_time =
-        Clock::GetRealTimeClock()->CurrentNtpInMilliseconds();
-    bool result = transport_->SendRtp(packet, length, options);
     {
       rtc::CritScope lock(&crit_);
       if (rtp_timestamp_delta_ == 0) {
@@ -166,14 +158,13 @@
         first_send_frame_.Reset();
       }
       uint32_t timestamp = header.timestamp - rtp_timestamp_delta_;
-      send_times_[timestamp] = current_time;
-      if (!transport_->DiscardedLastPacket() &&
-          header.ssrc == ssrc_to_analyze_) {
-        encoded_frame_sizes_[timestamp] +=
-            length - (header.headerLength + header.paddingLength);
-      }
+      send_times_[timestamp] =
+          Clock::GetRealTimeClock()->CurrentNtpInMilliseconds();
+      encoded_frame_sizes_[timestamp] +=
+          length - (header.headerLength + header.paddingLength);
     }
-    return result;
+
+    return transport_->SendRtp(packet, length, options);
   }
 
   bool SendRtcp(const uint8_t* packet, size_t length) override {
@@ -203,11 +194,6 @@
     VideoFrame reference_frame = frames_.front();
     frames_.pop_front();
     assert(!reference_frame.IsZeroSize());
-    if (send_timestamp == reference_frame.timestamp() - 1) {
-      // TODO(ivica): Make this work for > 2 streams.
-      // Look at rtp_sender.c:RTPSender::BuildRTPHeader.
-      ++send_timestamp;
-    }
     EXPECT_EQ(reference_frame.timestamp(), send_timestamp);
     assert(reference_frame.timestamp() == send_timestamp);
 
@@ -261,7 +247,7 @@
   }
 
   VideoCaptureInput* input_;
-  test::LayerFilteringTransport* transport_;
+  Transport* transport_;
   PacketReceiver* receiver_;
   VideoSendStream* send_stream_;
 
@@ -336,13 +322,8 @@
     int64_t recv_time_ms = recv_times_[reference.timestamp()];
     recv_times_.erase(reference.timestamp());
 
-    // TODO(ivica): Make this work for > 2 streams.
-    auto it = encoded_frame_sizes_.find(reference.timestamp());
-    if (it == encoded_frame_sizes_.end())
-      it = encoded_frame_sizes_.find(reference.timestamp() - 1);
-    size_t encoded_size = it == encoded_frame_sizes_.end() ? 0 : it->second;
-    if (it != encoded_frame_sizes_.end())
-      encoded_frame_sizes_.erase(it);
+    size_t encoded_size = encoded_frame_sizes_[reference.timestamp()];
+    encoded_frame_sizes_.erase(reference.timestamp());
 
     VideoFrame reference_copy;
     VideoFrame render_copy;
@@ -530,7 +511,7 @@
                 return A.input_time_ms < B.input_time_ms;
               });
 
-    fprintf(out, "%s\n", graph_title_.c_str());
+    fprintf(out, "%s\n", test_label_.c_str());
     fprintf(out, "%" PRIuS "\n", samples_.size());
     fprintf(out,
             "dropped "
@@ -568,8 +549,6 @@
 
   const std::string test_label_;
   FILE* const graph_data_output_file_;
-  const std::string graph_title_;
-  const uint32_t ssrc_to_analyze_;
   std::vector<Sample> samples_ GUARDED_BY(comparison_lock_);
   std::map<int64_t, int> samples_encode_time_ms_ GUARDED_BY(comparison_lock_);
   test::Statistics sender_time_ GUARDED_BY(comparison_lock_);
@@ -609,173 +588,30 @@
   const rtc::scoped_ptr<EventWrapper> done_;
 };
 
-
 VideoQualityTest::VideoQualityTest() : clock_(Clock::GetRealTimeClock()) {}
 
+void VideoQualityTest::ValidateParams(const Params& params) {
+  RTC_CHECK_GE(params.common.max_bitrate_bps, params.common.target_bitrate_bps);
+  RTC_CHECK_GE(params.common.target_bitrate_bps, params.common.min_bitrate_bps);
+  RTC_CHECK_LT(params.common.tl_discard_threshold,
+               params.common.num_temporal_layers);
+}
+
 void VideoQualityTest::TestBody() {}
 
-std::string VideoQualityTest::GenerateGraphTitle() const {
-  std::stringstream ss;
-  ss << params_.common.codec;
-  ss << " (" << params_.common.target_bitrate_bps / 1000 << "kbps";
-  ss << ", " << params_.common.fps << " FPS";
-  if (params_.screenshare.scroll_duration)
-    ss << ", " << params_.screenshare.scroll_duration << "s scroll";
-  if (params_.ss.streams.size() > 1)
-    ss << ", Stream #" << params_.ss.selected_stream;
-  if (params_.ss.num_spatial_layers > 1)
-    ss << ", Layer #" << params_.ss.selected_sl;
-  ss << ")";
-  return ss.str();
-}
-
-void VideoQualityTest::CheckParams() {
-  // Add a default stream in none specified.
-  if (params_.ss.streams.empty())
-    params_.ss.streams.push_back(VideoQualityTest::DefaultVideoStream(params_));
-  if (params_.ss.num_spatial_layers == 0)
-    params_.ss.num_spatial_layers = 1;
-
-  // TODO(ivica): Should max_bitrate_bps == -1 represent inf max bitrate, as it
-  // does in some parts of the code?
-  RTC_CHECK_GE(params_.common.max_bitrate_bps,
-               params_.common.target_bitrate_bps);
-  RTC_CHECK_GE(params_.common.target_bitrate_bps,
-               params_.common.min_bitrate_bps);
-  RTC_CHECK_LT(params_.common.selected_tl,
-               params_.common.num_temporal_layers);
-  RTC_CHECK_LT(params_.ss.selected_stream, params_.ss.streams.size());
-  for (const VideoStream& stream : params_.ss.streams) {
-    RTC_CHECK_GE(stream.min_bitrate_bps, 0);
-    RTC_CHECK_GE(stream.target_bitrate_bps, stream.min_bitrate_bps);
-    RTC_CHECK_GE(stream.max_bitrate_bps, stream.target_bitrate_bps);
-    RTC_CHECK_EQ(static_cast<int>(stream.temporal_layer_thresholds_bps.size()),
-                 params_.common.num_temporal_layers - 1);
-  }
-  // TODO(ivica): Should we check if the sum of all streams/layers is equal to
-  // the total bitrate? We anyway have to update them in the case bitrate
-  // estimator changes the total bitrates.
-  RTC_CHECK_GE(params_.ss.num_spatial_layers, 1);
-  RTC_CHECK_LT(params_.ss.selected_sl, params_.ss.num_spatial_layers);
-  RTC_CHECK(params_.ss.spatial_layers.empty() ||
-            params_.ss.spatial_layers.size() ==
-                static_cast<size_t>(params_.ss.num_spatial_layers));
-  if (params_.common.codec == "VP8") {
-    RTC_CHECK_EQ(params_.ss.num_spatial_layers, 1);
-  } else if (params_.common.codec == "VP9") {
-    RTC_CHECK_EQ(params_.ss.streams.size(), 1u);
-  }
-}
-
-// Static.
-std::vector<int> VideoQualityTest::ParseCSV(const std::string &str) {
-  // Parse comma separated nonnegative integers, where some elements may be
-  // empty. The empty values are replaced with -1.
-  // E.g. "10,-20,,30,40" --> {10, 20, -1, 30,40}
-  // E.g. ",,10,,20," --> {-1, -1, 10, -1, 20, -1}
-  std::vector<int> result;
-  if (str.empty()) return result;
-
-  const char* p = str.c_str();
-  int value = -1;
-  int pos;
-  while (*p) {
-    if (*p == ',') {
-      result.push_back(value);
-      value = -1;
-      ++p;
-      continue;
-    }
-    RTC_CHECK_EQ(sscanf(p, "%d%n", &value, &pos), 1)
-        << "Unexpected non-number value.";
-    p += pos;
-  }
-  result.push_back(value);
-  return result;
-}
-
-// Static.
-VideoStream VideoQualityTest::DefaultVideoStream(const Params& params) {
-  VideoStream stream;
-  stream.width = params.common.width;
-  stream.height = params.common.height;
-  stream.max_framerate = params.common.fps;
-  stream.min_bitrate_bps = params.common.min_bitrate_bps;
-  stream.target_bitrate_bps = params.common.target_bitrate_bps;
-  stream.max_bitrate_bps = params.common.max_bitrate_bps;
-  stream.max_qp = 52;
-  if (params.common.num_temporal_layers == 2)
-    stream.temporal_layer_thresholds_bps.push_back(stream.target_bitrate_bps);
-  return stream;
-}
-
-// Static.
-void VideoQualityTest::FillScalabilitySettings(
-    Params* params,
-    const std::vector<std::string>& stream_descriptors,
-    size_t selected_stream,
-    int num_spatial_layers,
-    int selected_sl,
-    const std::vector<std::string>& sl_descriptors) {
-  // Read VideoStream and SpatialLayer elements from a list of comma separated
-  // lists. To use a default value for an element, use -1 or leave empty.
-  // Validity checks performed in CheckParams.
-
-  RTC_CHECK(params->ss.streams.empty());
-  for (auto descriptor : stream_descriptors) {
-    if (descriptor.empty())
-      continue;
-    std::vector<int> v = VideoQualityTest::ParseCSV(descriptor);
-    VideoStream stream(VideoQualityTest::DefaultVideoStream(*params));
-    if (v[0] != -1) stream.width = static_cast<size_t>(v[0]);
-    if (v[1] != -1) stream.height = static_cast<size_t>(v[1]);
-    if (v[2] != -1) stream.max_framerate = v[2];
-    if (v[3] != -1) stream.min_bitrate_bps = v[3];
-    if (v[4] != -1) stream.target_bitrate_bps = v[4];
-    if (v[5] != -1) stream.max_bitrate_bps = v[5];
-    if (v.size() > 6 && v[6] != -1) stream.max_qp = v[6];
-    if (v.size() > 7) {
-      stream.temporal_layer_thresholds_bps.clear();
-      stream.temporal_layer_thresholds_bps.insert(
-          stream.temporal_layer_thresholds_bps.end(), v.begin() + 7, v.end());
-    } else {
-      // Automatic TL thresholds for more than two layers not supported.
-      RTC_CHECK_LE(params->common.num_temporal_layers, 2);
-    }
-    params->ss.streams.push_back(stream);
-  }
-  params->ss.selected_stream = selected_stream;
-
-  params->ss.num_spatial_layers = num_spatial_layers ? num_spatial_layers : 1;
-  params->ss.selected_sl = selected_sl;
-  RTC_CHECK(params->ss.spatial_layers.empty());
-  for (auto descriptor : sl_descriptors) {
-    if (descriptor.empty())
-      continue;
-    std::vector<int> v = VideoQualityTest::ParseCSV(descriptor);
-    RTC_CHECK_GT(v[2], 0);
-
-    SpatialLayer layer;
-    layer.scaling_factor_num = v[0] == -1 ? 1 : v[0];
-    layer.scaling_factor_den = v[1] == -1 ? 1 : v[1];
-    layer.target_bitrate_bps = v[2];
-    params->ss.spatial_layers.push_back(layer);
-  }
-}
-
-void VideoQualityTest::SetupCommon(Transport* send_transport,
-                                   Transport* recv_transport) {
-  if (params_.logs)
+void VideoQualityTest::SetupFullStack(const Params& params,
+                                      Transport* send_transport,
+                                      Transport* recv_transport) {
+  if (params.logs)
     trace_to_stderr_.reset(new test::TraceToStderr);
 
-  size_t num_streams = params_.ss.streams.size();
-  CreateSendConfig(num_streams, send_transport);
+  CreateSendConfig(1, send_transport);
 
   int payload_type;
-  if (params_.common.codec == "VP8") {
+  if (params.common.codec == "VP8") {
     encoder_.reset(VideoEncoder::Create(VideoEncoder::kVp8));
     payload_type = kPayloadTypeVP8;
-  } else if (params_.common.codec == "VP9") {
+  } else if (params.common.codec == "VP9") {
     encoder_.reset(VideoEncoder::Create(VideoEncoder::kVp9));
     payload_type = kPayloadTypeVP9;
   } else {
@@ -783,15 +619,15 @@
     return;
   }
   send_config_.encoder_settings.encoder = encoder_.get();
-  send_config_.encoder_settings.payload_name = params_.common.codec;
+  send_config_.encoder_settings.payload_name = params.common.codec;
   send_config_.encoder_settings.payload_type = payload_type;
+
   send_config_.rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+  send_config_.rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[0]);
   send_config_.rtp.rtx.payload_type = kSendRtxPayloadType;
-  for (size_t i = 0; i < num_streams; ++i)
-    send_config_.rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[i]);
 
   send_config_.rtp.extensions.clear();
-  if (params_.common.send_side_bwe) {
+  if (params.common.send_side_bwe) {
     send_config_.rtp.extensions.push_back(RtpExtension(
         RtpExtension::kTransportSequenceNumber, kTransportSeqExtensionId));
   } else {
@@ -799,41 +635,49 @@
         RtpExtension(RtpExtension::kAbsSendTime, kAbsSendTimeExtensionId));
   }
 
-  encoder_config_.min_transmit_bitrate_bps = params_.common.min_transmit_bps;
-  encoder_config_.streams = params_.ss.streams;
-  encoder_config_.spatial_layers = params_.ss.spatial_layers;
+  // Automatically fill out streams[0] with params.
+  VideoStream* stream = &encoder_config_.streams[0];
+  stream->width = params.common.width;
+  stream->height = params.common.height;
+  stream->min_bitrate_bps = params.common.min_bitrate_bps;
+  stream->target_bitrate_bps = params.common.target_bitrate_bps;
+  stream->max_bitrate_bps = params.common.max_bitrate_bps;
+  stream->max_framerate = static_cast<int>(params.common.fps);
+
+  stream->temporal_layer_thresholds_bps.clear();
+  if (params.common.num_temporal_layers > 1) {
+    stream->temporal_layer_thresholds_bps.push_back(stream->target_bitrate_bps);
+  }
 
   CreateMatchingReceiveConfigs(recv_transport);
 
-  for (size_t i = 0; i < num_streams; ++i) {
-    receive_configs_[i].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
-    receive_configs_[i].rtp.rtx[kSendRtxPayloadType].ssrc = kSendRtxSsrcs[i];
-    receive_configs_[i].rtp.rtx[kSendRtxPayloadType].payload_type =
-        kSendRtxPayloadType;
-  }
+  receive_configs_[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+  receive_configs_[0].rtp.rtx[kSendRtxPayloadType].ssrc = kSendRtxSsrcs[0];
+  receive_configs_[0].rtp.rtx[kSendRtxPayloadType].payload_type =
+      kSendRtxPayloadType;
+
+  encoder_config_.min_transmit_bitrate_bps = params.common.min_transmit_bps;
 }
 
-void VideoQualityTest::SetupScreenshare() {
-  RTC_CHECK(params_.screenshare.enabled);
+void VideoQualityTest::SetupScreenshare(const Params& params) {
+  RTC_CHECK(params.screenshare.enabled);
 
   // Fill out codec settings.
   encoder_config_.content_type = VideoEncoderConfig::ContentType::kScreen;
-  if (params_.common.codec == "VP8") {
+  if (params.common.codec == "VP8") {
     codec_settings_.VP8 = VideoEncoder::GetDefaultVp8Settings();
     codec_settings_.VP8.denoisingOn = false;
     codec_settings_.VP8.frameDroppingOn = false;
     codec_settings_.VP8.numberOfTemporalLayers =
-        static_cast<unsigned char>(params_.common.num_temporal_layers);
+        static_cast<unsigned char>(params.common.num_temporal_layers);
     encoder_config_.encoder_specific_settings = &codec_settings_.VP8;
-  } else if (params_.common.codec == "VP9") {
+  } else if (params.common.codec == "VP9") {
     codec_settings_.VP9 = VideoEncoder::GetDefaultVp9Settings();
     codec_settings_.VP9.denoisingOn = false;
     codec_settings_.VP9.frameDroppingOn = false;
     codec_settings_.VP9.numberOfTemporalLayers =
-        static_cast<unsigned char>(params_.common.num_temporal_layers);
+        static_cast<unsigned char>(params.common.num_temporal_layers);
     encoder_config_.encoder_specific_settings = &codec_settings_.VP9;
-    codec_settings_.VP9.numberOfSpatialLayers =
-        static_cast<unsigned char>(params_.ss.num_spatial_layers);
   }
 
   // Setup frame generator.
@@ -845,127 +689,105 @@
   slides.push_back(test::ResourcePath("photo_1850_1110", "yuv"));
   slides.push_back(test::ResourcePath("difficult_photo_1850_1110", "yuv"));
 
-  if (params_.screenshare.scroll_duration == 0) {
+  if (params.screenshare.scroll_duration == 0) {
     // Cycle image every slide_change_interval seconds.
     frame_generator_.reset(test::FrameGenerator::CreateFromYuvFile(
         slides, kWidth, kHeight,
-        params_.screenshare.slide_change_interval * params_.common.fps));
+        params.screenshare.slide_change_interval * params.common.fps));
   } else {
-    RTC_CHECK_LE(params_.common.width, kWidth);
-    RTC_CHECK_LE(params_.common.height, kHeight);
-    RTC_CHECK_GT(params_.screenshare.slide_change_interval, 0);
-    const int kPauseDurationMs = (params_.screenshare.slide_change_interval -
-                                  params_.screenshare.scroll_duration) * 1000;
-    RTC_CHECK_LE(params_.screenshare.scroll_duration,
-                 params_.screenshare.slide_change_interval);
+    RTC_CHECK_LE(params.common.width, kWidth);
+    RTC_CHECK_LE(params.common.height, kHeight);
+    RTC_CHECK_GT(params.screenshare.slide_change_interval, 0);
+    const int kPauseDurationMs = (params.screenshare.slide_change_interval -
+                                  params.screenshare.scroll_duration) * 1000;
+    RTC_CHECK_LE(params.screenshare.scroll_duration,
+                 params.screenshare.slide_change_interval);
 
-    frame_generator_.reset(
-        test::FrameGenerator::CreateScrollingInputFromYuvFiles(
-            clock_, slides, kWidth, kHeight, params_.common.width,
-            params_.common.height, params_.screenshare.scroll_duration * 1000,
-            kPauseDurationMs));
+    if (params.screenshare.scroll_duration) {
+      frame_generator_.reset(
+          test::FrameGenerator::CreateScrollingInputFromYuvFiles(
+              clock_, slides, kWidth, kHeight, params.common.width,
+              params.common.height, params.screenshare.scroll_duration * 1000,
+              kPauseDurationMs));
+    } else {
+      frame_generator_.reset(test::FrameGenerator::CreateFromYuvFile(
+              slides, kWidth, kHeight,
+              params.screenshare.slide_change_interval * params.common.fps));
+    }
   }
 }
 
-void VideoQualityTest::CreateCapturer(VideoCaptureInput* input) {
-  if (params_.screenshare.enabled) {
+void VideoQualityTest::CreateCapturer(const Params& params,
+                                      VideoCaptureInput* input) {
+  if (params.screenshare.enabled) {
     test::FrameGeneratorCapturer *frame_generator_capturer =
         new test::FrameGeneratorCapturer(
-            clock_, input, frame_generator_.release(), params_.common.fps);
+            clock_, input, frame_generator_.release(), params.common.fps);
     EXPECT_TRUE(frame_generator_capturer->Init());
     capturer_.reset(frame_generator_capturer);
   } else {
-    if (params_.video.clip_name.empty()) {
+    if (params.video.clip_name.empty()) {
       capturer_.reset(test::VideoCapturer::Create(
-          input, params_.common.width, params_.common.height,
-          params_.common.fps, clock_));
+          input, params.common.width, params.common.height, params.common.fps,
+          clock_));
     } else {
       capturer_.reset(test::FrameGeneratorCapturer::CreateFromYuvFile(
-          input, test::ResourcePath(params_.video.clip_name, "yuv"),
-          params_.common.width, params_.common.height, params_.common.fps,
+          input, test::ResourcePath(params.video.clip_name, "yuv"),
+          params.common.width, params.common.height, params.common.fps,
           clock_));
       ASSERT_TRUE(capturer_.get() != nullptr)
-          << "Could not create capturer for " << params_.video.clip_name
+          << "Could not create capturer for " << params.video.clip_name
           << ".yuv. Is this resource file present?";
     }
   }
 }
 
-void VideoQualityTest::RunWithAnalyzer(const Params& _params) {
-  params_ = _params;
-
+void VideoQualityTest::RunWithAnalyzer(const Params& params) {
   // TODO(ivica): Merge with RunWithRenderer and use a flag / argument to
   // differentiate between the analyzer and the renderer case.
-  CheckParams();
+  ValidateParams(params);
 
   FILE* graph_data_output_file = nullptr;
-  if (!params_.analyzer.graph_data_output_filename.empty()) {
+  if (!params.analyzer.graph_data_output_filename.empty()) {
     graph_data_output_file =
-        fopen(params_.analyzer.graph_data_output_filename.c_str(), "w");
+        fopen(params.analyzer.graph_data_output_filename.c_str(), "w");
     RTC_CHECK(graph_data_output_file != nullptr)
         << "Can't open the file "
-        << params_.analyzer.graph_data_output_filename << "!";
+        << params.analyzer.graph_data_output_filename << "!";
   }
+
   test::LayerFilteringTransport send_transport(
-      params_.pipe, kPayloadTypeVP8, kPayloadTypeVP9,
-      params_.common.selected_tl, params_.ss.selected_sl);
-  test::DirectTransport recv_transport(params_.pipe);
-
-  std::string graph_title = params_.analyzer.graph_title;
-  if (graph_title.empty())
-    graph_title = VideoQualityTest::GenerateGraphTitle();
-
-  // In the case of different resolutions, the functions calculating PSNR and
-  // SSIM return -1.0, instead of a positive value as usual. VideoAnalyzer
-  // aborts if the average psnr/ssim are below the given threshold, which is
-  // 0.0 by default. Setting the thresholds to -1.1 prevents the unnecessary
-  // abort.
-  VideoStream& selected_stream =
-      params_.ss.streams[params_.ss.selected_stream];
-  int selected_sl = params_.ss.selected_sl != -1
-      ? params_.ss.selected_sl : params_.ss.num_spatial_layers - 1;
-  bool disable_quality_check =
-      selected_stream.width != params_.common.width ||
-      selected_stream.height != params_.common.height ||
-      (!params_.ss.spatial_layers.empty() &&
-       params_.ss.spatial_layers[selected_sl].scaling_factor_num !=
-       params_.ss.spatial_layers[selected_sl].scaling_factor_den);
-  if (disable_quality_check) {
-    fprintf(stderr,
-            "Warning: Calculating PSNR and SSIM for downsized resolution "
-            "not implemented yet! Skipping PSNR and SSIM calculations!");
-  }
-
+      params.pipe, kPayloadTypeVP8, kPayloadTypeVP9,
+      static_cast<uint8_t>(params.common.tl_discard_threshold), 0);
+  test::DirectTransport recv_transport(params.pipe);
   VideoAnalyzer analyzer(
-      &send_transport, params_.analyzer.test_label,
-      disable_quality_check ? -1.1 : params_.analyzer.avg_psnr_threshold,
-      disable_quality_check ? -1.1 : params_.analyzer.avg_ssim_threshold,
-      params_.analyzer.test_durations_secs * params_.common.fps,
-      graph_data_output_file, graph_title,
-      kSendSsrcs[params_.ss.selected_stream]);
+      &send_transport, params.analyzer.test_label,
+      params.analyzer.avg_psnr_threshold, params.analyzer.avg_ssim_threshold,
+      params.analyzer.test_durations_secs * params.common.fps,
+      graph_data_output_file);
 
   Call::Config call_config;
-  call_config.bitrate_config = params_.common.call_bitrate_config;
+  call_config.bitrate_config = params.common.call_bitrate_config;
   CreateCalls(call_config, call_config);
 
   analyzer.SetReceiver(receiver_call_->Receiver());
   send_transport.SetReceiver(&analyzer);
   recv_transport.SetReceiver(sender_call_->Receiver());
 
-  SetupCommon(&analyzer, &recv_transport);
+  SetupFullStack(params, &analyzer, &recv_transport);
   send_config_.encoding_time_observer = &analyzer;
-  receive_configs_[params_.ss.selected_stream].renderer = &analyzer;
+  receive_configs_[0].renderer = &analyzer;
   for (auto& config : receive_configs_)
     config.pre_decode_callback = &analyzer;
 
-  if (params_.screenshare.enabled)
-    SetupScreenshare();
+  if (params.screenshare.enabled)
+    SetupScreenshare(params);
 
   CreateStreams();
   analyzer.input_ = send_stream_->Input();
   analyzer.send_stream_ = send_stream_;
 
-  CreateCapturer(&analyzer);
+  CreateCapturer(params, &analyzer);
 
   send_stream_->Start();
   for (size_t i = 0; i < receive_streams_.size(); ++i)
@@ -988,52 +810,41 @@
     fclose(graph_data_output_file);
 }
 
-void VideoQualityTest::RunWithVideoRenderer(const Params& _params) {
-  params_ = _params;
-  CheckParams();
+void VideoQualityTest::RunWithVideoRenderer(const Params& params) {
+  ValidateParams(params);
 
   rtc::scoped_ptr<test::VideoRenderer> local_preview(
-      test::VideoRenderer::Create("Local Preview", params_.common.width,
-                                  params_.common.height));
-  size_t stream_id = params_.ss.selected_stream;
-  char title[32];
-  if (params_.ss.streams.size() == 1) {
-    sprintf(title, "Loopback Video");
-  } else {
-    sprintf(title, "Loopback Video - Stream #%" PRIuS, stream_id);
-  }
+      test::VideoRenderer::Create("Local Preview", params.common.width,
+                                  params.common.height));
   rtc::scoped_ptr<test::VideoRenderer> loopback_video(
-      test::VideoRenderer::Create(
-        title, params_.ss.streams[stream_id].width,
-        params_.ss.streams[stream_id].height));
+      test::VideoRenderer::Create("Loopback Video", params.common.width,
+                                  params.common.height));
 
   // TODO(ivica): Remove bitrate_config and use the default Call::Config(), to
   // match the full stack tests.
   Call::Config call_config;
-  call_config.bitrate_config = params_.common.call_bitrate_config;
+  call_config.bitrate_config = params.common.call_bitrate_config;
   rtc::scoped_ptr<Call> call(Call::Create(call_config));
 
   test::LayerFilteringTransport transport(
-      params_.pipe, kPayloadTypeVP8, kPayloadTypeVP9,
-      params_.common.selected_tl, params_.ss.selected_sl);
-
+      params.pipe, kPayloadTypeVP8, kPayloadTypeVP9,
+      static_cast<uint8_t>(params.common.tl_discard_threshold), 0);
   // TODO(ivica): Use two calls to be able to merge with RunWithAnalyzer or at
   // least share as much code as possible. That way this test would also match
   // the full stack tests better.
   transport.SetReceiver(call->Receiver());
 
-  SetupCommon(&transport, &transport);
-
+  SetupFullStack(params, &transport, &transport);
   send_config_.local_renderer = local_preview.get();
-  receive_configs_[stream_id].renderer = loopback_video.get();
+  receive_configs_[0].renderer = loopback_video.get();
 
-  if (params_.screenshare.enabled)
-    SetupScreenshare();
+  if (params.screenshare.enabled)
+    SetupScreenshare(params);
 
   send_stream_ = call->CreateVideoSendStream(send_config_, encoder_config_);
   VideoReceiveStream* receive_stream =
-      call->CreateVideoReceiveStream(receive_configs_[stream_id]);
-  CreateCapturer(send_stream_->Input());
+      call->CreateVideoReceiveStream(receive_configs_[0]);
+  CreateCapturer(params, send_stream_->Input());
 
   receive_stream->Start();
   send_stream_->Start();
diff --git a/webrtc/video/video_quality_test.h b/webrtc/video/video_quality_test.h
index 8548383..7b62fb3 100644
--- a/webrtc/video/video_quality_test.h
+++ b/webrtc/video/video_quality_test.h
@@ -33,11 +33,11 @@
       int target_bitrate_bps;
       int max_bitrate_bps;
       std::string codec;
-      int num_temporal_layers;
-      int selected_tl;
+      size_t num_temporal_layers;
       int min_transmit_bps;
 
       Call::Config::BitrateConfig call_bitrate_config;
+      size_t tl_discard_threshold;
       bool send_side_bwe;
     } common;
     struct {  // Video-specific settings.
@@ -50,55 +50,30 @@
     } screenshare;
     struct {  // Analyzer settings.
       std::string test_label;
-      double avg_psnr_threshold;  // (*)
-      double avg_ssim_threshold;  // (*)
+      double avg_psnr_threshold;
+      double avg_ssim_threshold;
       int test_durations_secs;
       std::string graph_data_output_filename;
-      std::string graph_title;
     } analyzer;
     FakeNetworkPipe::Config pipe;
     bool logs;
-    struct {  // Spatial scalability.
-      std::vector<VideoStream> streams;  // If empty, one stream is assumed.
-      size_t selected_stream;
-      int num_spatial_layers;
-      int selected_sl;
-      // If empty, bitrates are generated in VP9Impl automatically.
-      std::vector<SpatialLayer> spatial_layers;
-    } ss;
   };
-  // (*) Set to -1.1 if generating graph data for simulcast or SVC and the
-  // selected stream/layer doesn't have the same resolution as the largest
-  // stream/layer (to ignore the PSNR and SSIM calculation errors).
 
   VideoQualityTest();
   void RunWithAnalyzer(const Params& params);
   void RunWithVideoRenderer(const Params& params);
 
-  static void FillScalabilitySettings(
-      Params* params,
-      const std::vector<std::string>& stream_descriptors,
-      size_t selected_stream,
-      int num_spatial_layers,
-      int selected_sl,
-      const std::vector<std::string>& sl_descriptors);
  protected:
   // No-op implementation to be able to instantiate this class from non-TEST_F
   // locations.
   void TestBody() override;
 
-  // Helper methods accessing only params_.
-  std::string GenerateGraphTitle() const;
-  void CheckParams();
-
-  // Helper static methods.
-  static VideoStream DefaultVideoStream(const Params& params);
-  static std::vector<int> ParseCSV(const std::string& str);
-
-  // Helper methods for setting up the call.
-  void CreateCapturer(VideoCaptureInput* input);
-  void SetupCommon(Transport* send_transport, Transport* recv_transport);
-  void SetupScreenshare();
+  void CreateCapturer(const Params& params, VideoCaptureInput* input);
+  void ValidateParams(const Params& params);
+  void SetupFullStack(const Params& params,
+                      Transport* send_transport,
+                      Transport* recv_transport);
+  void SetupScreenshare(const Params& params);
 
   // We need a more general capturer than the FrameGeneratorCapturer.
   rtc::scoped_ptr<test::VideoCapturer> capturer_;
@@ -107,8 +82,6 @@
   rtc::scoped_ptr<VideoEncoder> encoder_;
   VideoCodecUnion codec_settings_;
   Clock* const clock_;
-
-  Params params_;
 };
 
 }  // namespace webrtc
diff --git a/webrtc/video/video_send_stream.cc b/webrtc/video/video_send_stream.cc
index 4d57c63..d55adf0 100644
--- a/webrtc/video/video_send_stream.cc
+++ b/webrtc/video/video_send_stream.cc
@@ -310,16 +310,6 @@
       static_cast<unsigned char>(streams.size());
   video_codec.minBitrate = streams[0].min_bitrate_bps / 1000;
   RTC_DCHECK_LE(streams.size(), static_cast<size_t>(kMaxSimulcastStreams));
-  if (video_codec.codecType == kVideoCodecVP9) {
-    // If the vector is empty, bitrates will be configured automatically.
-    RTC_DCHECK(config.spatial_layers.empty() ||
-               config.spatial_layers.size() ==
-                  video_codec.codecSpecific.VP9.numberOfSpatialLayers);
-    RTC_DCHECK_LE(video_codec.codecSpecific.VP9.numberOfSpatialLayers,
-                  kMaxSimulcastStreams);
-    for (size_t i = 0; i < config.spatial_layers.size(); ++i)
-      video_codec.spatialLayers[i] = config.spatial_layers[i];
-  }
   for (size_t i = 0; i < streams.size(); ++i) {
     SimulcastStream* sim_stream = &video_codec.simulcastStream[i];
     RTC_DCHECK_GT(streams[i].width, 0u);