(Auto)update libjingle 62063505 -> 62278774

git-svn-id: http://webrtc.googlecode.com/svn/trunk@5617 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/talk/app/webrtc/statscollector.cc b/talk/app/webrtc/statscollector.cc
index a900bba..4d5b324 100644
--- a/talk/app/webrtc/statscollector.cc
+++ b/talk/app/webrtc/statscollector.cc
@@ -122,6 +122,8 @@
     "googLocalCertificateId";
 const char StatsReport::kStatsValueNameNacksReceived[] = "googNacksReceived";
 const char StatsReport::kStatsValueNameNacksSent[] = "googNacksSent";
+const char StatsReport::kStatsValueNamePlisReceived[] = "googPlisReceived";
+const char StatsReport::kStatsValueNamePlisSent[] = "googPlisSent";
 const char StatsReport::kStatsValueNamePacketsReceived[] = "packetsReceived";
 const char StatsReport::kStatsValueNamePacketsSent[] = "packetsSent";
 const char StatsReport::kStatsValueNamePacketsLost[] = "packetsLost";
@@ -284,6 +286,8 @@
 
   report->AddValue(StatsReport::kStatsValueNameFirsSent,
                    info.firs_sent);
+  report->AddValue(StatsReport::kStatsValueNamePlisSent,
+                   info.plis_sent);
   report->AddValue(StatsReport::kStatsValueNameNacksSent,
                    info.nacks_sent);
   report->AddValue(StatsReport::kStatsValueNameFrameWidthReceived,
@@ -321,6 +325,8 @@
 
   report->AddValue(StatsReport::kStatsValueNameFirsReceived,
                    info.firs_rcvd);
+  report->AddValue(StatsReport::kStatsValueNamePlisReceived,
+                   info.plis_rcvd);
   report->AddValue(StatsReport::kStatsValueNameNacksReceived,
                    info.nacks_rcvd);
   report->AddValue(StatsReport::kStatsValueNameFrameWidthInput,
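
For context: PLI (Picture Loss Indication) is an RTCP payload-specific feedback
message from RFC 4585; like FIR it prompts the sender to produce a decoder
recovery frame, but it signals picture loss rather than demanding a full intra
refresh. The new googPlisSent/googPlisReceived values mirror the existing FIR
pair. A rough illustration of the symmetry, assuming a populated
cricket::VideoMediaInfo named info (how it gets filled in is elided here):

  // A sender accumulates the PLIs it received from the remote receiver...
  EXPECT_EQ(0, info.senders[0].plis_rcvd);
  // ...while a receiver accumulates the PLIs it sent to request recovery.
  EXPECT_EQ(0, info.receivers[0].plis_sent);
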
diff --git a/talk/app/webrtc/statstypes.h b/talk/app/webrtc/statstypes.h
index 39441e2..2e7fb81 100644
--- a/talk/app/webrtc/statstypes.h
+++ b/talk/app/webrtc/statstypes.h
@@ -167,6 +167,8 @@
   static const char kStatsValueNameJitterReceived[];
   static const char kStatsValueNameNacksReceived[];
   static const char kStatsValueNameNacksSent[];
+  static const char kStatsValueNamePlisReceived[];
+  static const char kStatsValueNamePlisSent[];
   static const char kStatsValueNameRtt[];
   static const char kStatsValueNameAvailableSendBandwidth[];
   static const char kStatsValueNameAvailableReceiveBandwidth[];
diff --git a/talk/app/webrtc/test/fakeperiodicvideocapturer.h b/talk/app/webrtc/test/fakeperiodicvideocapturer.h
index 88fd753..7f70ae2 100644
--- a/talk/app/webrtc/test/fakeperiodicvideocapturer.h
+++ b/talk/app/webrtc/test/fakeperiodicvideocapturer.h
@@ -56,7 +56,6 @@
   virtual cricket::CaptureState Start(const cricket::VideoFormat& format) {
     cricket::CaptureState state = FakeVideoCapturer::Start(format);
     if (state != cricket::CS_FAILED) {
-      set_enable_video_adapter(false);  // Simplify testing.
       talk_base::Thread::Current()->Post(this, MSG_CREATEFRAME);
     }
     return state;
diff --git a/talk/app/webrtc/webrtcsdp.cc b/talk/app/webrtc/webrtcsdp.cc
index bf69279..bee8222 100644
--- a/talk/app/webrtc/webrtcsdp.cc
+++ b/talk/app/webrtc/webrtcsdp.cc
@@ -2111,9 +2111,6 @@
                     message, cricket::MEDIA_TYPE_AUDIO, mline_index, protocol,
                     codec_preference, pos, &content_name,
                     &transport, candidates, error));
-      MaybeCreateStaticPayloadAudioCodecs(
-          codec_preference,
-          static_cast<AudioContentDescription*>(content.get()));
     } else if (HasAttribute(line, kMediaTypeData)) {
       DataContentDescription* desc =
           ParseContentDescription<DataContentDescription>(
@@ -2366,6 +2363,11 @@
   ASSERT(content_name != NULL);
   ASSERT(transport != NULL);
 
+  if (media_type == cricket::MEDIA_TYPE_AUDIO) {
+    MaybeCreateStaticPayloadAudioCodecs(
+        codec_preference, static_cast<AudioContentDescription*>(media_desc));
+  }
+
   // The media level "ice-ufrag" and "ice-pwd".
   // The candidates before update the media level "ice-pwd" and "ice-ufrag".
   Candidates candidates_orig;
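
For context: MaybeCreateStaticPayloadAudioCodecs fills in audio codecs for
payload types that the m= line lists without a matching a=rtpmap. Moving the
call from ParseMediaDescription into ParseContent means those codecs exist
before the media-level attributes are parsed, so an a=fmtp line for a static
payload type (as in the new unit test below) can attach its parameters. The
static assignments come from RFC 3551; a sketch of the relevant audio subset
(values quoted from the RFC, not from this file):

struct StaticPayloadType {
  int id;
  const char* name;
  int clockrate;
};

static const StaticPayloadType kStaticAudioPayloadTypes[] = {
  { 0,  "PCMU", 8000 },
  { 8,  "PCMA", 8000 },
  { 13, "CN",   8000 },
  { 18, "G729", 8000 },
};
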
diff --git a/talk/app/webrtc/webrtcsdp_unittest.cc b/talk/app/webrtc/webrtcsdp_unittest.cc
index f609aff..76765aa 100644
--- a/talk/app/webrtc/webrtcsdp_unittest.cc
+++ b/talk/app/webrtc/webrtcsdp_unittest.cc
@@ -299,6 +299,19 @@
     "a=mid:data_content_name\r\n"
     "a=sctpmap:5000 webrtc-datachannel 1024\r\n";
 
+static const char kSdpConferenceString[] =
+    "v=0\r\n"
+    "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n"
+    "s=-\r\n"
+    "t=0 0\r\n"
+    "a=msid-semantic: WMS\r\n"
+    "m=audio 1 RTP/SAVPF 111 103 104\r\n"
+    "c=IN IP4 0.0.0.0\r\n"
+    "a=x-google-flag:conference\r\n"
+    "m=video 1 RTP/SAVPF 120\r\n"
+    "c=IN IP4 0.0.0.0\r\n"
+    "a=x-google-flag:conference\r\n";
+
 
 // One candidate reference string as per W3c spec.
 // candidate:<blah> not a=candidate:<blah>CRLF
@@ -1474,6 +1487,21 @@
   EXPECT_EQ(sdp_with_extmap, message);
 }
 
+TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithBufferLatency) {
+  VideoContentDescription* vcd = static_cast<VideoContentDescription*>(
+      GetFirstVideoContent(&desc_)->description);
+  vcd->set_buffered_mode_latency(128);
+
+  ASSERT_TRUE(jdesc_.Initialize(desc_.Copy(),
+                                jdesc_.session_id(),
+                                jdesc_.session_version()));
+  std::string message = webrtc::SdpSerialize(jdesc_);
+  std::string sdp_with_buffer_latency = kSdpFullString;
+  InjectAfter("a=rtpmap:120 VP8/90000\r\n",
+              "a=x-google-buffer-latency:128\r\n",
+              &sdp_with_buffer_latency);
+  EXPECT_EQ(sdp_with_buffer_latency, message);
+}
 
 TEST_F(WebRtcSdpTest, SerializeCandidates) {
   std::string message = webrtc::SdpSerializeCandidate(*jcandidate_);
@@ -1547,6 +1575,37 @@
   EXPECT_EQ(ref_codecs, audio->codecs());
 }
 
+TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithoutRtpmapButWithFmtp) {
+  static const char kSdpNoRtpmapString[] =
+      "v=0\r\n"
+      "o=- 11 22 IN IP4 127.0.0.1\r\n"
+      "s=-\r\n"
+      "t=0 0\r\n"
+      "m=audio 49232 RTP/AVP 18 103\r\n"
+      "a=fmtp:18 annexb=yes\r\n"
+      "a=rtpmap:103 ISAC/16000\r\n";
+
+  JsepSessionDescription jdesc(kDummyString);
+  EXPECT_TRUE(SdpDeserialize(kSdpNoRtpmapString, &jdesc));
+  cricket::AudioContentDescription* audio =
+    static_cast<AudioContentDescription*>(
+        jdesc.description()->GetContentDescriptionByName(cricket::CN_AUDIO));
+
+  cricket::AudioCodec g729 = audio->codecs()[0];
+  EXPECT_EQ("G729", g729.name);
+  EXPECT_EQ(8000, g729.clockrate);
+  EXPECT_EQ(18, g729.id);
+  cricket::CodecParameterMap::iterator found =
+      g729.params.find("annexb");
+  ASSERT_TRUE(found != g729.params.end());
+  EXPECT_EQ("yes", found->second);
+
+  cricket::AudioCodec isac = audio->codecs()[1];
+  EXPECT_EQ("ISAC", isac.name);
+  EXPECT_EQ(103, isac.id);
+  EXPECT_EQ(16000, isac.clockrate);
+}
+
 // Ensure that we can deserialize SDP with a=fingerprint properly.
 TEST_F(WebRtcSdpTest, DeserializeJsepSessionDescriptionWithFingerprint) {
   // Add a DTLS a=fingerprint attribute to our session description.
@@ -1654,6 +1713,23 @@
   EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc_with_ufrag_pwd));
 }
 
+TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithBufferLatency) {
+  JsepSessionDescription jdesc_with_buffer_latency(kDummyString);
+  std::string sdp_with_buffer_latency = kSdpFullString;
+  InjectAfter("a=rtpmap:120 VP8/90000\r\n",
+              "a=x-google-buffer-latency:128\r\n",
+              &sdp_with_buffer_latency);
+
+  EXPECT_TRUE(
+      SdpDeserialize(sdp_with_buffer_latency, &jdesc_with_buffer_latency));
+  VideoContentDescription* vcd = static_cast<VideoContentDescription*>(
+      GetFirstVideoContent(&desc_)->description);
+  vcd->set_buffered_mode_latency(128);
+  ASSERT_TRUE(jdesc_.Initialize(desc_.Copy(),
+                                jdesc_.session_id(),
+                                jdesc_.session_version()));
+  EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc_with_buffer_latency));
+}
 
 TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithRecvOnlyContent) {
   EXPECT_TRUE(TestDeserializeDirection(cricket::MD_RECVONLY));
@@ -1904,6 +1980,24 @@
   EXPECT_TRUE(jcandidate.candidate().IsEquivalent(ref_candidate));
 }
 
+TEST_F(WebRtcSdpTest, DeserializeSdpWithConferenceFlag) {
+  JsepSessionDescription jdesc(kDummyString);
+
+  // Deserialize
+  EXPECT_TRUE(SdpDeserialize(kSdpConferenceString, &jdesc));
+
+  // Verify
+  cricket::AudioContentDescription* audio =
+    static_cast<AudioContentDescription*>(
+      jdesc.description()->GetContentDescriptionByName(cricket::CN_AUDIO));
+  EXPECT_TRUE(audio->conference_mode());
+
+  cricket::VideoContentDescription* video =
+    static_cast<VideoContentDescription*>(
+      jdesc.description()->GetContentDescriptionByName(cricket::CN_VIDEO));
+  EXPECT_TRUE(video->conference_mode());
+}
+
 TEST_F(WebRtcSdpTest, DeserializeBrokenSdp) {
   const char kSdpDestroyer[] = "!@#$%^&";
   const char kSdpInvalidLine1[] = " =candidate";
diff --git a/talk/base/bandwidthsmoother.cc b/talk/base/bandwidthsmoother.cc
index 3916488..edb4eda 100644
--- a/talk/base/bandwidthsmoother.cc
+++ b/talk/base/bandwidthsmoother.cc
@@ -62,7 +62,7 @@
   }
 
   // Replace bandwidth with the mean of sampled bandwidths.
-  const int mean_bandwidth = accumulator_.ComputeMean();
+  const int mean_bandwidth = static_cast<int>(accumulator_.ComputeMean());
 
   if (mean_bandwidth < bandwidth_estimation_) {
     time_at_last_change_ = sample_time;
diff --git a/talk/base/criticalsection_unittest.cc b/talk/base/criticalsection_unittest.cc
new file mode 100644
index 0000000..a31268e
--- /dev/null
+++ b/talk/base/criticalsection_unittest.cc
@@ -0,0 +1,163 @@
+/*
+ * libjingle
+ * Copyright 2014, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <set>
+#include <vector>
+
+#include "talk/base/criticalsection.h"
+#include "talk/base/event.h"
+#include "talk/base/gunit.h"
+#include "talk/base/scopedptrcollection.h"
+#include "talk/base/thread.h"
+
+namespace talk_base {
+
+namespace {
+
+const int kLongTime = 10000;  // 10 seconds
+const int kNumThreads = 16;
+const int kOperationsToRun = 10000;
+
+template <class T>
+class AtomicOpRunner : public MessageHandler {
+ public:
+  explicit AtomicOpRunner(int initial_value)
+      : value_(initial_value),
+        threads_active_(0),
+        start_event_(true, false),
+        done_event_(true, false) {}
+
+  int value() const { return value_; }
+
+  bool Run() {
+    // Signal all threads to start.
+    start_event_.Set();
+
+    // Wait for all threads to finish.
+    return done_event_.Wait(kLongTime);
+  }
+
+  void SetExpectedThreadCount(int count) {
+    threads_active_ = count;
+  }
+
+  virtual void OnMessage(Message* msg) {
+    std::vector<int> values;
+    values.reserve(kOperationsToRun);
+
+    // Wait to start.
+    ASSERT_TRUE(start_event_.Wait(kLongTime));
+
+    // Generate a bunch of values by updating value_ atomically.
+    for (int i = 0; i < kOperationsToRun; ++i) {
+      values.push_back(T::AtomicOp(&value_));
+    }
+
+    {  // Add them all to the set.
+      CritScope cs(&all_values_crit_);
+      for (size_t i = 0; i < values.size(); ++i) {
+        std::pair<std::set<int>::iterator, bool> result =
+            all_values_.insert(values[i]);
+        // Each value should only be taken by one thread, so if this value
+        // has already been added, something went wrong.
+        EXPECT_TRUE(result.second)
+            << "Thread=" << Thread::Current() << " value=" << values[i];
+      }
+    }
+
+    // Signal that we're done.
+    if (AtomicOps::Decrement(&threads_active_) == 0) {
+      done_event_.Set();
+    }
+  }
+
+ private:
+  int value_;
+  int threads_active_;
+  CriticalSection all_values_crit_;
+  std::set<int> all_values_;
+  Event start_event_;
+  Event done_event_;
+};
+
+struct IncrementOp {
+  static int AtomicOp(int* i) { return AtomicOps::Increment(i); }
+};
+
+struct DecrementOp {
+  static int AtomicOp(int* i) { return AtomicOps::Decrement(i); }
+};
+
+void StartThreads(ScopedPtrCollection<Thread>* threads,
+                  MessageHandler* handler) {
+  for (int i = 0; i < kNumThreads; ++i) {
+    Thread* thread = new Thread();
+    thread->Start();
+    thread->Post(handler);
+    threads->PushBack(thread);
+  }
+}
+
+}  // namespace
+
+TEST(AtomicOpsTest, Simple) {
+  int value = 0;
+  EXPECT_EQ(1, AtomicOps::Increment(&value));
+  EXPECT_EQ(1, value);
+  EXPECT_EQ(2, AtomicOps::Increment(&value));
+  EXPECT_EQ(2, value);
+  EXPECT_EQ(1, AtomicOps::Decrement(&value));
+  EXPECT_EQ(1, value);
+  EXPECT_EQ(0, AtomicOps::Decrement(&value));
+  EXPECT_EQ(0, value);
+}
+
+TEST(AtomicOpsTest, Increment) {
+  // Create and start lots of threads.
+  AtomicOpRunner<IncrementOp> runner(0);
+  ScopedPtrCollection<Thread> threads;
+  StartThreads(&threads, &runner);
+  runner.SetExpectedThreadCount(kNumThreads);
+
+  // Release the hounds!
+  EXPECT_TRUE(runner.Run());
+  EXPECT_EQ(kOperationsToRun * kNumThreads, runner.value());
+}
+
+TEST(AtomicOpsTest, Decrement) {
+  // Create and start lots of threads.
+  AtomicOpRunner<DecrementOp> runner(kOperationsToRun * kNumThreads);
+  ScopedPtrCollection<Thread> threads;
+  StartThreads(&threads, &runner);
+  runner.SetExpectedThreadCount(kNumThreads);
+
+  // Release the hounds!
+  EXPECT_TRUE(runner.Run());
+  EXPECT_EQ(0, runner.value());
+}
+
+}  // namespace talk_base
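
The EXPECT_TRUE(result.second) check above carries the whole test: if
Increment and Decrement are atomic, each returned value is handed out exactly
once across all threads. A standalone C++11 sketch of the same invariant using
std::atomic (illustrative only; the test above exercises talk_base::AtomicOps,
not this code):

#include <atomic>
#include <set>
#include <thread>
#include <vector>

int main() {
  const int kThreads = 4;
  const int kOpsPerThread = 10000;
  std::atomic<int> value(0);
  std::vector<int> results[kThreads];
  std::thread threads[kThreads];
  for (int t = 0; t < kThreads; ++t) {
    threads[t] = std::thread([&, t] {
      for (int i = 0; i < kOpsPerThread; ++i) {
        // fetch_add returns the previous value; +1 mimics
        // AtomicOps::Increment, which returns the new value.
        results[t].push_back(value.fetch_add(1) + 1);
      }
    });
  }
  for (int t = 0; t < kThreads; ++t) {
    threads[t].join();
  }
  std::set<int> seen;
  for (int t = 0; t < kThreads; ++t) {
    for (size_t i = 0; i < results[t].size(); ++i) {
      // Atomic increments hand out each value exactly once.
      if (!seen.insert(results[t][i]).second) return 1;
    }
  }
  return value.load() == kThreads * kOpsPerThread ? 0 : 1;
}
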
diff --git a/talk/base/nssstreamadapter.cc b/talk/base/nssstreamadapter.cc
index 8965a68..ca7fa74 100644
--- a/talk/base/nssstreamadapter.cc
+++ b/talk/base/nssstreamadapter.cc
@@ -784,7 +784,13 @@
                                                 PRBool checksig,
                                                 PRBool isServer) {
   LOG(LS_INFO) << "NSSStreamAdapter::AuthCertificateHook";
-  NSSCertificate peer_cert(SSL_PeerCertificate(fd));
+  // SSL_PeerCertificate returns a pointer that is owned by the caller, and
+  // the NSSCertificate constructor copies its argument, so |raw_peer_cert|
+  // must be destroyed in this function.
+  CERTCertificate* raw_peer_cert = SSL_PeerCertificate(fd);
+  NSSCertificate peer_cert(raw_peer_cert);
+  CERT_DestroyCertificate(raw_peer_cert);
+
   NSSStreamAdapter *stream = reinterpret_cast<NSSStreamAdapter *>(arg);
   stream->cert_ok_ = false;
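
The change above follows the NSS ownership rule: SSL_PeerCertificate returns a
reference the caller must release with CERT_DestroyCertificate, and the
NSSCertificate constructor copies its argument rather than taking ownership.
A minimal sketch of how a scoped wrapper could encode the same rule
(ScopedCERTCertificate is hypothetical, not part of this change):

class ScopedCERTCertificate {
 public:
  explicit ScopedCERTCertificate(CERTCertificate* cert) : cert_(cert) {}
  ~ScopedCERTCertificate() {
    if (cert_)
      CERT_DestroyCertificate(cert_);
  }
  CERTCertificate* get() const { return cert_; }
 private:
  CERTCertificate* cert_;
  DISALLOW_COPY_AND_ASSIGN(ScopedCERTCertificate);
};

// Usage would then collapse the three lines above to:
//   ScopedCERTCertificate peer(SSL_PeerCertificate(fd));
//   NSSCertificate peer_cert(peer.get());
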
 
diff --git a/talk/base/rollingaccumulator.h b/talk/base/rollingaccumulator.h
index cdad025..dfda8fe 100644
--- a/talk/base/rollingaccumulator.h
+++ b/talk/base/rollingaccumulator.h
@@ -42,11 +42,8 @@
 class RollingAccumulator {
  public:
   explicit RollingAccumulator(size_t max_count)
-    : count_(0),
-      next_index_(0),
-      sum_(0.0),
-      sum_2_(0.0),
-      samples_(max_count) {
+    : samples_(max_count) {
+    Reset();
   }
   ~RollingAccumulator() {
   }
@@ -59,12 +56,29 @@
     return count_;
   }
 
+  void Reset() {
+    count_ = 0U;
+    next_index_ = 0U;
+    sum_ = 0.0;
+    sum_2_ = 0.0;
+    max_ = T();
+    max_stale_ = false;
+    min_ = T();
+    min_stale_ = false;
+  }
+
   void AddSample(T sample) {
     if (count_ == max_count()) {
       // Remove oldest sample.
       T sample_to_remove = samples_[next_index_];
       sum_ -= sample_to_remove;
       sum_2_ -= sample_to_remove * sample_to_remove;
+      if (sample_to_remove >= max_) {
+        max_stale_ = true;
+      }
+      if (sample_to_remove <= min_) {
+        min_stale_ = true;
+      }
     } else {
       // Increase count of samples.
       ++count_;
@@ -73,6 +87,14 @@
     samples_[next_index_] = sample;
     sum_ += sample;
     sum_2_ += sample * sample;
+    if (count_ == 1 || sample >= max_) {
+      max_ = sample;
+      max_stale_ = false;
+    }
+    if (count_ == 1 || sample <= min_) {
+      min_ = sample;
+      min_stale_ = false;
+    }
     // Update next_index_.
     next_index_ = (next_index_ + 1) % max_count();
   }
@@ -81,17 +103,43 @@
     return static_cast<T>(sum_);
   }
 
-  T ComputeMean() const {
+  double ComputeMean() const {
     if (count_ == 0) {
-      return static_cast<T>(0);
+      return 0.0;
     }
-    return static_cast<T>(sum_ / count_);
+    return sum_ / count_;
+  }
+
+  T ComputeMax() const {
+    if (max_stale_) {
+      ASSERT(count_ > 0 &&
+          "It shouldn't be possible for max_stale_ && count_ == 0");
+      max_ = samples_[next_index_];
+      for (size_t i = 1u; i < count_; i++) {
+        max_ = _max(max_, samples_[(next_index_ + i) % max_count()]);
+      }
+      max_stale_ = false;
+    }
+    return max_;
+  }
+
+  T ComputeMin() const {
+    if (min_stale_) {
+      ASSERT(count_ > 0 &&
+          "It shouldn't be possible for min_stale_ && count_ == 0");
+      min_ = samples_[next_index_];
+      for (size_t i = 1u; i < count_; i++) {
+        min_ = _min(min_, samples_[(next_index_ + i) % max_count()]);
+      }
+      min_stale_ = false;
+    }
+    return min_;
   }
 
   // O(n) time complexity.
   // Weights nth sample with weight (learning_rate)^n. Learning_rate should be
   // between (0.0, 1.0], otherwise the non-weighted mean is returned.
-  T ComputeWeightedMean(double learning_rate) const {
+  double ComputeWeightedMean(double learning_rate) const {
     if (count_ < 1 || learning_rate <= 0.0 || learning_rate >= 1.0) {
       return ComputeMean();
     }
@@ -106,27 +154,31 @@
       size_t index = (next_index_ + max_size - i - 1) % max_size;
       weighted_mean += current_weight * samples_[index];
     }
-    return static_cast<T>(weighted_mean / weight_sum);
+    return weighted_mean / weight_sum;
   }
 
   // Compute estimated variance.  Estimation is more accurate
   // as the number of samples grows.
-  T ComputeVariance() const {
+  double ComputeVariance() const {
     if (count_ == 0) {
-      return static_cast<T>(0);
+      return 0.0;
     }
     // Var = E[x^2] - (E[x])^2
     double count_inv = 1.0 / count_;
     double mean_2 = sum_2_ * count_inv;
     double mean = sum_ * count_inv;
-    return static_cast<T>(mean_2 - (mean * mean));
+    return mean_2 - (mean * mean);
   }
 
  private:
   size_t count_;
   size_t next_index_;
-  double sum_;    // Sum(x)
-  double sum_2_;  // Sum(x*x)
+  double sum_;    // Sum(x) - double to avoid overflow
+  double sum_2_;  // Sum(x*x) - double to avoid overflow
+  mutable T max_;
+  mutable bool max_stale_;
+  mutable T min_;
+  mutable bool min_stale_;
   std::vector<T> samples_;
 
   DISALLOW_COPY_AND_ASSIGN(RollingAccumulator);
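
ComputeMax and ComputeMin above use lazy invalidation: evicting a sample that
ties or beats the cached extremum only marks the cache stale, and the O(n)
rescan is deferred until the next query, so AddSample stays O(1). A
self-contained sketch of the technique, reduced to a max-only window
(illustrative, not the class above):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

class LazyMaxWindow {
 public:
  explicit LazyMaxWindow(size_t max_count)
      : samples_(max_count), count_(0), next_(0), max_(0), stale_(false) {}

  void AddSample(int sample) {
    if (count_ == samples_.size()) {
      // Evicting the current max invalidates the cached extremum.
      if (samples_[next_] >= max_) stale_ = true;
    } else {
      ++count_;
    }
    samples_[next_] = sample;
    if (count_ == 1 || sample >= max_) {
      max_ = sample;
      stale_ = false;
    }
    next_ = (next_ + 1) % samples_.size();
  }

  int ComputeMax() {
    if (stale_) {
      assert(count_ > 0);
      // Deferred O(n) rescan over the valid samples.
      max_ = *std::max_element(samples_.begin(), samples_.begin() + count_);
      stale_ = false;
    }
    return max_;
  }

 private:
  std::vector<int> samples_;
  size_t count_;
  size_t next_;
  int max_;
  bool stale_;
};
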
diff --git a/talk/base/rollingaccumulator_unittest.cc b/talk/base/rollingaccumulator_unittest.cc
index c283103..e6d0ea2 100644
--- a/talk/base/rollingaccumulator_unittest.cc
+++ b/talk/base/rollingaccumulator_unittest.cc
@@ -40,8 +40,10 @@
   RollingAccumulator<int> accum(10);
 
   EXPECT_EQ(0U, accum.count());
-  EXPECT_EQ(0, accum.ComputeMean());
-  EXPECT_EQ(0, accum.ComputeVariance());
+  EXPECT_DOUBLE_EQ(0.0, accum.ComputeMean());
+  EXPECT_DOUBLE_EQ(0.0, accum.ComputeVariance());
+  EXPECT_EQ(0, accum.ComputeMin());
+  EXPECT_EQ(0, accum.ComputeMax());
 }
 
 TEST(RollingAccumulatorTest, SomeSamples) {
@@ -52,9 +54,11 @@
 
   EXPECT_EQ(4U, accum.count());
   EXPECT_EQ(6, accum.ComputeSum());
-  EXPECT_EQ(1, accum.ComputeMean());
-  EXPECT_EQ(2, accum.ComputeWeightedMean(kLearningRate));
-  EXPECT_EQ(1, accum.ComputeVariance());
+  EXPECT_DOUBLE_EQ(1.5, accum.ComputeMean());
+  EXPECT_NEAR(2.26666, accum.ComputeWeightedMean(kLearningRate), 0.01);
+  EXPECT_DOUBLE_EQ(1.25, accum.ComputeVariance());
+  EXPECT_EQ(0, accum.ComputeMin());
+  EXPECT_EQ(3, accum.ComputeMax());
 }
 
 TEST(RollingAccumulatorTest, RollingSamples) {
@@ -65,9 +69,36 @@
 
   EXPECT_EQ(10U, accum.count());
   EXPECT_EQ(65, accum.ComputeSum());
-  EXPECT_EQ(6, accum.ComputeMean());
-  EXPECT_EQ(10, accum.ComputeWeightedMean(kLearningRate));
-  EXPECT_NEAR(9, accum.ComputeVariance(), 1);
+  EXPECT_DOUBLE_EQ(6.5, accum.ComputeMean());
+  EXPECT_NEAR(10.0, accum.ComputeWeightedMean(kLearningRate), 0.01);
+  EXPECT_NEAR(9.0, accum.ComputeVariance(), 1.0);
+  EXPECT_EQ(2, accum.ComputeMin());
+  EXPECT_EQ(11, accum.ComputeMax());
+}
+
+TEST(RollingAccumulatorTest, ResetSamples) {
+  RollingAccumulator<int> accum(10);
+
+  for (int i = 0; i < 10; ++i) {
+    accum.AddSample(100);
+  }
+  EXPECT_EQ(10U, accum.count());
+  EXPECT_DOUBLE_EQ(100.0, accum.ComputeMean());
+  EXPECT_EQ(100, accum.ComputeMin());
+  EXPECT_EQ(100, accum.ComputeMax());
+
+  accum.Reset();
+  EXPECT_EQ(0U, accum.count());
+
+  for (int i = 0; i < 5; ++i) {
+    accum.AddSample(i);
+  }
+
+  EXPECT_EQ(5U, accum.count());
+  EXPECT_EQ(10, accum.ComputeSum());
+  EXPECT_DOUBLE_EQ(2.0, accum.ComputeMean());
+  EXPECT_EQ(0, accum.ComputeMin());
+  EXPECT_EQ(4, accum.ComputeMax());
 }
 
 TEST(RollingAccumulatorTest, RollingSamplesDouble) {
@@ -81,22 +112,24 @@
   EXPECT_DOUBLE_EQ(87.5, accum.ComputeMean());
   EXPECT_NEAR(105.049, accum.ComputeWeightedMean(kLearningRate), 0.1);
   EXPECT_NEAR(229.166667, accum.ComputeVariance(), 25);
+  EXPECT_DOUBLE_EQ(65.0, accum.ComputeMin());
+  EXPECT_DOUBLE_EQ(110.0, accum.ComputeMax());
 }
 
 TEST(RollingAccumulatorTest, ComputeWeightedMeanCornerCases) {
   RollingAccumulator<int> accum(10);
-  EXPECT_EQ(0, accum.ComputeWeightedMean(kLearningRate));
-  EXPECT_EQ(0, accum.ComputeWeightedMean(0.0));
-  EXPECT_EQ(0, accum.ComputeWeightedMean(1.1));
+  EXPECT_DOUBLE_EQ(0.0, accum.ComputeWeightedMean(kLearningRate));
+  EXPECT_DOUBLE_EQ(0.0, accum.ComputeWeightedMean(0.0));
+  EXPECT_DOUBLE_EQ(0.0, accum.ComputeWeightedMean(1.1));
 
   for (int i = 0; i < 8; ++i) {
     accum.AddSample(i);
   }
 
-  EXPECT_EQ(3, accum.ComputeMean());
-  EXPECT_EQ(3, accum.ComputeWeightedMean(0));
-  EXPECT_EQ(3, accum.ComputeWeightedMean(1.1));
-  EXPECT_EQ(6, accum.ComputeWeightedMean(kLearningRate));
+  EXPECT_DOUBLE_EQ(3.5, accum.ComputeMean());
+  EXPECT_DOUBLE_EQ(3.5, accum.ComputeWeightedMean(0));
+  EXPECT_DOUBLE_EQ(3.5, accum.ComputeWeightedMean(1.1));
+  EXPECT_NEAR(6.0, accum.ComputeWeightedMean(kLearningRate), 0.1);
 }
 
 }  // namespace talk_base
diff --git a/talk/libjingle.scons b/talk/libjingle.scons
index dc5e9a0..d8ead7f 100644
--- a/talk/libjingle.scons
+++ b/talk/libjingle.scons
@@ -544,6 +544,7 @@
                 "base/callback_unittest.cc",
                 "base/cpumonitor_unittest.cc",
                 "base/crc32_unittest.cc",
+                "base/criticalsection_unittest.cc",
                 "base/event_unittest.cc",
                 "base/filelock_unittest.cc",
                 "base/fileutils_unittest.cc",
diff --git a/talk/libjingle_tests.gyp b/talk/libjingle_tests.gyp
index 038fb4f..31dc551 100755
--- a/talk/libjingle_tests.gyp
+++ b/talk/libjingle_tests.gyp
@@ -123,6 +123,7 @@
         'base/callback_unittest.cc',
         'base/cpumonitor_unittest.cc',
         'base/crc32_unittest.cc',
+        'base/criticalsection_unittest.cc',
         'base/event_unittest.cc',
         'base/filelock_unittest.cc',
         'base/fileutils_unittest.cc',
diff --git a/talk/media/base/constants.cc b/talk/media/base/constants.cc
index 72ea043..761ef5c 100644
--- a/talk/media/base/constants.cc
+++ b/talk/media/base/constants.cc
@@ -99,4 +99,6 @@
 const char kRtpAbsoluteSendTimeHeaderExtension[] =
     "http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time";
 
+const int kNumDefaultUnsignalledVideoRecvStreams = 0;
+
 }  // namespace cricket
diff --git a/talk/media/base/constants.h b/talk/media/base/constants.h
index 5f123ff..9f4d4a8 100644
--- a/talk/media/base/constants.h
+++ b/talk/media/base/constants.h
@@ -120,6 +120,7 @@
 // http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time
 extern const char kRtpAbsoluteSendTimeHeaderExtension[];
 
+extern const int kNumDefaultUnsignalledVideoRecvStreams;
 }  // namespace cricket
 
 #endif  // TALK_MEDIA_BASE_CONSTANTS_H_
diff --git a/talk/media/base/mediachannel.h b/talk/media/base/mediachannel.h
index 7c42b70..0af14a9 100644
--- a/talk/media/base/mediachannel.h
+++ b/talk/media/base/mediachannel.h
@@ -289,6 +289,7 @@
     process_adaptation_threshhold.Set(kProcessCpuThreshold);
     system_low_adaptation_threshhold.Set(kLowSystemCpuThreshold);
     system_high_adaptation_threshhold.Set(kHighSystemCpuThreshold);
+    unsignalled_recv_stream_limit.Set(kNumDefaultUnsignalledVideoRecvStreams);
   }
 
   void SetAll(const VideoOptions& change) {
@@ -317,6 +318,7 @@
     lower_min_bitrate.SetFrom(change.lower_min_bitrate);
     dscp.SetFrom(change.dscp);
     suspend_below_min_bitrate.SetFrom(change.suspend_below_min_bitrate);
+    unsignalled_recv_stream_limit.SetFrom(change.unsignalled_recv_stream_limit);
   }
 
   bool operator==(const VideoOptions& o) const {
@@ -342,7 +344,8 @@
         buffered_mode_latency == o.buffered_mode_latency &&
         lower_min_bitrate == o.lower_min_bitrate &&
         dscp == o.dscp &&
-        suspend_below_min_bitrate == o.suspend_below_min_bitrate;
+        suspend_below_min_bitrate == o.suspend_below_min_bitrate &&
+        unsignalled_recv_stream_limit == o.unsignalled_recv_stream_limit;
   }
 
   std::string ToString() const {
@@ -372,6 +375,8 @@
     ost << ToStringIfSet("dscp", dscp);
     ost << ToStringIfSet("suspend below min bitrate",
                          suspend_below_min_bitrate);
+    ost << ToStringIfSet("num channels for early receive",
+                         unsignalled_recv_stream_limit);
     ost << "}";
     return ost.str();
   }
@@ -421,6 +426,8 @@
   // Enable WebRTC suspension of video. No video frames will be sent when the
   // bitrate is below the configured minimum bitrate.
   Settable<bool> suspend_below_min_bitrate;
+  // Limit on the number of early receive channels that can be created.
+  Settable<int> unsignalled_recv_stream_limit;
 };
 
 // A class for playing out soundclips.
@@ -677,6 +684,20 @@
   std::vector<SsrcReceiverInfo> remote_stats;
 };
 
+template<class T>
+struct VariableInfo {
+  VariableInfo()
+      : min_val(),
+        mean(0.0),
+        max_val(),
+        variance(0.0) {
+  }
+  T min_val;
+  double mean;
+  T max_val;
+  double variance;
+};
+
 struct MediaReceiverInfo {
   MediaReceiverInfo()
       : bytes_rcvd(0),
@@ -782,6 +803,7 @@
   VideoSenderInfo()
       : packets_cached(0),
         firs_rcvd(0),
+        plis_rcvd(0),
         nacks_rcvd(0),
         input_frame_width(0),
         input_frame_height(0),
@@ -801,6 +823,7 @@
   std::vector<SsrcGroup> ssrc_groups;
   int packets_cached;
   int firs_rcvd;
+  int plis_rcvd;
   int nacks_rcvd;
   int input_frame_width;
   int input_frame_height;
@@ -815,12 +838,16 @@
   int avg_encode_ms;
   int encode_usage_percent;
   int capture_queue_delay_ms_per_s;
+  VariableInfo<int> adapt_frame_drops;
+  VariableInfo<int> effects_frame_drops;
+  VariableInfo<double> capturer_frame_time;
 };
 
 struct VideoReceiverInfo : public MediaReceiverInfo {
   VideoReceiverInfo()
       : packets_concealed(0),
         firs_sent(0),
+        plis_sent(0),
         nacks_sent(0),
         frame_width(0),
         frame_height(0),
@@ -841,6 +868,7 @@
   std::vector<SsrcGroup> ssrc_groups;
   int packets_concealed;
   int firs_sent;
+  int plis_sent;
   int nacks_sent;
   int frame_width;
   int frame_height;
diff --git a/talk/media/base/videoadapter.cc b/talk/media/base/videoadapter.cc
index f197995..bcf89cb 100644
--- a/talk/media/base/videoadapter.cc
+++ b/talk/media/base/videoadapter.cc
@@ -30,6 +30,7 @@
 #include "talk/base/logging.h"
 #include "talk/base/timeutils.h"
 #include "talk/media/base/constants.h"
+#include "talk/media/base/videocommon.h"
 #include "talk/media/base/videoframe.h"
 
 namespace cricket {
@@ -235,6 +236,10 @@
   return input_format_;
 }
 
+bool VideoAdapter::drops_all_frames() const {
+  return output_num_pixels_ == 0;
+}
+
 const VideoFormat& VideoAdapter::output_format() {
   talk_base::CritScope cs(&critical_section_);
   return output_format_;
@@ -308,7 +313,7 @@
   }
 
   float scale = 1.f;
-  if (output_num_pixels_) {
+  if (output_num_pixels_ < input_format_.width * input_format_.height) {
     scale = VideoAdapter::FindClosestViewScale(
         static_cast<int>(in_frame->GetWidth()),
         static_cast<int>(in_frame->GetHeight()),
@@ -316,6 +321,9 @@
     output_format_.width = static_cast<int>(in_frame->GetWidth() * scale + .5f);
     output_format_.height = static_cast<int>(in_frame->GetHeight() * scale +
                                              .5f);
+  } else {
+    output_format_.width = static_cast<int>(in_frame->GetWidth());
+    output_format_.height = static_cast<int>(in_frame->GetHeight());
   }
 
   if (!StretchToOutputFrame(in_frame)) {
diff --git a/talk/media/base/videoadapter.h b/talk/media/base/videoadapter.h
index 98c64d6..64a850f 100644
--- a/talk/media/base/videoadapter.h
+++ b/talk/media/base/videoadapter.h
@@ -51,6 +51,8 @@
   int GetOutputNumPixels() const;
 
   const VideoFormat& input_format();
+  // Returns true if the adapter drops all frames in calls to AdaptFrame.
+  bool drops_all_frames() const;
   const VideoFormat& output_format();
   // If the parameter black is true, the adapted frames will be black.
   void SetBlackOutput(bool black);
diff --git a/talk/media/base/videocapturer.cc b/talk/media/base/videocapturer.cc
index c5e725c..5f51727 100644
--- a/talk/media/base/videocapturer.cc
+++ b/talk/media/base/videocapturer.cc
@@ -63,6 +63,10 @@
 static const int kDefaultScreencastFps = 5;
 typedef talk_base::TypedMessageData<CaptureState> StateChangeParams;
 
+// Limit stats data collection to ~20 seconds of 30fps data (600 samples)
+// before dropping old data, in case stats aren't reset for long periods.
+static const size_t kMaxAccumulatorSize = 600;
+
 }  // namespace
 
 /////////////////////////////////////////////////////////////////////
@@ -92,11 +96,19 @@
 /////////////////////////////////////////////////////////////////////
 // Implementation of class VideoCapturer
 /////////////////////////////////////////////////////////////////////
-VideoCapturer::VideoCapturer() : thread_(talk_base::Thread::Current()) {
+VideoCapturer::VideoCapturer()
+    : thread_(talk_base::Thread::Current()),
+      adapt_frame_drops_data_(kMaxAccumulatorSize),
+      effect_frame_drops_data_(kMaxAccumulatorSize),
+      frame_time_data_(kMaxAccumulatorSize) {
   Construct();
 }
 
-VideoCapturer::VideoCapturer(talk_base::Thread* thread) : thread_(thread) {
+VideoCapturer::VideoCapturer(talk_base::Thread* thread)
+    : thread_(thread),
+      adapt_frame_drops_data_(kMaxAccumulatorSize),
+      effect_frame_drops_data_(kMaxAccumulatorSize),
+      frame_time_data_(kMaxAccumulatorSize) {
   Construct();
 }
 
@@ -112,6 +124,9 @@
   muted_ = false;
   black_frame_count_down_ = kNumBlackFramesOnMute;
   enable_video_adapter_ = true;
+  adapt_frame_drops_ = 0;
+  effect_frame_drops_ = 0;
+  previous_frame_time_ = 0.0;
 }
 
 const std::vector<VideoFormat>* VideoCapturer::GetSupportedFormats() const {
@@ -119,6 +134,7 @@
 }
 
 bool VideoCapturer::StartCapturing(const VideoFormat& capture_format) {
+  previous_frame_time_ = frame_length_time_reporter_.TimerNow();
   CaptureState result = Start(capture_format);
   const bool success = (result == CS_RUNNING) || (result == CS_STARTING);
   if (!success) {
@@ -306,6 +322,19 @@
   return ss.str();
 }
 
+void VideoCapturer::GetStats(VariableInfo<int>* adapt_drops_stats,
+                             VariableInfo<int>* effect_drops_stats,
+                             VariableInfo<double>* frame_time_stats) {
+  talk_base::CritScope cs(&frame_stats_crit_);
+  GetVariableSnapshot(adapt_frame_drops_data_, adapt_drops_stats);
+  GetVariableSnapshot(effect_frame_drops_data_, effect_drops_stats);
+  GetVariableSnapshot(frame_time_data_, frame_time_stats);
+
+  adapt_frame_drops_data_.Reset();
+  effect_frame_drops_data_.Reset();
+  frame_time_data_.Reset();
+}
+
 void VideoCapturer::OnFrameCaptured(VideoCapturer*,
                                     const CapturedFrame* captured_frame) {
   if (muted_) {
@@ -482,19 +511,36 @@
     VideoFrame* out_frame = NULL;
     video_adapter_.AdaptFrame(adapted_frame, &out_frame);
     if (!out_frame) {
-      return;  // VideoAdapter dropped the frame.
+      // VideoAdapter dropped the frame.
+      ++adapt_frame_drops_;
+      return;
     }
     adapted_frame = out_frame;
   }
 
   if (!muted_ && !ApplyProcessors(adapted_frame)) {
     // Processor dropped the frame.
+    ++effect_frame_drops_;
     return;
   }
   if (muted_) {
     adapted_frame->SetToBlack();
   }
   SignalVideoFrame(this, adapted_frame);
+
+  double time_now = frame_length_time_reporter_.TimerNow();
+  if (previous_frame_time_ != 0.0) {
+    // Update stats under the lock that also guards GetStats() fetches.
+    talk_base::CritScope cs(&frame_stats_crit_);
+
+    adapt_frame_drops_data_.AddSample(adapt_frame_drops_);
+    effect_frame_drops_data_.AddSample(effect_frame_drops_);
+    frame_time_data_.AddSample(time_now - previous_frame_time_);
+  }
+  previous_frame_time_ = time_now;
+  effect_frame_drops_ = 0;
+  adapt_frame_drops_ = 0;
+
 #endif  // VIDEO_FRAME_NAME
 }
 
@@ -669,4 +715,14 @@
          format.height > max_format_->height;
 }
 
+template<class T>
+void VideoCapturer::GetVariableSnapshot(
+    const talk_base::RollingAccumulator<T>& data,
+    VariableInfo<T>* stats) {
+  stats->max_val = data.ComputeMax();
+  stats->mean = data.ComputeMean();
+  stats->min_val = data.ComputeMin();
+  stats->variance = data.ComputeVariance();
+}
+
 }  // namespace cricket
diff --git a/talk/media/base/videocapturer.h b/talk/media/base/videocapturer.h
index 37b37ba..c45ad78 100644
--- a/talk/media/base/videocapturer.h
+++ b/talk/media/base/videocapturer.h
@@ -34,9 +34,12 @@
 #include "talk/base/basictypes.h"
 #include "talk/base/criticalsection.h"
 #include "talk/base/messagehandler.h"
+#include "talk/base/rollingaccumulator.h"
 #include "talk/base/scoped_ptr.h"
 #include "talk/base/sigslot.h"
 #include "talk/base/thread.h"
+#include "talk/base/timing.h"
+#include "talk/media/base/mediachannel.h"
 #include "talk/media/base/videoadapter.h"
 #include "talk/media/base/videocommon.h"
 #include "talk/media/devices/devicemanager.h"
@@ -286,6 +289,13 @@
     return &video_adapter_;
   }
 
+  // Gets statistics for tracked variables recorded since the last call to
+  // GetStats.  Note that calling GetStats resets any gathered data so it
+  // should be called only periodically to log statistics.
+  void GetStats(VariableInfo<int>* adapt_drop_stats,
+                VariableInfo<int>* effect_drop_stats,
+                VariableInfo<double>* frame_time_stats);
+
  protected:
   // Callback attached to SignalFrameCaptured where SignalVideoFrames is called.
   void OnFrameCaptured(VideoCapturer* video_capturer,
@@ -338,6 +348,13 @@
   // Returns true if format doesn't fulfill all applied restrictions.
   bool ShouldFilterFormat(const VideoFormat& format) const;
 
+  // Helper function that snapshots the current contents of a
+  // RollingAccumulator into |stats|.
+  template<class T>
+  static void GetVariableSnapshot(
+      const talk_base::RollingAccumulator<T>& data,
+      VariableInfo<T>* stats);
+
   talk_base::Thread* thread_;
   std::string id_;
   CaptureState capture_state_;
@@ -359,6 +376,16 @@
   bool enable_video_adapter_;
   CoordinatedVideoAdapter video_adapter_;
 
+  talk_base::Timing frame_length_time_reporter_;
+  talk_base::CriticalSection frame_stats_crit_;
+
+  int adapt_frame_drops_;
+  talk_base::RollingAccumulator<int> adapt_frame_drops_data_;
+  int effect_frame_drops_;
+  talk_base::RollingAccumulator<int> effect_frame_drops_data_;
+  double previous_frame_time_;
+  talk_base::RollingAccumulator<double> frame_time_data_;
+
   talk_base::CriticalSection crit_;
   VideoProcessors video_processors_;
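
GetStats above is read-and-reset: each call snapshots the min/mean/max/variance
accumulated since the previous call, then clears the accumulators, so it should
be invoked at the logging period rather than per frame. A hypothetical caller
(LogCapturerStats is not part of this change):

void LogCapturerStats(cricket::VideoCapturer* capturer) {
  cricket::VariableInfo<int> adapt_drops;
  cricket::VariableInfo<int> effect_drops;
  cricket::VariableInfo<double> frame_time;
  // Read-and-reset: returns data gathered since the last call, then clears.
  capturer->GetStats(&adapt_drops, &effect_drops, &frame_time);
  LOG(LS_INFO) << "adapt drops: mean=" << adapt_drops.mean
               << " max=" << adapt_drops.max_val
               << "; frame time: mean=" << frame_time.mean
               << " variance=" << frame_time.variance;
}
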
 
diff --git a/talk/media/base/videoengine_unittest.h b/talk/media/base/videoengine_unittest.h
index 2a762bc..5586d76 100644
--- a/talk/media/base/videoengine_unittest.h
+++ b/talk/media/base/videoengine_unittest.h
@@ -473,19 +473,30 @@
                                 cricket::FOURCC_I420);
     EXPECT_EQ(cricket::CS_RUNNING, video_capturer_->Start(format));
     EXPECT_TRUE(channel_->SetCapturer(kSsrc, video_capturer_.get()));
-    EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrc, format));
   }
+  // Utility method to set up an additional stream to send and receive video.
+  // Used to test send and recv between two streams.
   void SetUpSecondStream() {
-    EXPECT_TRUE(channel_->AddRecvStream(
-        cricket::StreamParams::CreateLegacy(kSsrc)));
+    SetUpSecondStreamWithNoRecv();
+    // Setup recv for second stream.
     EXPECT_TRUE(channel_->AddRecvStream(
         cricket::StreamParams::CreateLegacy(kSsrc + 2)));
+    // Make the second renderer available for use by a new stream.
+    EXPECT_TRUE(channel_->SetRenderer(kSsrc + 2, &renderer2_));
+  }
+  // Sets up an additional stream that only sends video, deferring the recv
+  // stream. Required for testing unsignalled recv of video RTP packets.
+  void SetUpSecondStreamWithNoRecv() {
     // SetUp() already added kSsrc make sure duplicate SSRCs cant be added.
+    EXPECT_TRUE(channel_->AddRecvStream(
+        cricket::StreamParams::CreateLegacy(kSsrc)));
     EXPECT_FALSE(channel_->AddSendStream(
         cricket::StreamParams::CreateLegacy(kSsrc)));
     EXPECT_TRUE(channel_->AddSendStream(
         cricket::StreamParams::CreateLegacy(kSsrc + 2)));
+    // We don't add a recv stream for the second stream here.
 
+    // Set up the capturer for the second send stream.
     video_capturer_2_.reset(new cricket::FakeVideoCapturer());
     cricket::VideoFormat format(640, 480,
                                 cricket::VideoFormat::FpsToInterval(30),
@@ -493,9 +504,6 @@
     EXPECT_EQ(cricket::CS_RUNNING, video_capturer_2_->Start(format));
 
     EXPECT_TRUE(channel_->SetCapturer(kSsrc + 2, video_capturer_2_.get()));
-    // Make the second renderer available for use by a new stream.
-    EXPECT_TRUE(channel_->SetRenderer(kSsrc + 2, &renderer2_));
-    EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrc + 2, format));
   }
   virtual void TearDown() {
     channel_.reset();
@@ -718,7 +726,6 @@
     EXPECT_TRUE(channel_->SetCapturer(kSsrc, video_capturer_.get()));
     EXPECT_TRUE(SetOneCodec(DefaultCodec()));
     EXPECT_FALSE(channel_->sending());
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->sending());
     EXPECT_TRUE(SendFrame());
@@ -755,7 +762,6 @@
   // Tests that we can send and receive frames.
   void SendAndReceive(const cricket::VideoCodec& codec) {
     EXPECT_TRUE(SetOneCodec(codec));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -768,7 +774,6 @@
   void SendManyResizeOnce() {
     cricket::VideoCodec codec(DefaultCodec());
     EXPECT_TRUE(SetOneCodec(codec));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -783,7 +788,6 @@
     codec.width /= 2;
     codec.height /= 2;
     EXPECT_TRUE(SetOneCodec(codec));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
     EXPECT_TRUE(WaitAndSendFrame(30));
     EXPECT_FRAME_WAIT(3, codec.width, codec.height, kTimeout);
     EXPECT_EQ(2, renderer_.num_set_sizes());
@@ -800,6 +804,7 @@
     EXPECT_EQ(NumRtpPackets(), info.senders[0].packets_sent);
     EXPECT_EQ(0.0, info.senders[0].fraction_lost);
     EXPECT_EQ(0, info.senders[0].firs_rcvd);
+    EXPECT_EQ(0, info.senders[0].plis_rcvd);
     EXPECT_EQ(0, info.senders[0].nacks_rcvd);
     EXPECT_EQ(DefaultCodec().width, info.senders[0].send_frame_width);
     EXPECT_EQ(DefaultCodec().height, info.senders[0].send_frame_height);
@@ -816,6 +821,7 @@
     EXPECT_EQ(0, info.receivers[0].packets_lost);
     EXPECT_EQ(0, info.receivers[0].packets_concealed);
     EXPECT_EQ(0, info.receivers[0].firs_sent);
+    EXPECT_EQ(0, info.receivers[0].plis_sent);
     EXPECT_EQ(0, info.receivers[0].nacks_sent);
     EXPECT_EQ(DefaultCodec().width, info.receivers[0].frame_width);
     EXPECT_EQ(DefaultCodec().height, info.receivers[0].frame_height);
@@ -858,6 +864,7 @@
     EXPECT_EQ(NumRtpPackets(), info.senders[0].packets_sent);
     EXPECT_EQ(0.0, info.senders[0].fraction_lost);
     EXPECT_EQ(0, info.senders[0].firs_rcvd);
+    EXPECT_EQ(0, info.senders[0].plis_rcvd);
     EXPECT_EQ(0, info.senders[0].nacks_rcvd);
     EXPECT_EQ(DefaultCodec().width, info.senders[0].send_frame_width);
     EXPECT_EQ(DefaultCodec().height, info.senders[0].send_frame_height);
@@ -874,6 +881,7 @@
       EXPECT_EQ(0, info.receivers[i].packets_lost);
       EXPECT_EQ(0, info.receivers[i].packets_concealed);
       EXPECT_EQ(0, info.receivers[i].firs_sent);
+      EXPECT_EQ(0, info.receivers[i].plis_sent);
       EXPECT_EQ(0, info.receivers[i].nacks_sent);
       EXPECT_EQ(DefaultCodec().width, info.receivers[i].frame_width);
       EXPECT_EQ(DefaultCodec().height, info.receivers[i].frame_height);
@@ -893,7 +901,6 @@
     EXPECT_TRUE(channel_->AddRecvStream(
         cricket::StreamParams::CreateLegacy(1234)));
     channel_->UpdateAspectRatio(640, 400);
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_TRUE(SendFrame());
@@ -914,7 +921,6 @@
     EXPECT_TRUE(channel_->AddSendStream(
         cricket::StreamParams::CreateLegacy(5678)));
     EXPECT_TRUE(channel_->SetCapturer(5678, capturer.get()));
-    EXPECT_TRUE(channel_->SetSendStreamFormat(5678, format));
     EXPECT_TRUE(channel_->AddRecvStream(
         cricket::StreamParams::CreateLegacy(5678)));
     EXPECT_TRUE(channel_->SetRenderer(5678, &renderer1));
@@ -997,7 +1003,6 @@
     talk_base::SetBE32(packet1.data() + 8, kSsrc);
     channel_->SetRenderer(0, NULL);
     EXPECT_TRUE(SetDefaultCodec());
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -1021,7 +1026,6 @@
   // Tests setting up and configuring a send stream.
   void AddRemoveSendStreams() {
     EXPECT_TRUE(SetOneCodec(DefaultCodec()));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_TRUE(SendFrame());
@@ -1168,7 +1172,6 @@
   void AddRemoveRecvStreamAndRender() {
     cricket::FakeVideoRenderer renderer1;
     EXPECT_TRUE(SetDefaultCodec());
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_TRUE(channel_->AddRecvStream(
@@ -1213,7 +1216,6 @@
     cricket::VideoOptions vmo;
     vmo.conference_mode.Set(true);
     EXPECT_TRUE(channel_->SetOptions(vmo));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_TRUE(channel_->AddRecvStream(
@@ -1251,7 +1253,6 @@
     codec.height = 240;
     const int time_between_send = TimeBetweenSend(codec);
     EXPECT_TRUE(SetOneCodec(codec));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -1273,7 +1274,6 @@
     int captured_frames = 1;
     for (int iterations = 0; iterations < 2; ++iterations) {
       EXPECT_TRUE(channel_->SetCapturer(kSsrc, capturer.get()));
-      EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
       talk_base::Thread::Current()->ProcessMessages(time_between_send);
       EXPECT_TRUE(capturer->CaptureCustomFrame(format.width, format.height,
                                                cricket::FOURCC_I420));
@@ -1313,7 +1313,6 @@
   // added, the plugin shouldn't crash (and no black frame should be sent).
   void RemoveCapturerWithoutAdd() {
     EXPECT_TRUE(SetOneCodec(DefaultCodec()));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -1375,8 +1374,6 @@
     // TODO(hellner): this seems like an unnecessary constraint, fix it.
     EXPECT_TRUE(channel_->SetCapturer(1, capturer1.get()));
     EXPECT_TRUE(channel_->SetCapturer(2, capturer2.get()));
-    EXPECT_TRUE(SetSendStreamFormat(1, DefaultCodec()));
-    EXPECT_TRUE(SetSendStreamFormat(2, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     // Test capturer associated with engine.
@@ -1409,7 +1406,6 @@
 
     cricket::VideoCodec codec(DefaultCodec());
     EXPECT_TRUE(SetOneCodec(codec));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
 
     cricket::FakeVideoRenderer renderer;
@@ -1435,7 +1431,6 @@
     // Capture frame to not get same frame timestamps as previous capturer.
     capturer->CaptureFrame();
     EXPECT_TRUE(channel_->SetCapturer(kSsrc, capturer.get()));
-    EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrc, capture_format));
     EXPECT_TRUE(talk_base::Thread::Current()->ProcessMessages(30));
     EXPECT_TRUE(capturer->CaptureCustomFrame(kWidth, kHeight,
                                              cricket::FOURCC_ARGB));
@@ -1455,7 +1450,6 @@
     codec.height /= 2;
     // Adapt the resolution.
     EXPECT_TRUE(SetOneCodec(codec));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
     EXPECT_TRUE(WaitAndSendFrame(30));
     EXPECT_FRAME_WAIT(2, codec.width, codec.height, kTimeout);
   }
@@ -1469,7 +1463,6 @@
     codec.height /= 2;
     // Adapt the resolution.
     EXPECT_TRUE(SetOneCodec(codec));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, codec));
     EXPECT_TRUE(WaitAndSendFrame(30));
     EXPECT_FRAME_WAIT(2, codec.width, codec.height, kTimeout);
   }
@@ -1604,7 +1597,6 @@
             cricket::VideoFormat::FpsToInterval(30),
             cricket::FOURCC_I420));
     EXPECT_TRUE(channel_->SetCapturer(kSsrc, &video_capturer));
-    EXPECT_TRUE(SetSendStreamFormat(kSsrc, DefaultCodec()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_EQ(frame_count, renderer_.num_rendered_frames());
@@ -1704,6 +1696,121 @@
     EXPECT_TRUE(channel_->RemoveSendStream(0));
   }
 
+  // Tests that we can send and receive frames with early receive.
+  void TwoStreamsSendAndUnsignalledRecv(const cricket::VideoCodec& codec) {
+    cricket::VideoOptions vmo;
+    vmo.conference_mode.Set(true);
+    vmo.unsignalled_recv_stream_limit.Set(1);
+    EXPECT_TRUE(channel_->SetOptions(vmo));
+    SetUpSecondStreamWithNoRecv();
+    // Test sending and receiving on first stream.
+    EXPECT_TRUE(channel_->SetRender(true));
+    Send(codec);
+    EXPECT_EQ_WAIT(2, NumRtpPackets(), kTimeout);
+    EXPECT_EQ_WAIT(1, renderer_.num_rendered_frames(), kTimeout);
+    // The first send is not expected to yield frames, because the ssrc
+    // is not signalled yet. With unsignalled recv enabled, we will drop frames
+    // instead of packets.
+    EXPECT_EQ(0, renderer2_.num_rendered_frames());
+    // Give a chance for the decoder to process before adding the receiver.
+    talk_base::Thread::Current()->ProcessMessages(10);
+    // Test sending and receiving on second stream.
+    EXPECT_TRUE(channel_->AddRecvStream(
+        cricket::StreamParams::CreateLegacy(kSsrc + 2)));
+    EXPECT_TRUE(channel_->SetRenderer(kSsrc + 2, &renderer2_));
+    SendFrame();
+    EXPECT_EQ_WAIT(2, renderer_.num_rendered_frames(), kTimeout);
+    EXPECT_EQ(4, NumRtpPackets());
+    // The second send is expected to yield frame as the ssrc is signalled now.
+    // Decode should succeed here, though we received the key frame earlier.
+    // Without early recv, we would have dropped it and decoding would have
+    // failed.
+    EXPECT_EQ_WAIT(1, renderer2_.num_rendered_frames(), kTimeout);
+  }
+
+  // Tests that we cannot receive key frames with unsignalled recv disabled.
+  void TwoStreamsSendAndFailUnsignalledRecv(const cricket::VideoCodec& codec) {
+    cricket::VideoOptions vmo;
+    vmo.unsignalled_recv_stream_limit.Set(0);
+    EXPECT_TRUE(channel_->SetOptions(vmo));
+    SetUpSecondStreamWithNoRecv();
+    // Test sending and receiving on first stream.
+    EXPECT_TRUE(channel_->SetRender(true));
+    Send(codec);
+    EXPECT_EQ_WAIT(2, NumRtpPackets(), kTimeout);
+    EXPECT_EQ_WAIT(1, renderer_.num_rendered_frames(), kTimeout);
+    EXPECT_EQ_WAIT(0, renderer2_.num_rendered_frames(), kTimeout);
+    // Give a chance for the decoder to process before adding the receiver.
+    talk_base::Thread::Current()->ProcessMessages(10);
+    // Test sending and receiving on second stream.
+    EXPECT_TRUE(channel_->AddRecvStream(
+        cricket::StreamParams::CreateLegacy(kSsrc + 2)));
+    EXPECT_TRUE(channel_->SetRenderer(kSsrc + 2, &renderer2_));
+    SendFrame();
+    EXPECT_TRUE_WAIT(renderer_.num_rendered_frames() >= 1, kTimeout);
+    EXPECT_EQ_WAIT(4, NumRtpPackets(), kTimeout);
+    // We don't expect any frames here, because the key frame was among the
+    // earlier dropped packets. This is the case we want to solve with early
+    // receive.
+    EXPECT_EQ(0, renderer2_.num_rendered_frames());
+  }
+
+  // Tests that we drop key frames when conference mode is disabled and we
+  // receive rtp packets on unsignalled streams.
+  void TwoStreamsSendAndFailUnsignalledRecvInOneToOne(
+      const cricket::VideoCodec& codec) {
+    cricket::VideoOptions vmo;
+    vmo.conference_mode.Set(false);
+    vmo.unsignalled_recv_stream_limit.Set(1);
+    EXPECT_TRUE(channel_->SetOptions(vmo));
+    SetUpSecondStreamWithNoRecv();
+    // Test sending and receiving on first stream.
+    EXPECT_TRUE(channel_->SetRender(true));
+    Send(codec);
+    EXPECT_EQ_WAIT(2, NumRtpPackets(), kTimeout);
+    EXPECT_EQ_WAIT(1, renderer_.num_rendered_frames(), kTimeout);
+    EXPECT_EQ_WAIT(0, renderer2_.num_rendered_frames(), kTimeout);
+    // Give a chance for the decoder to process before adding the receiver.
+    talk_base::Thread::Current()->ProcessMessages(10);
+    // Test sending and receiving on second stream.
+    EXPECT_TRUE(channel_->AddRecvStream(
+        cricket::StreamParams::CreateLegacy(kSsrc + 2)));
+    EXPECT_TRUE(channel_->SetRenderer(kSsrc + 2, &renderer2_));
+    SendFrame();
+    EXPECT_TRUE_WAIT(renderer_.num_rendered_frames() >= 1, kTimeout);
+    EXPECT_EQ_WAIT(4, NumRtpPackets(), kTimeout);
+    // We don't expect any frames here, because the key frame was among the
+    // earlier dropped packets. This is the case we want to solve with early
+    // receive.
+    EXPECT_EQ(0, renderer2_.num_rendered_frames());
+  }
+
+  // Tests that we drop frames (not packets) when conference mode is enabled
+  // and we receive RTP packets on unsignalled streams, and that removal of
+  // an unsignalled recv stream succeeds.
+  void TwoStreamsAddAndRemoveUnsignalledRecv(
+      const cricket::VideoCodec& codec) {
+    cricket::VideoOptions vmo;
+    vmo.conference_mode.Set(true);
+    vmo.unsignalled_recv_stream_limit.Set(1);
+    EXPECT_TRUE(channel_->SetOptions(vmo));
+    SetUpSecondStreamWithNoRecv();
+    // Sending and receiving on first stream.
+    EXPECT_TRUE(channel_->SetRender(true));
+    Send(codec);
+    EXPECT_EQ_WAIT(2, NumRtpPackets(), kTimeout);
+    EXPECT_EQ_WAIT(1, renderer_.num_rendered_frames(), kTimeout);
+    // The first send is not expected to yield frames, because the ssrc is
+    // not signalled yet. With unsignalled recv enabled, we will drop frames
+    // instead of packets.
+    EXPECT_EQ(0, renderer2_.num_rendered_frames());
+    // Give a chance for the decoder to process before adding the receiver.
+    talk_base::Thread::Current()->ProcessMessages(100);
+    // Ensure that we can remove the unsignalled recv stream that was created
+    // when the first video packet with unsignalled recv ssrc is received.
+    EXPECT_TRUE(channel_->RemoveRecvStream(kSsrc + 2));
+  }
+
   VideoEngineOverride<E> engine_;
   talk_base::scoped_ptr<cricket::FakeVideoCapturer> video_capturer_;
   talk_base::scoped_ptr<cricket::FakeVideoCapturer> video_capturer_2_;
diff --git a/talk/media/devices/filevideocapturer.cc b/talk/media/devices/filevideocapturer.cc
index f5c078d..e79783f 100644
--- a/talk/media/devices/filevideocapturer.cc
+++ b/talk/media/devices/filevideocapturer.cc
@@ -209,8 +209,14 @@
   std::vector<VideoFormat> supported;
   supported.push_back(format);
 
+  // TODO(thorcarpenter): Report the actual file video format as the supported
+  // format. Do not use kMinimumInterval as it conflicts with video adaptation.
   SetId(device.id);
   SetSupportedFormats(supported);
+
+  // TODO(wuwang): Design an E2E integration test for video adaptation,
+  // then remove the call below that disables the video adapter.
+  set_enable_video_adapter(false);
   return true;
 }
 
diff --git a/talk/media/webrtc/fakewebrtcvideoengine.h b/talk/media/webrtc/fakewebrtcvideoengine.h
index 315c69c..74d0607 100644
--- a/talk/media/webrtc/fakewebrtcvideoengine.h
+++ b/talk/media/webrtc/fakewebrtcvideoengine.h
@@ -1044,6 +1044,10 @@
     channels_[channel]->transmission_smoothing_ = enable;
     return 0;
   }
+#ifdef USE_WEBRTC_DEV_BRANCH
+  WEBRTC_STUB_CONST(GetRtcpPacketTypeCounters, (int,
+      webrtc::RtcpPacketTypeCounter*, webrtc::RtcpPacketTypeCounter*));
+#endif
   WEBRTC_STUB_CONST(GetReceivedRTCPStatistics, (const int, unsigned short&,
       unsigned int&, unsigned int&, unsigned int&, int&));
   WEBRTC_STUB_CONST(GetSentRTCPStatistics, (const int, unsigned short&,
diff --git a/talk/media/webrtc/fakewebrtcvoiceengine.h b/talk/media/webrtc/fakewebrtcvoiceengine.h
index 0eb880b..36dcb78 100644
--- a/talk/media/webrtc/fakewebrtcvoiceengine.h
+++ b/talk/media/webrtc/fakewebrtcvoiceengine.h
@@ -371,6 +371,13 @@
   }
   WEBRTC_FUNC(SetSendCodec, (int channel, const webrtc::CodecInst& codec)) {
     WEBRTC_CHECK_CHANNEL(channel);
+    // To match the behavior of the real implementation.
+    if (_stricmp(codec.plname, "telephone-event") == 0 ||
+        _stricmp(codec.plname, "audio/telephone-event") == 0 ||
+        _stricmp(codec.plname, "CN") == 0 ||
+        _stricmp(codec.plname, "red") == 0 ) {
+      return -1;
+    }
     channels_[channel]->send_codec = codec;
     return 0;
   }
diff --git a/talk/media/webrtc/webrtcvideoengine.cc b/talk/media/webrtc/webrtcvideoengine.cc
index 72432f1..025ad9d 100644
--- a/talk/media/webrtc/webrtcvideoengine.cc
+++ b/talk/media/webrtc/webrtcvideoengine.cc
@@ -317,8 +317,7 @@
          target_delay_ms_(0),
          jitter_buffer_ms_(0),
          min_playout_delay_ms_(0),
-         render_delay_ms_(0),
-         firs_requested_(0) {
+         render_delay_ms_(0) {
   }
 
   // virtual functions from VieDecoderObserver.
@@ -350,16 +349,11 @@
     render_delay_ms_ = render_delay_ms;
   }
 
-  virtual void RequestNewKeyFrame(const int videoChannel) {
-    talk_base::CritScope cs(&crit_);
-    ASSERT(video_channel_ == videoChannel);
-    ++firs_requested_;
-  }
+  virtual void RequestNewKeyFrame(const int videoChannel) {}
 
   // Populate |rinfo| based on previously-set data in |*this|.
   void ExportTo(VideoReceiverInfo* rinfo) {
     talk_base::CritScope cs(&crit_);
-    rinfo->firs_sent = firs_requested_;
     rinfo->framerate_rcvd = framerate_;
     rinfo->decode_ms = decode_ms_;
     rinfo->max_decode_ms = max_decode_ms_;
@@ -382,7 +376,6 @@
   int jitter_buffer_ms_;
   int min_playout_delay_ms_;
   int render_delay_ms_;
-  int firs_requested_;
 };
 
 class WebRtcEncoderObserver : public webrtc::ViEEncoderObserver {
@@ -672,7 +665,6 @@
     ASSERT(adapter && "Video adapter should not be null here.");
 
     UpdateAdapterCpuOptions();
-    adapter->OnOutputFormatRequest(video_format_);
 
     overuse_observer_.reset(new WebRtcOveruseObserver(adapter));
     // (Dis)connect the video adapter from the cpu monitor as appropriate.
@@ -1557,6 +1549,7 @@
       remb_enabled_(false),
       render_started_(false),
       first_receive_ssrc_(0),
+      num_unsignalled_recv_channels_(0),
       send_rtx_type_(-1),
       send_red_type_(-1),
       send_fec_type_(-1),
@@ -1936,27 +1929,33 @@
     return true;
   }
 
-  if (recv_channels_.find(sp.first_ssrc()) != recv_channels_.end() ||
-      first_receive_ssrc_ == sp.first_ssrc()) {
-    LOG(LS_ERROR) << "Stream already exists";
-    return false;
-  }
-
-  // TODO(perkj): Implement recv media from multiple media SSRCs per stream.
-  // NOTE: We have two SSRCs per stream when RTX is enabled.
-  if (!IsOneSsrcStream(sp)) {
-    LOG(LS_ERROR) << "WebRtcVideoMediaChannel supports one primary SSRC per"
-                  << " stream and one FID SSRC per primary SSRC.";
-    return false;
-  }
-
-  // Create a new channel for receiving video data.
-  // In order to get the bandwidth estimation work fine for
-  // receive only channels, we connect all receiving channels
-  // to our master send channel.
   int channel_id = -1;
-  if (!CreateChannel(sp.first_ssrc(), MD_RECV, &channel_id)) {
-    return false;
+  RecvChannelMap::iterator channel_iterator =
+      recv_channels_.find(sp.first_ssrc());
+  if (channel_iterator == recv_channels_.end() &&
+      first_receive_ssrc_ != sp.first_ssrc()) {
+    // TODO(perkj): Implement recv media from multiple media SSRCs per stream.
+    // NOTE: We have two SSRCs per stream when RTX is enabled.
+    if (!IsOneSsrcStream(sp)) {
+      LOG(LS_ERROR) << "WebRtcVideoMediaChannel supports one primary SSRC per"
+                    << " stream and one FID SSRC per primary SSRC.";
+      return false;
+    }
+
+    // Create a new channel for receiving video data.
+    // In order to get the bandwidth estimation work fine for
+    // receive only channels, we connect all receiving channels
+    // to our master send channel.
+    if (!CreateChannel(sp.first_ssrc(), MD_RECV, &channel_id)) {
+      return false;
+    }
+  } else {
+    // The stream already exists.
+    if (first_receive_ssrc_ == sp.first_ssrc()) {
+      LOG(LS_ERROR) << "Stream already exists";
+      return false;
+    }
+    // Reuse the channel that was created when this stream's first packet
+    // arrived before the stream was signalled.
+    channel_id = channel_iterator->second->channel_id();
   }
 
   // Set the corresponding RTX SSRC.
@@ -2327,12 +2326,18 @@
       sinfo.packets_cached = -1;
       sinfo.packets_lost = -1;
       sinfo.fraction_lost = -1;
-      sinfo.firs_rcvd = -1;
-      sinfo.nacks_rcvd = -1;
       sinfo.rtt_ms = -1;
       sinfo.input_frame_width = static_cast<int>(channel_stream_info->width());
       sinfo.input_frame_height =
           static_cast<int>(channel_stream_info->height());
+
+      VideoCapturer* video_capturer = send_channel->video_capturer();
+      if (video_capturer) {
+        video_capturer->GetStats(&sinfo.adapt_frame_drops,
+                                 &sinfo.effects_frame_drops,
+                                 &sinfo.capturer_frame_time);
+      }
+
       webrtc::VideoCodec vie_codec;
       if (engine()->vie()->codec()->GetSendCodec(channel_id, vie_codec) == 0) {
         sinfo.send_frame_width = vie_codec.width;
@@ -2368,6 +2373,26 @@
         sinfo.capture_queue_delay_ms_per_s = capture_queue_delay_ms_per_s;
       }
 
+#ifdef USE_WEBRTC_DEV_BRANCH
+      webrtc::RtcpPacketTypeCounter rtcp_sent;
+      webrtc::RtcpPacketTypeCounter rtcp_received;
+      if (engine()->vie()->rtp()->GetRtcpPacketTypeCounters(
+          channel_id, &rtcp_sent, &rtcp_received) == 0) {
+        sinfo.firs_rcvd = rtcp_received.fir_packets;
+        sinfo.plis_rcvd = rtcp_received.pli_packets;
+        sinfo.nacks_rcvd = rtcp_received.nack_packets;
+      } else {
+        sinfo.firs_rcvd = -1;
+        sinfo.plis_rcvd = -1;
+        sinfo.nacks_rcvd = -1;
+        LOG_RTCERR1(GetRtcpPacketTypeCounters, channel_id);
+      }
+#else
+      sinfo.firs_rcvd = -1;
+      sinfo.plis_rcvd = -1;
+      sinfo.nacks_rcvd = -1;
+#endif
+
       // Get received RTCP statistics for the sender (reported by the remote
       // client in a RTCP packet), if available.
       // It's not a fatal error if we can't, since RTCP may not have arrived
@@ -2425,10 +2450,6 @@
   unsigned int estimated_recv_bandwidth = 0;
   for (RecvChannelMap::const_iterator it = recv_channels_.begin();
        it != recv_channels_.end(); ++it) {
-    // Don't report receive statistics from the default channel if we have
-    // specified receive channels.
-    if (it->first == 0 && recv_channels_.size() > 1)
-      continue;
     WebRtcVideoChannelRecvInfo* channel = it->second;
 
     unsigned int ssrc;
@@ -2453,7 +2474,6 @@
     rinfo.packets_lost = -1;
     rinfo.packets_concealed = -1;
     rinfo.fraction_lost = -1;  // from SentRTCP
-    rinfo.nacks_sent = -1;
     rinfo.frame_width = channel->render_adapter()->width();
     rinfo.frame_height = channel->render_adapter()->height();
     int fps = channel->render_adapter()->framerate();
@@ -2461,6 +2481,26 @@
     rinfo.framerate_output = fps;
     channel->decoder_observer()->ExportTo(&rinfo);
 
+#ifdef USE_WEBRTC_DEV_BRANCH
+    webrtc::RtcpPacketTypeCounter rtcp_sent;
+    webrtc::RtcpPacketTypeCounter rtcp_received;
+    if (engine()->vie()->rtp()->GetRtcpPacketTypeCounters(
+        channel->channel_id(), &rtcp_sent, &rtcp_received) == 0) {
+      rinfo.firs_sent = rtcp_sent.fir_packets;
+      rinfo.plis_sent = rtcp_sent.pli_packets;
+      rinfo.nacks_sent = rtcp_sent.nack_packets;
+    } else {
+      rinfo.firs_sent = -1;
+      rinfo.plis_sent = -1;
+      rinfo.nacks_sent = -1;
+      LOG_RTCERR1(GetRtcpPacketTypeCounters, channel->channel_id());
+    }
+#else
+    rinfo.firs_sent = -1;
+    rinfo.plis_sent = -1;
+    rinfo.nacks_sent = -1;
+#endif
+
     // Get our locally created statistics of the received RTP stream.
     webrtc::RtcpStatistics incoming_stream_rtcp_stats;
     int incoming_stream_rtt_ms;
@@ -2558,13 +2598,18 @@
   uint32 ssrc = 0;
   if (!GetRtpSsrc(packet->data(), packet->length(), &ssrc))
     return;
-  int which_channel = GetRecvChannelNum(ssrc);
-  if (which_channel == -1) {
-    which_channel = video_channel();
+  int processing_channel = GetRecvChannelNum(ssrc);
+  if (processing_channel == -1) {
+    // Allocate an unsignalled recv channel for processing in conference mode.
+    if (!InConferenceMode() ||
+        !CreateUnsignalledRecvChannel(ssrc, &processing_channel)) {
+      // If we can't find or allocate one, use the default.
+      processing_channel = video_channel();
+    }
   }
 
   engine()->vie()->network()->ReceivedRTPPacket(
-      which_channel,
+      processing_channel,
       packet->data(),
       static_cast<int>(packet->length()),
       webrtc::PacketTime(packet_time.timestamp, packet_time.not_before));
@@ -3101,6 +3146,22 @@
   return true;
 }
 
+bool WebRtcVideoMediaChannel::CreateUnsignalledRecvChannel(
+    uint32 ssrc_key, int* out_channel_id) {
+  int unsignalled_recv_channel_limit =
+      options_.unsignalled_recv_stream_limit.GetWithDefaultIfUnset(
+          kNumDefaultUnsignalledVideoRecvStreams);
+  if (num_unsignalled_recv_channels_ >= unsignalled_recv_channel_limit) {
+    return false;
+  }
+  if (!CreateChannel(ssrc_key, MD_RECV, out_channel_id)) {
+    return false;
+  }
+  // TODO(tvsriram): Support dynamic sizing of unsignalled recv channels.
+  num_unsignalled_recv_channels_++;
+  return true;
+}
+
 bool WebRtcVideoMediaChannel::ConfigureChannel(int channel_id,
                                                MediaDirection direction,
                                                uint32 ssrc_key) {
diff --git a/talk/media/webrtc/webrtcvideoengine.h b/talk/media/webrtc/webrtcvideoengine.h
index 668d760..b75c20b 100644
--- a/talk/media/webrtc/webrtcvideoengine.h
+++ b/talk/media/webrtc/webrtcvideoengine.h
@@ -323,6 +323,7 @@
   // returning false.
   bool CreateChannel(uint32 ssrc_key, MediaDirection direction,
                      int* channel_id);
+  bool CreateUnsignalledRecvChannel(uint32 ssrc_key, int* channel_id);
   bool ConfigureChannel(int channel_id, MediaDirection direction,
                         uint32 ssrc_key);
   bool ConfigureReceiving(int channel_id, uint32 remote_ssrc_key);
@@ -431,6 +432,7 @@
   bool render_started_;
   uint32 first_receive_ssrc_;
   std::vector<RtpHeaderExtension> receive_extensions_;
+  int num_unsignalled_recv_channels_;
 
   // Global send side state.
   SendChannelMap send_channels_;
diff --git a/talk/media/webrtc/webrtcvideoengine_unittest.cc b/talk/media/webrtc/webrtcvideoengine_unittest.cc
index 73e3c77..9d655b3 100644
--- a/talk/media/webrtc/webrtcvideoengine_unittest.cc
+++ b/talk/media/webrtc/webrtcvideoengine_unittest.cc
@@ -1292,7 +1292,6 @@
         cricket::StreamParams::CreateLegacy(kSsrcs2[i])));
     // Register the capturer to the ssrc.
     EXPECT_TRUE(channel_->SetCapturer(kSsrcs2[i], &capturer));
-    EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrcs2[i], capture_format_vga));
   }
 
   const int channel0 = vie_.GetChannelFromLocalSsrc(kSsrcs2[0]);
@@ -1937,10 +1936,6 @@
   EXPECT_TRUE(SetOneCodec(codec));
   codec.width /= 2;
   codec.height /= 2;
-  EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrc, cricket::VideoFormat(
-      codec.width, codec.height,
-      cricket::VideoFormat::FpsToInterval(codec.framerate),
-      cricket::FOURCC_ANY)));
   EXPECT_TRUE(SetSend(true));
   EXPECT_TRUE(channel_->SetRender(true));
   EXPECT_EQ(0, renderer_.num_rendered_frames());
@@ -2099,3 +2094,26 @@
   Base::TwoStreamsReUseFirstStream(cricket::VideoCodec(100, "VP8", 640, 400, 30,
                                                        0));
 }
+
+TEST_F(WebRtcVideoMediaChannelTest, TwoStreamsSendAndUnsignalledRecv) {
+  Base::TwoStreamsSendAndUnsignalledRecv(cricket::VideoCodec(100, "VP8", 640,
+                                                             400, 30, 0));
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, TwoStreamsSendAndFailUnsignalledRecv) {
+  Base::TwoStreamsSendAndFailUnsignalledRecv(
+      cricket::VideoCodec(100, "VP8", 640, 400, 30, 0));
+}
+
+TEST_F(WebRtcVideoMediaChannelTest,
+       TwoStreamsSendAndFailUnsignalledRecvInOneToOne) {
+  Base::TwoStreamsSendAndFailUnsignalledRecvInOneToOne(
+      cricket::VideoCodec(100, "VP8", 640, 400, 30, 0));
+}
+
+TEST_F(WebRtcVideoMediaChannelTest,
+       TwoStreamsAddAndRemoveUnsignalledRecv) {
+  Base::TwoStreamsAddAndRemoveUnsignalledRecv(cricket::VideoCodec(100, "VP8",
+                                                                  640, 400, 30,
+                                                                  0));
+}
diff --git a/talk/media/webrtc/webrtcvoiceengine.cc b/talk/media/webrtc/webrtcvoiceengine.cc
index 8db8c99..c3b090e 100644
--- a/talk/media/webrtc/webrtcvoiceengine.cc
+++ b/talk/media/webrtc/webrtcvoiceengine.cc
@@ -194,6 +194,18 @@
   return false;
 }
 
+static bool IsTelephoneEventCodec(const std::string& name) {
+  return _stricmp(name.c_str(), "telephone-event") == 0;
+}
+
+static bool IsCNCodec(const std::string& name) {
+  return _stricmp(name.c_str(), "CN") == 0;
+}
+
+static bool IsRedCodec(const std::string& name) {
+  return _stricmp(name.c_str(), "red") == 0;
+}
+
 static bool FindCodec(const std::vector<AudioCodec>& codecs,
                       const AudioCodec& codec,
                       AudioCodec* found_codec) {
@@ -1966,10 +1978,11 @@
 
   // Scan through the list to figure out the codec to use for sending, along
   // with the proper configuration for VAD and DTMF.
-  bool first = true;
+  bool found_send_codec = false;
   webrtc::CodecInst send_codec;
   memset(&send_codec, 0, sizeof(send_codec));
 
+  // Set the send codec (the first codec in the list that is not
+  // telephone-event or CN).
   for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
        it != codecs.end(); ++it) {
     // Ignore codecs we don't know about. The negotiation step should prevent
@@ -1980,6 +1993,11 @@
       continue;
     }
 
+    if (IsTelephoneEventCodec(it->name) || IsCNCodec(it->name)) {
+      // Skip telephone-event/CN codec, which will be handled later.
+      continue;
+    }
+
     // If OPUS, change what we send according to the "stereo" codec
     // parameter, and not the "channels" parameter.  We set
     // voe_codec.channels to 2 if "stereo=1" and 1 otherwise.  If
@@ -2015,21 +2033,73 @@
       }
     }
 
+    // We'll use the first codec in the list to actually send audio data.
+    // Be sure to use the payload type requested by the remote side.
+    // "red", for FEC audio, is a special case where the actual codec to be
+    // used is specified in params.
+    if (IsRedCodec(it->name)) {
+      // Parse out the RED parameters. If we fail, just ignore RED;
+      // we don't support all possible params/usage scenarios.
+      if (!GetRedSendCodec(*it, codecs, &send_codec)) {
+        continue;
+      }
+
+      // Enable redundant encoding of the specified codec. Treat any
+      // failure as a fatal internal error.
+      LOG(LS_INFO) << "Enabling FEC";
+      if (engine()->voe()->rtp()->SetFECStatus(channel, true, it->id) == -1) {
+        LOG_RTCERR3(SetFECStatus, channel, true, it->id);
+        return false;
+      }
+    } else {
+      send_codec = voe_codec;
+      nack_enabled_ = IsNackEnabled(*it);
+      SetNack(channel, nack_enabled_);
+    }
+    found_send_codec = true;
+    break;
+  }
+
+  if (!found_send_codec) {
+    LOG(LS_WARNING) << "Received a codec list with no usable send codec.";
+    return false;
+  }
+
+  // Set the codec immediately, since SetVADStatus() depends on whether
+  // the current codec is mono or stereo.
+  if (!SetSendCodec(channel, send_codec))
+    return false;
+
+  // Always update the |send_codec_| to the currently set send codec.
+  send_codec_.reset(new webrtc::CodecInst(send_codec));
+
+  if (send_bw_setting_) {
+    SetSendBandwidthInternal(send_bw_bps_);
+  }
+
+  // Loop through the codecs list again to configure the telephone-event/CN
+  // codecs.
+  for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
+       it != codecs.end(); ++it) {
+    // Ignore codecs we don't know about. The negotiation step should prevent
+    // this, but double-check to be sure.
+    webrtc::CodecInst voe_codec;
+    if (!engine()->FindWebRtcCodec(*it, &voe_codec)) {
+      LOG(LS_WARNING) << "Unknown codec " << ToString(*it);
+      continue;
+    }
+
     // Find the DTMF telephone event "codec" and tell VoiceEngine channels
     // about it.
-    if (_stricmp(it->name.c_str(), "telephone-event") == 0 ||
-        _stricmp(it->name.c_str(), "audio/telephone-event") == 0) {
+    if (IsTelephoneEventCodec(it->name)) {
       if (engine()->voe()->dtmf()->SetSendTelephoneEventPayloadType(
               channel, it->id) == -1) {
         LOG_RTCERR2(SetSendTelephoneEventPayloadType, channel, it->id);
         return false;
       }
-    }
-
-    // Turn voice activity detection/comfort noise on if supported.
-    // Set the wideband CN payload type appropriately.
-    // (narrowband always uses the static payload type 13).
-    if (_stricmp(it->name.c_str(), "CN") == 0) {
+    } else if (IsCNCodec(it->name)) {
+      // Turn voice activity detection/comfort noise on if supported.
+      // Set the wideband CN payload type appropriately.
+      // (narrowband always uses the static payload type 13).
       webrtc::PayloadFrequencies cn_freq;
       switch (it->clockrate) {
         case 8000:
@@ -2062,7 +2132,6 @@
           // send the offer.
         }
       }
-
       // Only turn on VAD if we have a CN payload type that matches the
       // clockrate for the codec we are going to use.
       if (it->clockrate == send_codec.plfreq) {
@@ -2073,54 +2142,6 @@
         }
       }
     }
-
-    // We'll use the first codec in the list to actually send audio data.
-    // Be sure to use the payload type requested by the remote side.
-    // "red", for FEC audio, is a special case where the actual codec to be
-    // used is specified in params.
-    if (first) {
-      if (_stricmp(it->name.c_str(), "red") == 0) {
-        // Parse out the RED parameters. If we fail, just ignore RED;
-        // we don't support all possible params/usage scenarios.
-        if (!GetRedSendCodec(*it, codecs, &send_codec)) {
-          continue;
-        }
-
-        // Enable redundant encoding of the specified codec. Treat any
-        // failure as a fatal internal error.
-        LOG(LS_INFO) << "Enabling FEC";
-        if (engine()->voe()->rtp()->SetFECStatus(channel, true, it->id) == -1) {
-          LOG_RTCERR3(SetFECStatus, channel, true, it->id);
-          return false;
-        }
-      } else {
-        send_codec = voe_codec;
-        nack_enabled_ = IsNackEnabled(*it);
-        SetNack(channel, nack_enabled_);
-      }
-      first = false;
-      // Set the codec immediately, since SetVADStatus() depends on whether
-      // the current codec is mono or stereo.
-      if (!SetSendCodec(channel, send_codec))
-        return false;
-    }
-  }
-
-  // If we're being asked to set an empty list of codecs, due to a buggy client,
-  // choose the most common format: PCMU
-  if (first) {
-    LOG(LS_WARNING) << "Received empty list of codecs; using PCMU/8000";
-    AudioCodec codec(0, "PCMU", 8000, 0, 1, 0);
-    engine()->FindWebRtcCodec(codec, &send_codec);
-    if (!SetSendCodec(channel, send_codec))
-      return false;
-  }
-
-  // Always update the |send_codec_| to the currently set send codec.
-  send_codec_.reset(new webrtc::CodecInst(send_codec));
-
-  if (send_bw_setting_) {
-    SetSendBandwidthInternal(send_bw_bps_);
   }
 
   return true;
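
The restructured SetSendCodecs() is easiest to read as two passes over the
negotiated codec list. Below is a compilable miniature of the control flow;
MiniCodec and the helpers are stand-ins for illustration only, and Opus channel
handling, RED parameter parsing, and bandwidth settings are elided:

    #include <string>
    #include <vector>

    struct MiniCodec { std::string name; int id; };

    static bool IsPseudoCodec(const std::string& name) {
      return name == "telephone-event" || name == "CN";
    }

    // Pass 1 picks the send codec; pass 2 configures DTMF and CN/VAD.
    static bool TwoPassSelect(const std::vector<MiniCodec>& codecs,
                              MiniCodec* send_codec) {
      bool found = false;
      for (std::vector<MiniCodec>::const_iterator it = codecs.begin();
           it != codecs.end(); ++it) {
        if (IsPseudoCodec(it->name))
          continue;  // Handled in the second pass.
        *send_codec = *it;  // The first usable codec wins.
        found = true;
        break;
      }
      if (!found)
        return false;  // No usable codec: fail; no more PCMU fallback.
      for (std::vector<MiniCodec>::const_iterator it = codecs.begin();
           it != codecs.end(); ++it) {
        if (it->name == "telephone-event") {
          // Set the DTMF payload type from it->id.
        } else if (it->name == "CN") {
          // Set the CN payload type; enable VAD only if the CN clockrate
          // matches the send codec's.
        }
      }
      return true;
    }

The two-pass split matters because SetVADStatus() depends on whether the
already-selected send codec is mono or stereo, so the send codec has to be
committed before the CN/VAD pass runs.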
diff --git a/talk/media/webrtc/webrtcvoiceengine_unittest.cc b/talk/media/webrtc/webrtcvoiceengine_unittest.cc
index 946aa36..2abfd78 100644
--- a/talk/media/webrtc/webrtcvoiceengine_unittest.cc
+++ b/talk/media/webrtc/webrtcvoiceengine_unittest.cc
@@ -680,93 +680,67 @@
   EXPECT_EQ(106, voe_.GetSendTelephoneEventPayloadType(channel_num));
 }
 
-// TODO(pthatcher): Change failure behavior to returning false rather
-// than defaulting to PCMU.
-// Test that if clockrate is not 48000 for opus, we fail by fallback to PCMU.
+// Test that if clockrate is not 48000 for opus, we fail.
 TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBadClockrate) {
   EXPECT_TRUE(SetupEngine());
-  int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
   codecs.push_back(kOpusCodec);
   codecs[0].bitrate = 0;
   codecs[0].clockrate = 50000;
-  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
-  webrtc::CodecInst gcodec;
-  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_FALSE(channel_->SetSendCodecs(codecs));
 }
 
-// Test that if channels=0 for opus, we fail by falling back to PCMU.
+// Test that if channels=0 for opus, we fail.
 TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad0ChannelsNoStereo) {
   EXPECT_TRUE(SetupEngine());
-  int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
   codecs.push_back(kOpusCodec);
   codecs[0].bitrate = 0;
   codecs[0].channels = 0;
-  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
-  webrtc::CodecInst gcodec;
-  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_FALSE(channel_->SetSendCodecs(codecs));
 }
 
-// Test that if channels=0 for opus, we fail by falling back to PCMU.
+// Test that if channels=0 for opus, we fail.
 TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad0Channels1Stereo) {
   EXPECT_TRUE(SetupEngine());
-  int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
   codecs.push_back(kOpusCodec);
   codecs[0].bitrate = 0;
   codecs[0].channels = 0;
   codecs[0].params["stereo"] = "1";
-  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
-  webrtc::CodecInst gcodec;
-  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_FALSE(channel_->SetSendCodecs(codecs));
 }
 
 // Test that if channel is 1 for opus and there's no stereo, we fail.
 TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpus1ChannelNoStereo) {
   EXPECT_TRUE(SetupEngine());
-  int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
   codecs.push_back(kOpusCodec);
   codecs[0].bitrate = 0;
   codecs[0].channels = 1;
-  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
-  webrtc::CodecInst gcodec;
-  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_FALSE(channel_->SetSendCodecs(codecs));
 }
 
 // Test that if channel is 1 for opus and stereo=0, we fail.
 TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad1Channel0Stereo) {
   EXPECT_TRUE(SetupEngine());
-  int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
   codecs.push_back(kOpusCodec);
   codecs[0].bitrate = 0;
   codecs[0].channels = 1;
   codecs[0].params["stereo"] = "0";
-  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
-  webrtc::CodecInst gcodec;
-  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_FALSE(channel_->SetSendCodecs(codecs));
 }
 
 // Test that if channel is 1 for opus and stereo=1, we fail.
 TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad1Channel1Stereo) {
   EXPECT_TRUE(SetupEngine());
-  int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
   codecs.push_back(kOpusCodec);
   codecs[0].bitrate = 0;
   codecs[0].channels = 1;
   codecs[0].params["stereo"] = "1";
-  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
-  webrtc::CodecInst gcodec;
-  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_FALSE(channel_->SetSendCodecs(codecs));
 }
 
 // Test that with bitrate=0 and no stereo,
@@ -1087,11 +1061,11 @@
   int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
   codecs.push_back(kCeltCodec);
-  codecs.push_back(kPcmuCodec);
+  codecs.push_back(kIsacCodec);
   codecs[0].id = 96;
   codecs[0].channels = 2;
   codecs[0].bitrate = 96000;
-  codecs[1].bitrate = 96000;
+  codecs[1].bitrate = 64000;
   EXPECT_TRUE(channel_->SetSendCodecs(codecs));
   webrtc::CodecInst gcodec;
   EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
@@ -1103,10 +1077,10 @@
   codecs[0].channels = 1;
   EXPECT_TRUE(channel_->SetSendCodecs(codecs));
   EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_EQ(0, gcodec.pltype);
+  EXPECT_EQ(103, gcodec.pltype);
   EXPECT_EQ(1, gcodec.channels);
   EXPECT_EQ(64000, gcodec.rate);
-  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_STREQ("ISAC", gcodec.plname);
 }
 
 // Test that we can switch back and forth between CELT and ISAC with CN.
@@ -1186,21 +1160,49 @@
   EXPECT_EQ(32000, gcodec.rate);
 }
 
-// Test that we fall back to PCMU if no codecs are specified.
+// Test that we fail if no codecs are specified.
 TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsNoCodecs) {
   EXPECT_TRUE(SetupEngine());
+  std::vector<cricket::AudioCodec> codecs;
+  EXPECT_FALSE(channel_->SetSendCodecs(codecs));
+}
+
+// Test that we can set send codecs even when the telephone-event codec is
+// first in the list.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsDTMFOnTop) {
+  EXPECT_TRUE(SetupEngine());
   int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kTelephoneEventCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 98;  // DTMF
+  codecs[1].id = 96;
   EXPECT_TRUE(channel_->SetSendCodecs(codecs));
   webrtc::CodecInst gcodec;
   EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
-  EXPECT_EQ(0, gcodec.pltype);
-  EXPECT_STREQ("PCMU", gcodec.plname);
-  EXPECT_FALSE(voe_.GetVAD(channel_num));
-  EXPECT_FALSE(voe_.GetFEC(channel_num));
-  EXPECT_EQ(13, voe_.GetSendCNPayloadType(channel_num, false));
-  EXPECT_EQ(105, voe_.GetSendCNPayloadType(channel_num, true));
-  EXPECT_EQ(106, voe_.GetSendTelephoneEventPayloadType(channel_num));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_EQ(98, voe_.GetSendTelephoneEventPayloadType(channel_num));
+}
+
+// Test that we can set send codecs even when the CN codec is first in the
+// list.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNOnTop) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kCn16000Codec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 98;  // wideband CN
+  codecs[1].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_EQ(98, voe_.GetSendCNPayloadType(channel_num, true));
 }
 
 // Test that we set VAD and DTMF types correctly as caller.
diff --git a/talk/p2p/base/session.cc b/talk/p2p/base/session.cc
index e0911e1..05ac207 100644
--- a/talk/p2p/base/session.cc
+++ b/talk/p2p/base/session.cc
@@ -760,9 +760,9 @@
   // Transport, since this removes the need to manually iterate over all
   // the transports, as is needed to make sure signals are handled properly
   // when BUNDLEing.
-#if 0
-  ASSERT(!IsCandidateAllocationDone());
-#endif
+  // TODO(juberti): Per b/7998978, devs and QA are hitting this assert in ways
+  // that make it prohibitively difficult to run dbg builds. Disabled for now.
+  // ASSERT(!IsCandidateAllocationDone());
   for (TransportMap::iterator iter = transports_.begin();
        iter != transports_.end(); ++iter) {
     if (iter->second->impl() == transport) {
diff --git a/talk/session/media/mediasession_unittest.cc b/talk/session/media/mediasession_unittest.cc
index 0e64566..f0ea690 100644
--- a/talk/session/media/mediasession_unittest.cc
+++ b/talk/session/media/mediasession_unittest.cc
@@ -163,6 +163,8 @@
   RtpHeaderExtension("urn:ietf:params:rtp-hdrext:toffset", 14),
 };
 
+static const uint32 kSimulcastParamsSsrc[] = {10, 11, 20, 21, 30, 31};
+static const uint32 kSimSsrc[] = {10, 20, 30};
 static const uint32 kFec1Ssrc[] = {10, 11};
 static const uint32 kFec2Ssrc[] = {20, 21};
 static const uint32 kFec3Ssrc[] = {30, 31};
@@ -192,6 +194,32 @@
     tdf2_.set_identity(&id2_);
   }
 
+  // Create a video StreamParamsVec object containing one video stream with
+  // three simulcast streams and FEC.
+  StreamParamsVec CreateComplexVideoStreamParamsVec() {
+    SsrcGroup sim_group("SIM", MAKE_VECTOR(kSimSsrc));
+    SsrcGroup fec_group1("FEC", MAKE_VECTOR(kFec1Ssrc));
+    SsrcGroup fec_group2("FEC", MAKE_VECTOR(kFec2Ssrc));
+    SsrcGroup fec_group3("FEC", MAKE_VECTOR(kFec3Ssrc));
+
+    std::vector<SsrcGroup> ssrc_groups;
+    ssrc_groups.push_back(sim_group);
+    ssrc_groups.push_back(fec_group1);
+    ssrc_groups.push_back(fec_group2);
+    ssrc_groups.push_back(fec_group3);
+
+    StreamParams simulcast_params;
+    simulcast_params.id = kVideoTrack1;
+    simulcast_params.ssrcs = MAKE_VECTOR(kSimulcastParamsSsrc);
+    simulcast_params.ssrc_groups = ssrc_groups;
+    simulcast_params.cname = "Video_SIM_FEC";
+    simulcast_params.sync_label = kMediaStream1;
+
+    StreamParamsVec video_streams;
+    video_streams.push_back(simulcast_params);
+
+    return video_streams;
+  }
 
   bool CompareCryptoParams(const CryptoParamsVec& c1,
                            const CryptoParamsVec& c2) {
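
For readers unfamiliar with SSRC groups: the helper describes a single track
whose media is sent as three simulcast layers (the SIM group {10, 20, 30}),
each paired with an FEC SSRC ({10, 11}, {20, 21}, {30, 31}); the flat ssrcs
list enumerates all six. A hypothetical sanity check of the layout built above:

    StreamParamsVec streams = CreateComplexVideoStreamParamsVec();
    const StreamParams& sp = streams[0];
    ASSERT(sp.ssrcs.size() == 6);        // 10, 11, 20, 21, 30, 31.
    ASSERT(sp.ssrc_groups.size() == 4);  // One SIM group, three FEC groups.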
diff --git a/talk/session/media/planarfunctions_unittest.cc b/talk/session/media/planarfunctions_unittest.cc
new file mode 100644
index 0000000..32cacf9
--- /dev/null
+++ b/talk/session/media/planarfunctions_unittest.cc
@@ -0,0 +1,1010 @@
+// libjingle
+// Copyright 2014 Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//  1. Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//  2. Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//  3. The name of the author may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <string>
+
+#include "libyuv/convert.h"
+#include "libyuv/convert_from.h"
+#include "libyuv/convert_from_argb.h"
+#include "libyuv/format_conversion.h"
+#include "libyuv/mjpeg_decoder.h"
+#include "libyuv/planar_functions.h"
+#include "talk/base/flags.h"
+#include "talk/base/gunit.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/media/base/testutils.h"
+#include "talk/media/base/videocommon.h"
+
+// Undefine macros for the Windows build.
+#undef max
+#undef min
+
+using cricket::DumpPlanarYuvTestImage;
+
+DEFINE_bool(planarfunctions_dump, false,
+    "whether to write out scaled images for inspection");
+DEFINE_int(planarfunctions_repeat, 1,
+    "how many times to perform each scaling operation (for perf testing)");
+
+namespace cricket {
+
+// Number of testing colors in each color channel.
+static const int kTestingColorChannelResolution = 6;
+
+// The total number of testing colors:
+// kTestingColorNum = kTestingColorChannelResolution^3.
+static const int kTestingColorNum = kTestingColorChannelResolution *
+    kTestingColorChannelResolution * kTestingColorChannelResolution;
+
+static const int kWidth = 1280;
+static const int kHeight = 720;
+static const int kAlignment = 16;
+
+class PlanarFunctionsTest : public testing::TestWithParam<int> {
+ protected:
+  PlanarFunctionsTest() : dump_(false), repeat_(1) {
+    InitializeColorBand();
+  }
+
+  virtual void SetUp() {
+    dump_ = FLAG_planarfunctions_dump;
+    repeat_ = FLAG_planarfunctions_repeat;
+  }
+
+  // Initialize the color band for testing.
+  void InitializeColorBand() {
+    testing_color_y_.reset(new uint8[kTestingColorNum]);
+    testing_color_u_.reset(new uint8[kTestingColorNum]);
+    testing_color_v_.reset(new uint8[kTestingColorNum]);
+    testing_color_r_.reset(new uint8[kTestingColorNum]);
+    testing_color_g_.reset(new uint8[kTestingColorNum]);
+    testing_color_b_.reset(new uint8[kTestingColorNum]);
+    int color_counter = 0;
+    for (int i = 0; i < kTestingColorChannelResolution; ++i) {
+      uint8 color_r = static_cast<uint8>(
+          i * 255 / (kTestingColorChannelResolution - 1));
+      for (int j = 0; j < kTestingColorChannelResolution; ++j) {
+        uint8 color_g = static_cast<uint8>(
+            j * 255 / (kTestingColorChannelResolution - 1));
+        for (int k = 0; k < kTestingColorChannelResolution; ++k) {
+          uint8 color_b = static_cast<uint8>(
+              k * 255 / (kTestingColorChannelResolution - 1));
+          testing_color_r_[color_counter] = color_r;
+          testing_color_g_[color_counter] = color_g;
+          testing_color_b_[color_counter] = color_b;
+          // Convert the testing RGB colors to YUV colors.
+          ConvertRgbPixel(color_r, color_g, color_b,
+                          &(testing_color_y_[color_counter]),
+                          &(testing_color_u_[color_counter]),
+                          &(testing_color_v_[color_counter]));
+          ++color_counter;
+        }
+      }
+    }
+  }
+  // Simple and slow RGB->YUV conversion. From NTSC standard, c/o Wikipedia.
+  // (from lmivideoframe_unittest.cc)
+  void ConvertRgbPixel(uint8 r, uint8 g, uint8 b,
+                       uint8* y, uint8* u, uint8* v) {
+    *y = ClampUint8(.257 * r + .504 * g + .098 * b + 16);
+    *u = ClampUint8(-.148 * r - .291 * g + .439 * b + 128);
+    *v = ClampUint8(.439 * r - .368 * g - .071 * b + 128);
+  }
+
+  uint8 ClampUint8(double value) {
+    value = std::max(0., std::min(255., value));
+    uint8 uint8_value = static_cast<uint8>(value);
+    return uint8_value;
+  }
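+
+  // A quick sanity check of the formulas above, assuming the uint8 cast
+  // truncates: pure red (r = 255, g = 0, b = 0) maps to
+  //   y = .257 * 255 + 16   = 81.5  -> 81
+  //   u = -.148 * 255 + 128 = 90.3  -> 90
+  //   v = .439 * 255 + 128  = 239.9 -> 239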
+
+  // Generate a Red-Green-Blue interweaving chessboard-like
+  // YUV testing image (I420/I422/I444).
+  // The pattern looks like c0 c1 c2 c3 ...
+  //                        c1 c2 c3 c4 ...
+  //                        c2 c3 c4 c5 ...
+  //                        ...............
+  // The size of each chroma block is (block_size) x (block_size).
+  uint8* CreateFakeYuvTestingImage(int height, int width, int block_size,
+                                   libyuv::JpegSubsamplingType subsample_type,
+                                   uint8* &y_pointer,
+                                   uint8* &u_pointer,
+                                   uint8* &v_pointer) {
+    if (height <= 0 || width <= 0 || block_size <= 0) { return NULL; }
+    int y_size = height * width;
+    int u_size, v_size;
+    int vertical_sample_ratio = 1, horizontal_sample_ratio = 1;
+    switch (subsample_type) {
+      case libyuv::kJpegYuv420:
+        u_size = ((height + 1) >> 1) * ((width + 1) >> 1);
+        v_size = u_size;
+        vertical_sample_ratio = 2, horizontal_sample_ratio = 2;
+        break;
+      case libyuv::kJpegYuv422:
+        u_size = height * ((width + 1) >> 1);
+        v_size = u_size;
+        vertical_sample_ratio = 1, horizontal_sample_ratio = 2;
+        break;
+      case libyuv::kJpegYuv444:
+        v_size = u_size = y_size;
+        vertical_sample_ratio = 1, horizontal_sample_ratio = 1;
+        break;
+      case libyuv::kJpegUnknown:
+      default:
+        return NULL;
+        break;
+    }
+    uint8* image_pointer = new uint8[y_size + u_size + v_size + kAlignment];
+    y_pointer = ALIGNP(image_pointer, kAlignment);
+    u_pointer = ALIGNP(&image_pointer[y_size], kAlignment);
+    v_pointer = ALIGNP(&image_pointer[y_size + u_size], kAlignment);
+    uint8* current_y_pointer = y_pointer;
+    uint8* current_u_pointer = u_pointer;
+    uint8* current_v_pointer = v_pointer;
+    for (int j = 0; j < height; ++j) {
+      for (int i = 0; i < width; ++i) {
+        int color = ((i / block_size) + (j / block_size)) % kTestingColorNum;
+        *(current_y_pointer++) = testing_color_y_[color];
+        if (i % horizontal_sample_ratio == 0 &&
+            j % vertical_sample_ratio == 0) {
+          *(current_u_pointer++) = testing_color_u_[color];
+          *(current_v_pointer++) = testing_color_v_[color];
+        }
+      }
+    }
+    return image_pointer;
+  }
+
+  // Generate a Red-Green-Blue interweaving chessboard-like
+  // YUY2/UYVY testing image.
+  // The pattern looks like c0 c1 c2 c3 ...
+  //                        c1 c2 c3 c4 ...
+  //                        c2 c3 c4 c5 ...
+  //                        ...............
+  // The size of each chroma block is (block_size) x (block_size).
+  uint8* CreateFakeInterleaveYuvTestingImage(
+      int height, int width, int block_size,
+      uint8* &yuv_pointer, FourCC fourcc_type) {
+    if (height <= 0 || width <= 0 || block_size <= 0) { return NULL; }
+    if (fourcc_type != FOURCC_YUY2 && fourcc_type != FOURCC_UYVY) {
+      LOG(LS_ERROR) << "Format " << static_cast<int>(fourcc_type)
+                    << " is not supported.";
+      return NULL;
+    }
+    // Round the width of the output up to an even value.
+    int awidth = (width + 1) & ~1;
+
+    uint8* image_pointer = new uint8[2 * height * awidth + kAlignment];
+    yuv_pointer = ALIGNP(image_pointer, kAlignment);
+    uint8* current_yuv_pointer = yuv_pointer;
+    switch (fourcc_type) {
+      case FOURCC_YUY2: {
+        for (int j = 0; j < height; ++j) {
+          for (int i = 0; i < awidth; i += 2, current_yuv_pointer += 4) {
+            int color1 = ((i / block_size) + (j / block_size)) %
+                kTestingColorNum;
+            int color2 = (((i + 1) / block_size) + (j / block_size)) %
+                kTestingColorNum;
+            current_yuv_pointer[0] = testing_color_y_[color1];
+            if (i < width) {
+              current_yuv_pointer[1] = static_cast<uint8>(
+                  (static_cast<uint32>(testing_color_u_[color1]) +
+                  static_cast<uint32>(testing_color_u_[color2])) / 2);
+              current_yuv_pointer[2] = testing_color_y_[color2];
+              current_yuv_pointer[3] = static_cast<uint8>(
+                  (static_cast<uint32>(testing_color_v_[color1]) +
+                  static_cast<uint32>(testing_color_v_[color2])) / 2);
+            } else {
+              current_yuv_pointer[1] = testing_color_u_[color1];
+              current_yuv_pointer[2] = 0;
+              current_yuv_pointer[3] = testing_color_v_[color1];
+            }
+          }
+        }
+        break;
+      }
+      case FOURCC_UYVY: {
+        for (int j = 0; j < height; ++j) {
+          for (int i = 0; i < awidth; i += 2, current_yuv_pointer += 4) {
+            int color1 = ((i / block_size) + (j / block_size)) %
+                kTestingColorNum;
+            int color2 = (((i + 1) / block_size) + (j / block_size)) %
+                kTestingColorNum;
+            if (i < width) {
+              current_yuv_pointer[0] = static_cast<uint8>(
+                  (static_cast<uint32>(testing_color_u_[color1]) +
+                  static_cast<uint32>(testing_color_u_[color2])) / 2);
+              current_yuv_pointer[1] = testing_color_y_[color1];
+              current_yuv_pointer[2] = static_cast<uint8>(
+                  (static_cast<uint32>(testing_color_v_[color1]) +
+                  static_cast<uint32>(testing_color_v_[color2])) / 2);
+              current_yuv_pointer[3] = testing_color_y_[color2];
+            } else {
+              current_yuv_pointer[0] = testing_color_u_[color1];
+              current_yuv_pointer[1] = testing_color_y_[color1];
+              current_yuv_pointer[2] = testing_color_v_[color1];
+              current_yuv_pointer[3] = 0;
+            }
+          }
+        }
+        break;
+      }
+    }
+    return image_pointer;
+  }
+  // Generate a Red-Green-Blue interweaving chessboard-like
+  // Q420 testing image.
+  // The pattern looks like c0 c1 c2 c3 ...
+  //                        c1 c2 c3 c4 ...
+  //                        c2 c3 c4 c5 ...
+  //                        ...............
+  // The size of each chroma block is (block_size) x (block_size).
+  uint8* CreateFakeQ420TestingImage(int height, int width, int block_size,
+      uint8* &y_pointer, uint8* &yuy2_pointer) {
+    if (height <= 0 || width <= 0 || block_size <= 0) { return NULL; }
+    // Round the width of the output up to an even value.
+    int awidth = (width + 1) & ~1;
+
+    uint8* image_pointer = new uint8[(height / 2) * awidth * 2 +
+        ((height + 1) / 2) * width + kAlignment];
+    y_pointer = ALIGNP(image_pointer, kAlignment);
+    yuy2_pointer = y_pointer + ((height + 1) / 2) * width;
+    uint8* current_yuy2_pointer = yuy2_pointer;
+    uint8* current_y_pointer = y_pointer;
+    for (int j = 0; j < height; ++j) {
+      if (j % 2 == 0) {
+        for (int i = 0; i < width; ++i) {
+          int color = ((i / block_size) + (j / block_size)) %
+              kTestingColorNum;
+          *(current_y_pointer++) = testing_color_y_[color];
+        }
+      } else {
+        for (int i = 0; i < awidth; i += 2, current_yuy2_pointer += 4) {
+          int color1 = ((i / block_size) + (j / block_size)) %
+              kTestingColorNum;
+          int color2 = (((i + 1) / block_size) + (j / block_size)) %
+              kTestingColorNum;
+          current_yuy2_pointer[0] = testing_color_y_[color1];
+          if (i < width) {
+            current_yuy2_pointer[1] = static_cast<uint8>(
+                (static_cast<uint32>(testing_color_u_[color1]) +
+                static_cast<uint32>(testing_color_u_[color2])) / 2);
+            current_yuy2_pointer[2] = testing_color_y_[color2];
+            current_yuy2_pointer[3] = static_cast<uint8>(
+                (static_cast<uint32>(testing_color_v_[color1]) +
+                static_cast<uint32>(testing_color_v_[color2])) / 2);
+          } else {
+            current_yuy2_pointer[1] = testing_color_u_[color1];
+            current_yuy2_pointer[2] = 0;
+            current_yuy2_pointer[3] = testing_color_v_[color1];
+          }
+        }
+      }
+    }
+    return image_pointer;
+  }
+
+  // Generate a Red-Green-Blue interweaving chessboard-like
+  // NV12 testing image.
+  // (Note: No interpolation is used.)
+  // The pattern looks like c0 c1 c2 c3 ...
+  //                        c1 c2 c3 c4 ...
+  //                        c2 c3 c4 c5 ...
+  //                        ...............
+  // The size of each chroma block is (block_size) x (block_size).
+  uint8* CreateFakeNV12TestingImage(int height, int width, int block_size,
+      uint8* &y_pointer, uint8* &uv_pointer) {
+    if (height <= 0 || width <= 0 || block_size <= 0) { return NULL; }
+
+    uint8* image_pointer = new uint8[height * width +
+        ((height + 1) / 2) * ((width + 1) / 2) * 2 + kAlignment];
+    y_pointer = ALIGNP(image_pointer, kAlignment);
+    uv_pointer = y_pointer + height * width;
+    uint8* current_uv_pointer = uv_pointer;
+    uint8* current_y_pointer = y_pointer;
+    for (int j = 0; j < height; ++j) {
+      for (int i = 0; i < width; ++i) {
+        int color = ((i / block_size) + (j / block_size)) %
+            kTestingColorNum;
+        *(current_y_pointer++) = testing_color_y_[color];
+      }
+      if (j % 2 == 0) {
+        for (int i = 0; i < width; i += 2, current_uv_pointer += 2) {
+          int color = ((i / block_size) + (j / block_size)) %
+              kTestingColorNum;
+          current_uv_pointer[0] = testing_color_u_[color];
+          current_uv_pointer[1] = testing_color_v_[color];
+        }
+      }
+    }
+    return image_pointer;
+  }
+
+  // Generate a Red-Green-Blue interweaving chessboard-like
+  // M420 testing image.
+  // (Note: No interpolation is used.)
+  // The pattern looks like c0 c1 c2 c3 ...
+  //                        c1 c2 c3 c4 ...
+  //                        c2 c3 c4 c5 ...
+  //                        ...............
+  // The size of each chroma block is (block_size) x (block_size).
+  uint8* CreateFakeM420TestingImage(
+      int height, int width, int block_size, uint8* &m420_pointer) {
+    if (height <= 0 || width <= 0 || block_size <= 0) { return NULL; }
+
+    uint8* image_pointer = new uint8[height * width +
+        ((height + 1) / 2) * ((width + 1) / 2) * 2 + kAlignment];
+    m420_pointer = ALIGNP(image_pointer, kAlignment);
+    uint8* current_m420_pointer = m420_pointer;
+    for (int j = 0; j < height; ++j) {
+      for (int i = 0; i < width; ++i) {
+        int color = ((i / block_size) + (j / block_size)) %
+            kTestingColorNum;
+        *(current_m420_pointer++) = testing_color_y_[color];
+      }
+      if (j % 2 == 1) {
+        for (int i = 0; i < width; i += 2, current_m420_pointer += 2) {
+          int color = ((i / block_size) + ((j - 1) / block_size)) %
+              kTestingColorNum;
+          current_m420_pointer[0] = testing_color_u_[color];
+          current_m420_pointer[1] = testing_color_v_[color];
+        }
+      }
+    }
+    return image_pointer;
+  }
+
+  // Generate a Red-Green-Blue interweaving chessboard-like
+  // ARGB/ABGR/RAW/BG24 testing image.
+  // The pattern looks like c0 c1 c2 c3 ...
+  //                        c1 c2 c3 c4 ...
+  //                        c2 c3 c4 c5 ...
+  //                        ...............
+  // The size of each chroma block is (block_size) x (block_size).
+  uint8* CreateFakeArgbTestingImage(int height, int width, int block_size,
+                                    uint8* &argb_pointer, FourCC fourcc_type) {
+    if (height <= 0 || width <= 0 || block_size <= 0) { return NULL; }
+    uint8* image_pointer = NULL;
+    if (fourcc_type == FOURCC_ABGR || fourcc_type == FOURCC_BGRA ||
+        fourcc_type == FOURCC_ARGB) {
+      image_pointer = new uint8[height * width * 4 + kAlignment];
+    } else if (fourcc_type == FOURCC_RAW || fourcc_type == FOURCC_24BG) {
+      image_pointer = new uint8[height * width * 3 + kAlignment];
+    } else {
+      LOG(LS_ERROR) << "Format " << static_cast<int>(fourcc_type)
+                    << " is not supported.";
+      return NULL;
+    }
+    argb_pointer = ALIGNP(image_pointer, kAlignment);
+    uint8* current_pointer = argb_pointer;
+    switch (fourcc_type) {
+      case FOURCC_ARGB: {
+        for (int j = 0; j < height; ++j) {
+          for (int i = 0; i < width; ++i) {
+            int color = ((i / block_size) + (j / block_size)) %
+                kTestingColorNum;
+            *(current_pointer++) = testing_color_b_[color];
+            *(current_pointer++) = testing_color_g_[color];
+            *(current_pointer++) = testing_color_r_[color];
+            *(current_pointer++) = 255;
+          }
+        }
+        break;
+      }
+      case FOURCC_ABGR: {
+        for (int j = 0; j < height; ++j) {
+          for (int i = 0; i < width; ++i) {
+            int color = ((i / block_size) + (j / block_size)) %
+                kTestingColorNum;
+            *(current_pointer++) = testing_color_r_[color];
+            *(current_pointer++) = testing_color_g_[color];
+            *(current_pointer++) = testing_color_b_[color];
+            *(current_pointer++) = 255;
+          }
+        }
+        break;
+      }
+      case FOURCC_BGRA: {
+        for (int j = 0; j < height; ++j) {
+          for (int i = 0; i < width; ++i) {
+            int color = ((i / block_size) + (j / block_size)) %
+                kTestingColorNum;
+            *(current_pointer++) = 255;
+            *(current_pointer++) = testing_color_r_[color];
+            *(current_pointer++) = testing_color_g_[color];
+            *(current_pointer++) = testing_color_b_[color];
+          }
+        }
+        break;
+      }
+      case FOURCC_24BG: {
+        for (int j = 0; j < height; ++j) {
+          for (int i = 0; i < width; ++i) {
+            int color = ((i / block_size) + (j / block_size)) %
+                kTestingColorNum;
+            *(current_pointer++) = testing_color_b_[color];
+            *(current_pointer++) = testing_color_g_[color];
+            *(current_pointer++) = testing_color_r_[color];
+          }
+        }
+        break;
+      }
+      case FOURCC_RAW: {
+        for (int j = 0; j < height; ++j) {
+          for (int i = 0; i < width; ++i) {
+            int color = ((i / block_size) + (j / block_size)) %
+                kTestingColorNum;
+            *(current_pointer++) = testing_color_r_[color];
+            *(current_pointer++) = testing_color_g_[color];
+            *(current_pointer++) = testing_color_b_[color];
+          }
+        }
+        break;
+      }
+      default: {
+        LOG(LS_ERROR) << "Format " << static_cast<int>(fourcc_type)
+                      << " is not supported.";
+      }
+    }
+    return image_pointer;
+  }
+
+  // Check if two memory chunks are equal, tolerating a mean squared error
+  // below the given threshold.
+  static bool IsMemoryEqual(const uint8* ibuf, const uint8* obuf,
+                            int osize, double average_error) {
+    double sse = cricket::ComputeSumSquareError(ibuf, obuf, osize);
+    double error = sse / osize;  // Mean Squared Error.
+    double PSNR = cricket::ComputePSNR(sse, osize);
+    LOG(LS_INFO) << "Image MSE: "  << error << " Image PSNR: " << PSNR
+                 << " First Diff Byte: " << FindDiff(ibuf, obuf, osize);
+    return (error < average_error);
+  }
+
+  // Returns the index of the first differing byte. Easier to debug than memcmp.
+  static int FindDiff(const uint8* buf1, const uint8* buf2, int len) {
+    int i = 0;
+    while (i < len && buf1[i] == buf2[i]) {
+      i++;
+    }
+    return (i < len) ? i : -1;
+  }
+
+  // Dump the result image (ARGB format).
+  void DumpArgbImage(const uint8* obuf, int width, int height) {
+    DumpPlanarArgbTestImage(GetTestName(), obuf, width, height);
+  }
+
+  // Dump the result image (YUV420 format).
+  void DumpYuvImage(const uint8* obuf, int width, int height) {
+    DumpPlanarYuvTestImage(GetTestName(), obuf, width, height);
+  }
+
+  std::string GetTestName() {
+    const testing::TestInfo* const test_info =
+        testing::UnitTest::GetInstance()->current_test_info();
+    std::string test_name(test_info->name());
+    return test_name;
+  }
+
+  bool dump_;
+  int repeat_;
+
+  // Y, U, V and R, G, B channels of testing colors.
+  talk_base::scoped_ptr<uint8[]> testing_color_y_;
+  talk_base::scoped_ptr<uint8[]> testing_color_u_;
+  talk_base::scoped_ptr<uint8[]> testing_color_v_;
+  talk_base::scoped_ptr<uint8[]> testing_color_r_;
+  talk_base::scoped_ptr<uint8[]> testing_color_g_;
+  talk_base::scoped_ptr<uint8[]> testing_color_b_;
+};
+
+TEST_F(PlanarFunctionsTest, I420Copy) {
+  uint8 *y_pointer = NULL, *u_pointer = NULL, *v_pointer = NULL;
+  int y_pitch = kWidth;
+  int u_pitch = (kWidth + 1) >> 1;
+  int v_pitch = (kWidth + 1) >> 1;
+  int y_size = kHeight * kWidth;
+  int uv_size = ((kHeight + 1) >> 1) * ((kWidth + 1) >> 1);
+  int block_size = 3;
+  // Generate a fake input image.
+  talk_base::scoped_ptr<uint8[]> yuv_input(
+      CreateFakeYuvTestingImage(kHeight, kWidth, block_size,
+                                libyuv::kJpegYuv420,
+                                y_pointer, u_pointer, v_pointer));
+  // Allocate space for the output image.
+  talk_base::scoped_ptr<uint8[]> yuv_output(
+      new uint8[I420_SIZE(kHeight, kWidth) + kAlignment]);
+  uint8 *y_output_pointer = ALIGNP(yuv_output.get(), kAlignment);
+  uint8 *u_output_pointer = y_output_pointer + y_size;
+  uint8 *v_output_pointer = u_output_pointer + uv_size;
+
+  for (int i = 0; i < repeat_; ++i) {
+    libyuv::I420Copy(y_pointer, y_pitch,
+                     u_pointer, u_pitch,
+                     v_pointer, v_pitch,
+                     y_output_pointer, y_pitch,
+                     u_output_pointer, u_pitch,
+                     v_output_pointer, v_pitch,
+                     kWidth, kHeight);
+  }
+
+  // Expect the copied frame to be exactly the same.
+  EXPECT_TRUE(IsMemoryEqual(y_output_pointer, y_pointer,
+      I420_SIZE(kHeight, kWidth), 1.e-6));
+
+  if (dump_) { DumpYuvImage(y_output_pointer, kWidth, kHeight); }
+}
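+
+// A note on buffer sizing used throughout these tests: an I420 frame packs a
+// full-resolution Y plane followed by quarter-resolution U and V planes, so
+// I420_SIZE(height, width) is expected to equal
+// height * width + 2 * (((height + 1) / 2) * ((width + 1) / 2)),
+// i.e. about 1.5 bytes per pixel.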
+
+TEST_F(PlanarFunctionsTest, I422ToI420) {
+  uint8 *y_pointer = NULL, *u_pointer = NULL, *v_pointer = NULL;
+  int y_pitch = kWidth;
+  int u_pitch = (kWidth + 1) >> 1;
+  int v_pitch = (kWidth + 1) >> 1;
+  int y_size = kHeight * kWidth;
+  int uv_size = ((kHeight + 1) >> 1) * ((kWidth + 1) >> 1);
+  int block_size = 2;
+  // Generate a fake input image.
+  talk_base::scoped_ptr<uint8[]> yuv_input(
+      CreateFakeYuvTestingImage(kHeight, kWidth, block_size,
+                                libyuv::kJpegYuv422,
+                                y_pointer, u_pointer, v_pointer));
+  // Allocate space for the output image.
+  talk_base::scoped_ptr<uint8[]> yuv_output(
+      new uint8[I420_SIZE(kHeight, kWidth) + kAlignment]);
+  uint8 *y_output_pointer = ALIGNP(yuv_output.get(), kAlignment);
+  uint8 *u_output_pointer = y_output_pointer + y_size;
+  uint8 *v_output_pointer = u_output_pointer + uv_size;
+  // Generate the expected output.
+  uint8 *y_expected_pointer = NULL, *u_expected_pointer = NULL,
+        *v_expected_pointer = NULL;
+  talk_base::scoped_ptr<uint8[]> yuv_output_expected(
+      CreateFakeYuvTestingImage(kHeight, kWidth, block_size,
+          libyuv::kJpegYuv420,
+          y_expected_pointer, u_expected_pointer, v_expected_pointer));
+
+  for (int i = 0; i < repeat_; ++i) {
+    libyuv::I422ToI420(y_pointer, y_pitch,
+                       u_pointer, u_pitch,
+                       v_pointer, v_pitch,
+                       y_output_pointer, y_pitch,
+                       u_output_pointer, u_pitch,
+                       v_output_pointer, v_pitch,
+                       kWidth, kHeight);
+  }
+
+  // Compare the output frame with what is expected; expect exactly the same.
+  // Note: MSE should be set to a larger threshold if an odd block width
+  // is used, since the conversion will be lossy.
+  EXPECT_TRUE(IsMemoryEqual(y_output_pointer, y_expected_pointer,
+      I420_SIZE(kHeight, kWidth), 1.e-6));
+
+  if (dump_) { DumpYuvImage(y_output_pointer, kWidth, kHeight); }
+}
+
+TEST_P(PlanarFunctionsTest, Q420ToI420) {
+  // Get the unalignment offset.
+  int unalignment = GetParam();
+  uint8 *y_pointer = NULL, *yuy2_pointer = NULL;
+  int y_pitch = kWidth;
+  int yuy2_pitch = 2 * ((kWidth + 1) & ~1);
+  int u_pitch = (kWidth + 1) >> 1;
+  int v_pitch = (kWidth + 1) >> 1;
+  int y_size = kHeight * kWidth;
+  int uv_size = ((kHeight + 1) >> 1) * ((kWidth + 1) >> 1);
+  int block_size = 2;
+  // Generate a fake input image.
+  talk_base::scoped_ptr<uint8[]> yuv_input(
+      CreateFakeQ420TestingImage(kHeight, kWidth, block_size,
+                                 y_pointer, yuy2_pointer));
+  // Allocate space for the output image.
+  talk_base::scoped_ptr<uint8[]> yuv_output(
+      new uint8[I420_SIZE(kHeight, kWidth) + kAlignment + unalignment]);
+  uint8 *y_output_pointer = ALIGNP(yuv_output.get(), kAlignment) +
+      unalignment;
+  uint8 *u_output_pointer = y_output_pointer + y_size;
+  uint8 *v_output_pointer = u_output_pointer + uv_size;
+  // Generate the expected output.
+  uint8 *y_expected_pointer = NULL, *u_expected_pointer = NULL,
+        *v_expected_pointer = NULL;
+  talk_base::scoped_ptr<uint8[]> yuv_output_expected(
+      CreateFakeYuvTestingImage(kHeight, kWidth, block_size,
+          libyuv::kJpegYuv420,
+          y_expected_pointer, u_expected_pointer, v_expected_pointer));
+
+  for (int i = 0; i < repeat_; ++i) {
+    libyuv::Q420ToI420(y_pointer, y_pitch,
+                       yuy2_pointer, yuy2_pitch,
+                       y_output_pointer, y_pitch,
+                       u_output_pointer, u_pitch,
+                       v_output_pointer, v_pitch,
+                       kWidth, kHeight);
+  }
+  // Compare the output frame with what is expected; expect exactly the same.
+  // Note: MSE should be set to a larger threshold if an odd block width
+  // is used, since the conversion will be lossy.
+  EXPECT_TRUE(IsMemoryEqual(y_output_pointer, y_expected_pointer,
+      I420_SIZE(kHeight, kWidth), 1.e-6));
+
+  if (dump_) { DumpYuvImage(y_output_pointer, kWidth, kHeight); }
+}
+
+TEST_P(PlanarFunctionsTest, M420ToI420) {
+  // Get the unalignment offset
+  int unalignment = GetParam();
+  uint8 *m420_pointer = NULL;
+  int y_pitch = kWidth;
+  int m420_pitch = kWidth;
+  int u_pitch = (kWidth + 1) >> 1;
+  int v_pitch = (kWidth + 1) >> 1;
+  int y_size = kHeight * kWidth;
+  int uv_size = ((kHeight + 1) >> 1) * ((kWidth + 1) >> 1);
+  int block_size = 2;
+  // Generate a fake input image.
+  talk_base::scoped_ptr<uint8[]> yuv_input(
+      CreateFakeM420TestingImage(kHeight, kWidth, block_size, m420_pointer));
+  // Allocate space for the output image.
+  talk_base::scoped_ptr<uint8[]> yuv_output(
+      new uint8[I420_SIZE(kHeight, kWidth) + kAlignment + unalignment]);
+  uint8 *y_output_pointer = ALIGNP(yuv_output.get(), kAlignment) + unalignment;
+  uint8 *u_output_pointer = y_output_pointer + y_size;
+  uint8 *v_output_pointer = u_output_pointer + uv_size;
+  // Generate the expected output.
+  uint8 *y_expected_pointer = NULL, *u_expected_pointer = NULL,
+        *v_expected_pointer = NULL;
+  talk_base::scoped_ptr<uint8[]> yuv_output_expected(
+      CreateFakeYuvTestingImage(kHeight, kWidth, block_size,
+          libyuv::kJpegYuv420,
+          y_expected_pointer, u_expected_pointer, v_expected_pointer));
+
+  for (int i = 0; i < repeat_; ++i) {
+    libyuv::M420ToI420(m420_pointer, m420_pitch,
+                       y_output_pointer, y_pitch,
+                       u_output_pointer, u_pitch,
+                       v_output_pointer, v_pitch,
+                       kWidth, kHeight);
+  }
+  // Compare the output frame with what is expected; expect exactly the same.
+  // Note: MSE should be set to a larger threshold if an odd block width
+  // is used, since the conversion will be lossy.
+  EXPECT_TRUE(IsMemoryEqual(y_output_pointer, y_expected_pointer,
+      I420_SIZE(kHeight, kWidth), 1.e-6));
+
+  if (dump_) { DumpYuvImage(y_output_pointer, kWidth, kHeight); }
+}
+
+TEST_P(PlanarFunctionsTest, NV12ToI420) {
+  // Get the unalignment offset
+  int unalignment = GetParam();
+  uint8 *y_pointer = NULL, *uv_pointer = NULL;
+  int y_pitch = kWidth;
+  int uv_pitch = 2 * ((kWidth + 1) >> 1);
+  int u_pitch = (kWidth + 1) >> 1;
+  int v_pitch = (kWidth + 1) >> 1;
+  int y_size = kHeight * kWidth;
+  int uv_size = ((kHeight + 1) >> 1) * ((kWidth + 1) >> 1);
+  int block_size = 2;
+  // Generate a fake input image.
+  talk_base::scoped_ptr<uint8[]> yuv_input(
+      CreateFakeNV12TestingImage(kHeight, kWidth, block_size,
+                                 y_pointer, uv_pointer));
+  // Allocate space for the output image.
+  talk_base::scoped_ptr<uint8[]> yuv_output(
+      new uint8[I420_SIZE(kHeight, kWidth) + kAlignment + unalignment]);
+  uint8 *y_output_pointer = ALIGNP(yuv_output.get(), kAlignment) + unalignment;
+  uint8 *u_output_pointer = y_output_pointer + y_size;
+  uint8 *v_output_pointer = u_output_pointer + uv_size;
+  // Generate the expected output.
+  uint8 *y_expected_pointer = NULL, *u_expected_pointer = NULL,
+        *v_expected_pointer = NULL;
+  talk_base::scoped_ptr<uint8[]> yuv_output_expected(
+      CreateFakeYuvTestingImage(kHeight, kWidth, block_size,
+          libyuv::kJpegYuv420,
+          y_expected_pointer, u_expected_pointer, v_expected_pointer));
+
+  for (int i = 0; i < repeat_; ++i) {
+    libyuv::NV12ToI420(y_pointer, y_pitch,
+                       uv_pointer, uv_pitch,
+                       y_output_pointer, y_pitch,
+                       u_output_pointer, u_pitch,
+                       v_output_pointer, v_pitch,
+                       kWidth, kHeight);
+  }
+  // Compare the output frame with what is expected; expect exactly the same.
+  // Note: MSE should be set to a larger threshold if an odd block width
+  // is used, since the conversion will be lossy.
+  EXPECT_TRUE(IsMemoryEqual(y_output_pointer, y_expected_pointer,
+      I420_SIZE(kHeight, kWidth), 1.e-6));
+
+  if (dump_) { DumpYuvImage(y_output_pointer, kWidth, kHeight); }
+}
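+
+// For reference, NV12 stores a full-resolution Y plane followed by one
+// half-resolution plane of interleaved U/V pairs, which is why uv_pitch above
+// is twice the per-channel chroma pitch:
+//   Y plane:  Y Y Y Y ...  (kWidth bytes per row)
+//   UV plane: U V U V ...  (2 * ((kWidth + 1) >> 1) bytes per row)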
+
+// A common macro for testing conversion of YUY2/UYVY to I420.
+#define TEST_YUVTOI420(SRC_NAME, MSE, BLOCK_SIZE) \
+TEST_P(PlanarFunctionsTest, SRC_NAME##ToI420) { \
+  /* Get the unalignment offset.*/ \
+  int unalignment = GetParam(); \
+  uint8 *yuv_pointer = NULL; \
+  int yuv_pitch = 2 * ((kWidth + 1) & ~1); \
+  int y_pitch = kWidth; \
+  int u_pitch = (kWidth + 1) >> 1; \
+  int v_pitch = (kWidth + 1) >> 1; \
+  int y_size = kHeight * kWidth; \
+  int uv_size = ((kHeight + 1) >> 1) * ((kWidth + 1) >> 1); \
+  /* Generate a fake input image.*/ \
+  talk_base::scoped_ptr<uint8[]> yuv_input( \
+      CreateFakeInterleaveYuvTestingImage(kHeight, kWidth, BLOCK_SIZE, \
+          yuv_pointer, FOURCC_##SRC_NAME)); \
+  /* Allocate space for the output image.*/ \
+  talk_base::scoped_ptr<uint8[]> yuv_output( \
+      new uint8[I420_SIZE(kHeight, kWidth) + kAlignment + unalignment]); \
+  uint8 *y_output_pointer = ALIGNP(yuv_output.get(), kAlignment) + \
+      unalignment; \
+  uint8 *u_output_pointer = y_output_pointer + y_size; \
+  uint8 *v_output_pointer = u_output_pointer + uv_size; \
+  /* Generate the expected output.*/ \
+  uint8 *y_expected_pointer = NULL, *u_expected_pointer = NULL, \
+        *v_expected_pointer = NULL; \
+  talk_base::scoped_ptr<uint8[]> yuv_output_expected( \
+      CreateFakeYuvTestingImage(kHeight, kWidth, BLOCK_SIZE, \
+          libyuv::kJpegYuv420, \
+          y_expected_pointer, u_expected_pointer, v_expected_pointer)); \
+  for (int i = 0; i < repeat_; ++i) { \
+    libyuv::SRC_NAME##ToI420(yuv_pointer, yuv_pitch, \
+                             y_output_pointer, y_pitch, \
+                             u_output_pointer, u_pitch, \
+                             v_output_pointer, v_pitch, \
+                             kWidth, kHeight); \
+  } \
+  /* Compare the output frame with what is expected.*/ \
+  /* Note: MSE should be set to a larger threshold if an odd block width*/ \
+  /* is used, since the conversion will be lossy.*/ \
+  EXPECT_TRUE(IsMemoryEqual(y_output_pointer, y_expected_pointer, \
+      I420_SIZE(kHeight, kWidth), MSE)); \
+  if (dump_) { DumpYuvImage(y_output_pointer, kWidth, kHeight); } \
+}
+
+// TEST_P(PlanarFunctionsTest, YUY2ToI420)
+TEST_YUVTOI420(YUY2, 1.e-6, 2);
+// TEST_P(PlanarFunctionsTest, UYVYToI420)
+TEST_YUVTOI420(UYVY, 1.e-6, 2);
+
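+// For reference, the two packed 4:2:2 layouts exercised above store each pair
+// of horizontally adjacent pixels in four bytes:
+//   YUY2: Y0 U Y1 V
+//   UYVY: U Y0 V Y1
+// Two luma samples share one chroma pair, giving 2 bytes per pixel, hence
+// yuv_pitch = 2 * ((kWidth + 1) & ~1) above.
+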
+// A common macro for testing conversion of I420/I422/I444 to ARGB, BGRA,
+// and ABGR.
+#define TEST_YUVTORGB(SRC_NAME, DST_NAME, JPG_TYPE, MSE, BLOCK_SIZE) \
+TEST_F(PlanarFunctionsTest, SRC_NAME##To##DST_NAME) { \
+  uint8 *y_pointer = NULL, *u_pointer = NULL, *v_pointer = NULL; \
+  uint8 *argb_expected_pointer = NULL; \
+  int y_pitch = kWidth; \
+  int u_pitch = (kWidth + 1) >> 1; \
+  int v_pitch = (kWidth + 1) >> 1; \
+  /* Generate a fake input image.*/ \
+  talk_base::scoped_ptr<uint8[]> yuv_input( \
+      CreateFakeYuvTestingImage(kHeight, kWidth, BLOCK_SIZE, JPG_TYPE, \
+                                y_pointer, u_pointer, v_pointer)); \
+  /* Generate the expected output.*/ \
+  talk_base::scoped_ptr<uint8[]> argb_expected( \
+      CreateFakeArgbTestingImage(kHeight, kWidth, BLOCK_SIZE, \
+                                 argb_expected_pointer, FOURCC_##DST_NAME)); \
+  /* Allocate space for the output.*/ \
+  talk_base::scoped_ptr<uint8[]> argb_output( \
+      new uint8[kHeight * kWidth * 4 + kAlignment]); \
+  uint8 *argb_pointer = ALIGNP(argb_output.get(), kAlignment); \
+  for (int i = 0; i < repeat_; ++i) { \
+    libyuv::SRC_NAME##To##DST_NAME(y_pointer, y_pitch, \
+                                   u_pointer, u_pitch, \
+                                   v_pointer, v_pitch, \
+                                   argb_pointer, \
+                                   kWidth * 4, \
+                                   kWidth, kHeight); \
+  } \
+  EXPECT_TRUE(IsMemoryEqual(argb_expected_pointer, argb_pointer, \
+                            kHeight * kWidth * 4, MSE)); \
+  if (dump_) { DumpArgbImage(argb_pointer, kWidth, kHeight); } \
+}
+
+// TEST_F(PlanarFunctionsTest, I420ToARGB)
+TEST_YUVTORGB(I420, ARGB, libyuv::kJpegYuv420, 3., 2);
+// TEST_F(PlanarFunctionsTest, I420ToABGR)
+TEST_YUVTORGB(I420, ABGR, libyuv::kJpegYuv420, 3., 2);
+// TEST_F(PlanarFunctionsTest, I420ToBGRA)
+TEST_YUVTORGB(I420, BGRA, libyuv::kJpegYuv420, 3., 2);
+// TEST_F(PlanarFunctionsTest, I422ToARGB)
+TEST_YUVTORGB(I422, ARGB, libyuv::kJpegYuv422, 3., 2);
+// TEST_F(PlanarFunctionsTest, I444ToARGB)
+TEST_YUVTORGB(I444, ARGB, libyuv::kJpegYuv444, 3., 3);
+// Note: an empirical MSE tolerance of 3.0 is used above to allow for
+// rounding error introduced by the float-to-uint8 conversion.
+
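+// For reference, a minimal sketch of the MSE-based fuzzy comparison these
+// tests rely on; a hypothetical equivalent of IsMemoryEqual, not its actual
+// implementation:
+//
+//   static bool MseWithin(const uint8 *a, const uint8 *b, int len,
+//                         double max_mse) {
+//     double sse = 0.;
+//     for (int i = 0; i < len; ++i) {
+//       double d = static_cast<double>(a[i]) - b[i];
+//       sse += d * d;
+//     }
+//     return (sse / len) <= max_mse;  // Mean squared error vs. threshold.
+//   }
+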
+TEST_F(PlanarFunctionsTest, I400ToARGB_Reference) {
+  uint8 *y_pointer = NULL, *u_pointer = NULL, *v_pointer = NULL;
+  int y_pitch = kWidth;
+  int u_pitch = (kWidth + 1) >> 1;
+  int v_pitch = (kWidth + 1) >> 1;
+  int block_size = 3;
+  // Generate a fake input image.
+  talk_base::scoped_ptr<uint8[]> yuv_input(
+      CreateFakeYuvTestingImage(kHeight, kWidth, block_size,
+                                libyuv::kJpegYuv420,
+                                y_pointer, u_pointer, v_pointer));
+  // As the reference, we convert a grayscale image (with both the U and V
+  // channels set to 128) using the I420 converter.
+  int uv_size = ((kHeight + 1) >> 1) * ((kWidth + 1) >> 1);
+
+  talk_base::scoped_ptr<uint8[]> uv(new uint8[uv_size + kAlignment]);
+  u_pointer = v_pointer = ALIGNP(uv.get(), kAlignment);
+  memset(u_pointer, 128, uv_size);
+
+  // Allocate space for the output image and generate the expected output.
+  talk_base::scoped_ptr<uint8[]> argb_expected(
+      new uint8[kHeight * kWidth * 4 + kAlignment]);
+  talk_base::scoped_ptr<uint8[]> argb_output(
+      new uint8[kHeight * kWidth * 4 + kAlignment]);
+  uint8 *argb_expected_pointer = ALIGNP(argb_expected.get(), kAlignment);
+  uint8 *argb_pointer = ALIGNP(argb_output.get(), kAlignment);
+
+  libyuv::I420ToARGB(y_pointer, y_pitch,
+                     u_pointer, u_pitch,
+                     v_pointer, v_pitch,
+                     argb_expected_pointer, kWidth * 4,
+                     kWidth, kHeight);
+  for (int i = 0; i < repeat_; ++i) {
+    libyuv::I400ToARGB_Reference(y_pointer, y_pitch,
+                                 argb_pointer, kWidth * 4,
+                                 kWidth, kHeight);
+  }
+
+  // Note: I420ToARGB and I400ToARGB_Reference should produce nearly identical
+  // results; a small tolerance absorbs rounding differences.
+  EXPECT_TRUE(IsMemoryEqual(argb_expected_pointer, argb_pointer,
+                            kHeight * kWidth * 4, 2.));
+  if (dump_) { DumpArgbImage(argb_pointer, kWidth, kHeight); }
+}
+
+TEST_P(PlanarFunctionsTest, I400ToARGB) {
+  // Get the unalignment offset
+  int unalignment = GetParam();
+  uint8 *y_pointer = NULL, *u_pointer = NULL, *v_pointer = NULL;
+  int y_pitch = kWidth;
+  int u_pitch = (kWidth + 1) >> 1;
+  int v_pitch = (kWidth + 1) >> 1;
+  int block_size = 3;
+  // Generate a fake input image.
+  talk_base::scoped_ptr<uint8[]> yuv_input(
+      CreateFakeYuvTestingImage(kHeight, kWidth, block_size,
+                                libyuv::kJpegYuv420,
+                                y_pointer, u_pointer, v_pointer));
+  // As the reference, we convert a grayscale image (with both the U and V
+  // channels set to 128) using the I420 converter.
+  int uv_size = ((kHeight + 1) >> 1) * ((kWidth + 1) >> 1);
+
+  talk_base::scoped_ptr<uint8[]> uv(new uint8[uv_size * 2 + kAlignment]);
+  u_pointer = ALIGNP(uv.get(), kAlignment);
+  v_pointer = u_pointer + uv_size;
+  memset(u_pointer, 128, uv_size);
+  memset(v_pointer, 128, uv_size);
+
+  // Allocate space for the output image and generate the expected output.
+  talk_base::scoped_ptr<uint8[]> argb_expected(
+      new uint8[kHeight * kWidth * 4 + kAlignment]);
+  // 1 byte extra when testing the misaligned mode.
+  talk_base::scoped_ptr<uint8[]> argb_output(
+      new uint8[kHeight * kWidth * 4 + kAlignment + unalignment]);
+  uint8 *argb_expected_pointer = ALIGNP(argb_expected.get(), kAlignment);
+  uint8 *argb_pointer = ALIGNP(argb_output.get(), kAlignment) + unalignment;
+
+  libyuv::I420ToARGB(y_pointer, y_pitch,
+                     u_pointer, u_pitch,
+                     v_pointer, v_pitch,
+                     argb_expected_pointer, kWidth * 4,
+                     kWidth, kHeight);
+  for (int i = 0; i < repeat_; ++i) {
+    libyuv::I400ToARGB(y_pointer, y_pitch,
+                       argb_pointer, kWidth * 4,
+                       kWidth, kHeight);
+  }
+
+  // Note: the current I400ToARGB implementation uses an approximate method,
+  // so a larger error tolerance is used here.
+  EXPECT_TRUE(IsMemoryEqual(argb_expected_pointer, argb_pointer,
+                            kHeight * kWidth * 4, 64.0));
+  if (dump_) { DumpArgbImage(argb_pointer, kWidth, kHeight); }
+}
+
+TEST_P(PlanarFunctionsTest, ARGBToI400) {
+  // Get the unalignment offset
+  int unalignment = GetParam();
+  // Create a fake ARGB input image.
+  uint8 *y_pointer = NULL, *u_pointer = NULL, *v_pointer = NULL;
+  uint8 *argb_pointer = NULL;
+  int block_size = 3;
+  // Generate a fake input image.
+  talk_base::scoped_ptr<uint8[]> argb_input(
+      CreateFakeArgbTestingImage(kHeight, kWidth, block_size,
+                                 argb_pointer, FOURCC_ARGB));
+  // Generate the expected output. Only the Y channel is used.
+  talk_base::scoped_ptr<uint8[]> yuv_expected(
+      CreateFakeYuvTestingImage(kHeight, kWidth, block_size,
+                                libyuv::kJpegYuv420,
+                                y_pointer, u_pointer, v_pointer));
+  // Allocate space for the Y output.
+  talk_base::scoped_ptr<uint8[]> y_output(
+      new uint8[kHeight * kWidth + kAlignment + unalignment]);
+  uint8 *y_output_pointer = ALIGNP(y_output.get(), kAlignment) + unalignment;
+
+  for (int i = 0; i < repeat_; ++i) {
+    libyuv::ARGBToI400(argb_pointer, kWidth * 4, y_output_pointer, kWidth,
+                       kWidth, kHeight);
+  }
+  // Check if the output matches the input Y channel.
+  // Note: an empirical MSE tolerance of 2.0 is used here to allow for
+  // rounding error introduced by the float-to-uint8 conversion.
+  EXPECT_TRUE(IsMemoryEqual(y_output_pointer, y_pointer,
+                            kHeight * kWidth, 2.));
+  if (dump_) { DumpArgbImage(argb_pointer, kWidth, kHeight); }
+}
+
+// A common macro for testing conversion of RAW, BG24, BGRA, and ABGR
+// to ARGB.
+#define TEST_ARGB(SRC_NAME, FC_ID, BPP, BLOCK_SIZE) \
+TEST_P(PlanarFunctionsTest, SRC_NAME##ToARGB) { \
+  int unalignment = GetParam();  /* Get the unalignment offset.*/ \
+  uint8 *argb_expected_pointer = NULL, *src_pointer = NULL; \
+  /* Generate a fake input image.*/ \
+  talk_base::scoped_ptr<uint8[]> src_input(  \
+      CreateFakeArgbTestingImage(kHeight, kWidth, BLOCK_SIZE, \
+                                 src_pointer, FOURCC_##FC_ID)); \
+  /* Generate the expected output.*/ \
+  talk_base::scoped_ptr<uint8[]> argb_expected( \
+      CreateFakeArgbTestingImage(kHeight, kWidth, BLOCK_SIZE, \
+                                 argb_expected_pointer, FOURCC_ARGB)); \
+  /* Allocate space for the output; 1 byte extra if in the unaligned mode.*/ \
+  talk_base::scoped_ptr<uint8[]> argb_output( \
+      new uint8[kHeight * kWidth * 4 + kAlignment + unalignment]); \
+  uint8 *argb_pointer = ALIGNP(argb_output.get(), kAlignment) + unalignment; \
+  for (int i = 0; i < repeat_; ++i) { \
+    libyuv::SRC_NAME##ToARGB(src_pointer, kWidth * (BPP), argb_pointer, \
+                              kWidth * 4, kWidth, kHeight); \
+  } \
+  /* Compare the result; expect identical.*/ \
+  EXPECT_TRUE(IsMemoryEqual(argb_expected_pointer, argb_pointer, \
+                            kHeight * kWidth * 4, 1.e-6)); \
+  if (dump_) { DumpArgbImage(argb_pointer, kWidth, kHeight); } \
+}
+
+TEST_ARGB(RAW, RAW, 3, 3);    // TEST_P(PlanarFunctionsTest, RAWToARGB)
+TEST_ARGB(BG24, 24BG, 3, 3);  // TEST_P(PlanarFunctionsTest, BG24ToARGB)
+TEST_ARGB(ABGR, ABGR, 4, 3);  // TEST_P(PlanarFunctionsTest, ABGRToARGB)
+TEST_ARGB(BGRA, BGRA, 4, 3);  // TEST_P(PlanarFunctionsTest, BGRAToARGB)
+
+// Parameterized tests: the parameter is the unalignment offset.
+// Aligned data exercises the assembly (SIMD) versions.
+INSTANTIATE_TEST_CASE_P(PlanarFunctionsAligned, PlanarFunctionsTest,
+    ::testing::Values(0));
+
+// Purposely unalign the output pointers to test the slow path (C version).
+INSTANTIATE_TEST_CASE_P(PlanarFunctionsMisaligned, PlanarFunctionsTest,
+    ::testing::Values(1));
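+
+// For illustration, the pointer setup every parameterized test above uses
+// (ALIGNP rounds a pointer up to the requested alignment):
+//
+//   talk_base::scoped_ptr<uint8[]> buf(
+//       new uint8[size + kAlignment + unalignment]);
+//   uint8 *ptr = ALIGNP(buf.get(), kAlignment) + unalignment;
+//
+// With unalignment == 0 the pointer is 16-byte aligned and the SIMD fast
+// paths can run; with unalignment == 1 it is guaranteed misaligned, forcing
+// the slow C path.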
+
+}  // namespace cricket
diff --git a/talk/session/media/yuvscaler_unittest.cc b/talk/session/media/yuvscaler_unittest.cc
new file mode 100644
index 0000000..93ac534
--- /dev/null
+++ b/talk/session/media/yuvscaler_unittest.cc
@@ -0,0 +1,615 @@
+/*
+ * libjingle
+ * Copyright 2010 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iomanip>
+#include <sstream>
+
+#include "libyuv/cpu_id.h"
+#include "libyuv/scale.h"
+#include "talk/base/basictypes.h"
+#include "talk/base/flags.h"
+#include "talk/base/gunit.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/media/base/testutils.h"
+
+#if defined(_MSC_VER)
+#define ALIGN16(var) __declspec(align(16)) var
+#else
+#define ALIGN16(var) var __attribute__((aligned(16)))
+#endif
+
+using cricket::LoadPlanarYuvTestImage;
+using cricket::DumpPlanarYuvTestImage;
+using talk_base::scoped_ptr;
+
+DEFINE_bool(yuvscaler_dump, false,
+    "whether to write out scaled images for inspection");
+DEFINE_int(yuvscaler_repeat, 1,
+    "how many times to perform each scaling operation (for perf testing)");
+
+static const int kAlignment = 16;
+
+// TEST_UNCACHED flushes the cache to test real memory performance.
+// TEST_RSTSC uses CPU cycle counts (rdtsc) for a more accurate benchmark of
+// the scale function.
+#ifndef __arm__
+// #define TEST_UNCACHED 1
+// #define TEST_RSTSC 1
+#endif
+
+#if defined(TEST_UNCACHED) || defined(TEST_RSTSC)
+#ifdef _MSC_VER
+#include <emmintrin.h>  // NOLINT
+#endif
+
+#if defined(__GNUC__) && defined(__i386__)
+static inline uint64 __rdtsc(void) {
+  uint32_t a, d;
+  __asm__ volatile("rdtsc" : "=a" (a), "=d" (d));
+  return (static_cast<uint64>(d) << 32) + a;
+}
+
+static inline void _mm_clflush(volatile void *__p) {
+  asm volatile("clflush %0" : "+m" (*(volatile char *)__p));
+}
+#endif
+
+static void FlushCache(uint8* dst, int count) {
+  while (count >= 32) {
+    _mm_clflush(dst);
+    dst += 32;
+    count -= 32;
+  }
+}
+#endif
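+
+// For reference, how the two benchmark modes combine when enabled (sketch;
+// both are compiled out by default):
+//   FlushCache(ibuf, isize);         // TEST_UNCACHED: evict input from cache.
+//   uint64 t1 = __rdtsc();           // TEST_RSTSC: read the cycle counter.
+//   libyuv::ScaleOffset(...);        // The operation being measured.
+//   uint64 cycles = __rdtsc() - t1;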
+
+class YuvScalerTest : public testing::Test {
+ protected:
+  virtual void SetUp() {
+    dump_ = *FlagList::Lookup("yuvscaler_dump")->bool_variable();
+    repeat_ = *FlagList::Lookup("yuvscaler_repeat")->int_variable();
+  }
+
+  // Scale an image and compare against a Lanczos-filtered test image.
+  // Lanczos is considered to be the "ideal" image resampling method, so we try
+  // to get as close to that as possible, while being as fast as possible.
+  bool TestScale(int iw, int ih, int ow, int oh, int offset, bool usefile,
+                 bool optimize, int cpuflags, bool interpolate,
+                 int memoffset, double* error) {
+    *error = 0.;
+    size_t isize = I420_SIZE(iw, ih);
+    size_t osize = I420_SIZE(ow, oh);
+    scoped_ptr<uint8[]> ibuffer(new uint8[isize + kAlignment + memoffset]());
+    scoped_ptr<uint8[]> obuffer(new uint8[osize + kAlignment + memoffset]());
+    scoped_ptr<uint8[]> xbuffer(new uint8[osize + kAlignment + memoffset]());
+
+    uint8 *ibuf = ALIGNP(ibuffer.get(), kAlignment) + memoffset;
+    uint8 *obuf = ALIGNP(obuffer.get(), kAlignment) + memoffset;
+    uint8 *xbuf = ALIGNP(xbuffer.get(), kAlignment) + memoffset;
+
+    if (usefile) {
+      if (!LoadPlanarYuvTestImage("faces", iw, ih, ibuf) ||
+          !LoadPlanarYuvTestImage("faces", ow, oh, xbuf)) {
+        LOG(LS_ERROR) << "Failed to load image";
+        return false;
+      }
+    } else {
+      // These are used to test huge images.
+      memset(ibuf, 213, isize);  // Input is constant color.
+      memset(obuf, 100, osize);  // Output set to something wrong for now.
+      memset(xbuf, 213, osize);  // Expected result.
+    }
+
+#ifdef TEST_UNCACHED
+    FlushCache(ibuf, isize);
+    FlushCache(obuf, osize);
+    FlushCache(xbuf, osize);
+#endif
+
+    // Scale the image.
+    // Mask CPU optimizations according to cpuflags; ALLFLAGS (-1) leaves
+    // auto-detection fully enabled.
+    // TODO(fbarchard): set flags for libyuv
+    libyuv::MaskCpuFlags(cpuflags);
+#ifdef TEST_RSTSC
+    uint64 t = 0;
+#endif
+    for (int i = 0; i < repeat_; ++i) {
+#ifdef TEST_UNCACHED
+      FlushCache(ibuf, isize);
+      FlushCache(obuf, osize);
+#endif
+#ifdef TEST_RSTSC
+      uint64 t1 = __rdtsc();
+#endif
+      EXPECT_EQ(0, libyuv::ScaleOffset(ibuf, iw, ih, obuf, ow, oh,
+                                       offset, interpolate));
+#ifdef TEST_RSTSC
+      uint64 t2 = __rdtsc();
+      t += t2 - t1;
+#endif
+    }
+
+#ifdef TEST_RSTSC
+    LOG(LS_INFO) << "Time: " << std::setw(9) << t;
+#endif
+
+    if (dump_) {
+      const testing::TestInfo* const test_info =
+          testing::UnitTest::GetInstance()->current_test_info();
+      std::string test_name(test_info->name());
+      DumpPlanarYuvTestImage(test_name, obuf, ow, oh);
+    }
+
+    double sse = cricket::ComputeSumSquareError(obuf, xbuf, osize);
+    *error = sse / osize;  // Mean Squared Error.
+    double PSNR = cricket::ComputePSNR(sse, osize);
+    LOG(LS_INFO) << "Image MSE: " <<
+      std::setw(6) << std::setprecision(4) << *error <<
+      " Image PSNR: " << PSNR;
+    return true;
+  }
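+
+  // For reference: with 8-bit samples, PSNR = 10 * log10(255^2 / MSE), so an
+  // MSE threshold of 60 in the tests below corresponds to roughly 30 dB, and
+  // a threshold of 0 demands bit-exact output.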
+
+  // Returns the index of the first differing byte. Easier to debug than memcmp.
+  static int FindDiff(const uint8* buf1, const uint8* buf2, int len) {
+    int i = 0;
+    while (i < len && buf1[i] == buf2[i]) {
+      i++;
+    }
+    return (i < len) ? i : -1;
+  }
+
+ protected:
+  bool dump_;
+  int repeat_;
+};
+
+// Tests straight copy of data.
+TEST_F(YuvScalerTest, TestCopy) {
+  const int iw = 640, ih = 360;
+  const int ow = 640, oh = 360;
+  ALIGN16(uint8 ibuf[I420_SIZE(iw, ih)]);
+  ALIGN16(uint8 obuf[I420_SIZE(ow, oh)]);
+
+  // Load the frame, scale it, check it.
+  ASSERT_TRUE(LoadPlanarYuvTestImage("faces", iw, ih, ibuf));
+  for (int i = 0; i < repeat_; ++i) {
+    libyuv::ScaleOffset(ibuf, iw, ih, obuf, ow, oh, 0, false);
+  }
+  if (dump_) DumpPlanarYuvTestImage("TestCopy", obuf, ow, oh);
+  EXPECT_EQ(-1, FindDiff(obuf, ibuf, sizeof(ibuf)));
+}
+
+// Tests offset copy of a 16:9 image into a letterboxed 4:3 frame.
+TEST_F(YuvScalerTest, TestOffset16_10Copy) {
+  const int iw = 640, ih = 360;
+  const int ow = 640, oh = 480;
+  const int offset = (480 - 360) / 2;
+  scoped_ptr<uint8[]> ibuffer(new uint8[I420_SIZE(iw, ih) + kAlignment]);
+  scoped_ptr<uint8[]> obuffer(new uint8[I420_SIZE(ow, oh) + kAlignment]);
+
+  uint8 *ibuf = ALIGNP(ibuffer.get(), kAlignment);
+  uint8 *obuf = ALIGNP(obuffer.get(), kAlignment);
+
+  // Load the frame, scale it, check it.
+  ASSERT_TRUE(LoadPlanarYuvTestImage("faces", iw, ih, ibuf));
+
+  // Clear to black, which is Y = 0 with U = V = 128.
+  memset(obuf, 0, ow * oh);
+  memset(obuf + ow * oh, 128, ow * oh / 2);
+  for (int i = 0; i < repeat_; ++i) {
+    libyuv::ScaleOffset(ibuf, iw, ih, obuf, ow, oh, offset, false);
+  }
+  if (dump_) DumpPlanarYuvTestImage("TestOffsetCopy16_9", obuf, ow, oh);
+  EXPECT_EQ(-1, FindDiff(obuf + ow * offset,
+                         ibuf,
+                         iw * ih));
+  EXPECT_EQ(-1, FindDiff(obuf + ow * oh + ow * offset / 4,
+                         ibuf + iw * ih,
+                         iw * ih / 4));
+  EXPECT_EQ(-1, FindDiff(obuf + ow * oh * 5 / 4 + ow * offset / 4,
+                         ibuf + iw * ih * 5 / 4,
+                         iw * ih / 4));
+}
+
+// The following are 'cpu' flag values:
+// Allow all SIMD optimizations
+#define ALLFLAGS -1
+// Disable SSSE3 but allow other forms of SIMD (SSE2).
+#define NOSSSE3 (~libyuv::kCpuHasSSSE3)
+// Disable SSE2 and SSSE3.
+#define NOSSE (~libyuv::kCpuHasSSE2 & ~libyuv::kCpuHasSSSE3)
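+
+// For example (sketch): libyuv::MaskCpuFlags(NOSSE) clears the SSE2/SSSE3
+// detection bits so the portable C paths run, while MaskCpuFlags(ALLFLAGS)
+// leaves auto-detection fully enabled.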
+
+// TEST_M tests a scale factor with variations of optimization, alignment,
+// and interpolation.
+#define TEST_M(name, iwidth, iheight, owidth, oheight, mse) \
+TEST_F(YuvScalerTest, name##Ref) { \
+  double error; \
+  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
+                        0, true, false, ALLFLAGS, false, 0, &error)); \
+  EXPECT_LE(error, mse); \
+} \
+TEST_F(YuvScalerTest, name##OptAligned) { \
+  double error; \
+  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
+                        0, true, true, ALLFLAGS, false, 0, &error)); \
+  EXPECT_LE(error, mse); \
+} \
+TEST_F(YuvScalerTest, name##OptUnaligned) { \
+  double error; \
+  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
+                        0, true, true, ALLFLAGS, false, 1, &error)); \
+  EXPECT_LE(error, mse); \
+} \
+TEST_F(YuvScalerTest, name##OptSSE2) { \
+  double error; \
+  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
+                        0, true, true, NOSSSE3, false, 0, &error)); \
+  EXPECT_LE(error, mse); \
+} \
+TEST_F(YuvScalerTest, name##OptC) { \
+  double error; \
+  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
+                        0, true, true, NOSSE, false, 0, &error)); \
+  EXPECT_LE(error, mse); \
+} \
+TEST_F(YuvScalerTest, name##IntRef) { \
+  double error; \
+  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
+                        0, true, false, ALLFLAGS, true, 0, &error)); \
+  EXPECT_LE(error, mse); \
+} \
+TEST_F(YuvScalerTest, name##IntOptAligned) { \
+  double error; \
+  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
+                        0, true, true, ALLFLAGS, true, 0, &error)); \
+  EXPECT_LE(error, mse); \
+} \
+TEST_F(YuvScalerTest, name##IntOptUnaligned) { \
+  double error; \
+  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
+                        0, true, true, ALLFLAGS, true, 1, &error)); \
+  EXPECT_LE(error, mse); \
+} \
+TEST_F(YuvScalerTest, name##IntOptSSE2) { \
+  double error; \
+  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
+                        0, true, true, NOSSSE3, true, 0, &error)); \
+  EXPECT_LE(error, mse); \
+} \
+TEST_F(YuvScalerTest, name##IntOptC) { \
+  double error; \
+  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
+                        0, true, true, NOSSE, true, 0, &error)); \
+  EXPECT_LE(error, mse); \
+}
+
+#define TEST_H(name, iwidth, iheight, owidth, oheight, opt, cpu, intr, mse) \
+TEST_F(YuvScalerTest, name) { \
+  double error; \
+  EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
+                        0, false, opt, cpu, intr, 0, &error)); \
+  EXPECT_LE(error, mse); \
+}
+
+// Test 4x3 aspect ratio scaling
+
+// Tests 1/1x scale down.
+TEST_M(TestScale4by3Down11, 640, 480, 640, 480, 0)
+
+// Tests 3/4x scale down.
+TEST_M(TestScale4by3Down34, 640, 480, 480, 360, 60)
+
+// Tests 1/2x scale down.
+TEST_M(TestScale4by3Down12, 640, 480, 320, 240, 60)
+
+// Tests 3/8x scale down.
+TEST_M(TestScale4by3Down38, 640, 480, 240, 180, 60)
+
+// Tests 1/4x scale down.
+TEST_M(TestScale4by3Down14, 640, 480, 160, 120, 60)
+
+// Tests 3/16x scale down.
+TEST_M(TestScale4by3Down316, 640, 480, 120, 90, 120)
+
+// Tests 1/8x scale down.
+TEST_M(TestScale4by3Down18, 640, 480, 80, 60, 150)
+
+// Tests 2/3x scale down.
+TEST_M(TestScale4by3Down23, 480, 360, 320, 240, 60)
+
+// Tests 4/3x scale up.
+TEST_M(TestScale4by3Up43, 480, 360, 640, 480, 60)
+
+// Tests 2/1x scale up.
+TEST_M(TestScale4by3Up21, 320, 240, 640, 480, 60)
+
+// Tests 4/1x scale up.
+TEST_M(TestScale4by3Up41, 160, 120, 640, 480, 80)
+
+// Test 16x10 aspect ratio scaling
+
+// Tests 1/1x scale down.
+TEST_M(TestScale16by10Down11, 640, 400, 640, 400, 0)
+
+// Tests 3/4x scale down.
+TEST_M(TestScale16by10Down34, 640, 400, 480, 300, 60)
+
+// Tests 1/2x scale down.
+TEST_M(TestScale16by10Down12, 640, 400, 320, 200, 60)
+
+// Tests 3/8x scale down.
+TEST_M(TestScale16by10Down38, 640, 400, 240, 150, 60)
+
+// Tests 1/4x scale down.
+TEST_M(TestScale16by10Down14, 640, 400, 160, 100, 60)
+
+// Tests 3/16x scale down.
+TEST_M(TestScale16by10Down316, 640, 400, 120, 75, 120)
+
+// Tests 1/8x scale down.
+TEST_M(TestScale16by10Down18, 640, 400, 80, 50, 150)
+
+// Tests 2/3x scale down.
+TEST_M(TestScale16by10Down23, 480, 300, 320, 200, 60)
+
+// Tests 4/3x scale up.
+TEST_M(TestScale16by10Up43, 480, 300, 640, 400, 60)
+
+// Tests 2/1x scale up.
+TEST_M(TestScale16by10Up21, 320, 200, 640, 400, 60)
+
+// Tests 4/1x scale up.
+TEST_M(TestScale16by10Up41, 160, 100, 640, 400, 80)
+
+// Test 16x9 aspect ratio scaling
+
+// Tests 1/1x scale down.
+TEST_M(TestScaleDown11, 640, 360, 640, 360, 0)
+
+// Tests 3/4x scale down.
+TEST_M(TestScaleDown34, 640, 360, 480, 270, 60)
+
+// Tests 1/2x scale down.
+TEST_M(TestScaleDown12, 640, 360, 320, 180, 60)
+
+// Tests 3/8x scale down.
+TEST_M(TestScaleDown38, 640, 360, 240, 135, 60)
+
+// Tests 1/4x scale down.
+TEST_M(TestScaleDown14, 640, 360, 160, 90, 60)
+
+// Tests 3/16x scale down.
+TEST_M(TestScaleDown316, 640, 360, 120, 68, 120)
+
+// Tests 1/8x scale down.
+TEST_M(TestScaleDown18, 640, 360, 80, 45, 150)
+
+// Tests 2/3x scale down.
+TEST_M(TestScaleDown23, 480, 270, 320, 180, 60)
+
+// Tests 4/3x scale up.
+TEST_M(TestScaleUp43, 480, 270, 640, 360, 60)
+
+// Tests 2/1x scale up.
+TEST_M(TestScaleUp21, 320, 180, 640, 360, 60)
+
+// Tests 4/1x scale up.
+TEST_M(TestScaleUp41, 160, 90, 640, 360, 80)
+
+// Test HD 4x3 aspect ratio scaling
+
+// Tests 1/1x scale down.
+TEST_M(TestScaleHD4x3Down11, 1280, 960, 1280, 960, 0)
+
+// Tests 3/4x scale down.
+TEST_M(TestScaleHD4x3Down34, 1280, 960, 960, 720, 60)
+
+// Tests 1/2x scale down.
+TEST_M(TestScaleHD4x3Down12, 1280, 960, 640, 480, 60)
+
+// Tests 3/8x scale down.
+TEST_M(TestScaleHD4x3Down38, 1280, 960, 480, 360, 60)
+
+// Tests 1/4x scale down.
+TEST_M(TestScaleHD4x3Down14, 1280, 960, 320, 240, 60)
+
+// Tests 3/16x scale down.
+TEST_M(TestScaleHD4x3Down316, 1280, 960, 240, 180, 120)
+
+// Tests 1/8x scale down.
+TEST_M(TestScaleHD4x3Down18, 1280, 960, 160, 120, 150)
+
+// Tests 2/3x scale down.
+TEST_M(TestScaleHD4x3Down23, 960, 720, 640, 480, 60)
+
+// Tests 4/3x scale up.
+TEST_M(TestScaleHD4x3Up43, 960, 720, 1280, 960, 60)
+
+// Tests 2/1x scale up.
+TEST_M(TestScaleHD4x3Up21, 640, 480, 1280, 960, 60)
+
+// Tests 4/1x scale up.
+TEST_M(TestScaleHD4x3Up41, 320, 240, 1280, 960, 80)
+
+// Test HD 16x10 aspect ratio scaling
+
+// Tests 1/1x scale down.
+TEST_M(TestScaleHD16x10Down11, 1280, 800, 1280, 800, 0)
+
+// Tests 3/4x scale down.
+TEST_M(TestScaleHD16x10Down34, 1280, 800, 960, 600, 60)
+
+// Tests 1/2x scale down.
+TEST_M(TestScaleHD16x10Down12, 1280, 800, 640, 400, 60)
+
+// Tests 3/8x scale down.
+TEST_M(TestScaleHD16x10Down38, 1280, 800, 480, 300, 60)
+
+// Tests 1/4x scale down.
+TEST_M(TestScaleHD16x10Down14, 1280, 800, 320, 200, 60)
+
+// Tests 3/16x scale down.
+TEST_M(TestScaleHD16x10Down316, 1280, 800, 240, 150, 120)
+
+// Tests 1/8x scale down.
+TEST_M(TestScaleHD16x10Down18, 1280, 800, 160, 100, 150)
+
+// Tests 2/3x scale down.
+TEST_M(TestScaleHD16x10Down23, 960, 600, 640, 400, 60)
+
+// Tests 4/3x scale up.
+TEST_M(TestScaleHD16x10Up43, 960, 600, 1280, 800, 60)
+
+// Tests 2/1x scale up.
+TEST_M(TestScaleHD16x10Up21, 640, 400, 1280, 800, 60)
+
+// Tests 4/1x scale up.
+TEST_M(TestScaleHD16x10Up41, 320, 200, 1280, 800, 80)
+
+// Test HD 16x9 aspect ratio scaling
+
+// Tests 1/1x scale down.
+TEST_M(TestScaleHDDown11, 1280, 720, 1280, 720, 0)
+
+// Tests 3/4x scale down.
+TEST_M(TestScaleHDDown34, 1280, 720, 960, 540, 60)
+
+// Tests 1/2x scale down.
+TEST_M(TestScaleHDDown12, 1280, 720, 640, 360, 60)
+
+// Tests 3/8x scale down.
+TEST_M(TestScaleHDDown38, 1280, 720, 480, 270, 60)
+
+// Tests 1/4x scale down.
+TEST_M(TestScaleHDDown14, 1280, 720, 320, 180, 60)
+
+// Tests 3/16x scale down.
+TEST_M(TestScaleHDDown316, 1280, 720, 240, 135, 120)
+
+// Tests 1/8x scale down.
+TEST_M(TestScaleHDDown18, 1280, 720, 160, 90, 150)
+
+// Tests 2/3x scale down.
+TEST_M(TestScaleHDDown23, 960, 540, 640, 360, 60)
+
+// Tests 4/3x scale up.
+TEST_M(TestScaleHDUp43, 960, 540, 1280, 720, 60)
+
+// Tests 2/1x scale up.
+TEST_M(TestScaleHDUp21, 640, 360, 1280, 720, 60)
+
+// Tests 4/1x scale up.
+TEST_M(TestScaleHDUp41, 320, 180, 1280, 720, 80)
+
+// Tests 1366x768 resolution for comparison to chromium scaler_bench
+TEST_M(TestScaleHDUp1366, 1280, 720, 1366, 768, 10)
+
+// Tests odd source/dest sizes, 3 less than the even case so the chroma
+// planes are odd-sized as well.
+TEST_M(TestScaleHDUp1363, 1277, 717, 1363, 765, 10)
+
+// Tests 1/2x scale down, using optimized algorithm.
+TEST_M(TestScaleOddDown12, 180, 100, 90, 50, 50)
+
+// Tests bilinear scale down
+TEST_M(TestScaleOddDownBilin, 160, 100, 90, 50, 120)
+
+// Test huge buffer scales that are expected to use a different code path
+// that avoids stack overflow but still work using point sampling.
+// Max output size is 640 wide.
+
+// Tests interpolated 1/8x scale down, using optimized algorithm.
+TEST_H(TestScaleDown18HDOptInt, 6144, 48, 768, 6, true, ALLFLAGS, true, 1)
+
+// Tests interpolated 1/8x scale down, using c_only optimized algorithm.
+TEST_H(TestScaleDown18HDCOnlyOptInt, 6144, 48, 768, 6, true, NOSSE, true, 1)
+
+// Tests interpolated 3/8x scale down, using optimized algorithm.
+TEST_H(TestScaleDown38HDOptInt, 2048, 16, 768, 6, true, ALLFLAGS, true, 1)
+
+// Tests interpolated 3/8x scale down, using no SSSE3 optimized algorithm.
+TEST_H(TestScaleDown38HDNoSSSE3OptInt, 2048, 16, 768, 6, true, NOSSSE3, true, 1)
+
+// Tests interpolated 3/8x scale down, using c_only optimized algorithm.
+TEST_H(TestScaleDown38HDCOnlyOptInt, 2048, 16, 768, 6, true, NOSSE, true, 1)
+
+// Tests interpolated 3/16x scale down, using optimized algorithm.
+TEST_H(TestScaleDown316HDOptInt, 4096, 32, 768, 6, true, ALLFLAGS, true, 1)
+
+// Tests interpolated 3/16x scale down, using no SSSE3 optimized algorithm.
+TEST_H(TestScaleDown316HDNoSSSE3OptInt, 4096, 32, 768, 6, true, NOSSSE3, true,
+       1)
+
+// Tests interpolated 3/16x scale down, using c_only optimized algorithm.
+TEST_H(TestScaleDown316HDCOnlyOptInt, 4096, 32, 768, 6, true, NOSSE, true, 1)
+
+// Tests that special sizes don't crash.
+// Tests scaling down to 1 pixel width
+TEST_H(TestScaleDown1x6OptInt, 3, 24, 1, 6, true, ALLFLAGS, true, 4)
+
+// Tests scaling down to 1 pixel height
+TEST_H(TestScaleDown6x1OptInt, 24, 3, 6, 1, true, ALLFLAGS, true, 4)
+
+// Tests scaling up from 1 pixel width
+TEST_H(TestScaleUp1x6OptInt, 1, 6, 3, 24, true, ALLFLAGS, true, 4)
+
+// Tests scaling up from 1 pixel height
+TEST_H(TestScaleUp6x1OptInt, 6, 1, 24, 3, true, ALLFLAGS, true, 4)
+
+// Test performance of a range of box filter scale sizes
+
+// Tests interpolated 1/2x scale down, using optimized algorithm.
+TEST_H(TestScaleDown2xHDOptInt, 1280, 720, 1280 / 2, 720 / 2, true, ALLFLAGS,
+       true, 1)
+
+// Tests interpolated 1/3x scale down, using optimized algorithm.
+TEST_H(TestScaleDown3xHDOptInt, 1280, 720, 1280 / 3, 720 / 3, true, ALLFLAGS,
+       true, 1)
+
+// Tests interpolated 1/4x scale down, using optimized algorithm.
+TEST_H(TestScaleDown4xHDOptInt, 1280, 720, 1280 / 4, 720 / 4, true, ALLFLAGS,
+       true, 1)
+
+// Tests interpolated 1/5x scale down, using optimized algorithm.
+TEST_H(TestScaleDown5xHDOptInt, 1280, 720, 1280 / 5, 720 / 5, true, ALLFLAGS,
+       true, 1)
+
+// Tests interpolated 1/6x scale down, using optimized algorithm.
+TEST_H(TestScaleDown6xHDOptInt, 1280, 720, 1280 / 6, 720 / 6, true, ALLFLAGS,
+       true, 1)
+
+// Tests interpolated 1/7x scale down, using optimized algorithm.
+TEST_H(TestScaleDown7xHDOptInt, 1280, 720, 1280 / 7, 720 / 7, true, ALLFLAGS,
+       true, 1)
+
+// Tests interpolated 1/8x scale down, using optimized algorithm.
+TEST_H(TestScaleDown8xHDOptInt, 1280, 720, 1280 / 8, 720 / 8, true, ALLFLAGS,
+       true, 1)
+
+// Tests interpolated 1/9x scale down, using optimized algorithm.
+TEST_H(TestScaleDown9xHDOptInt, 1280, 720, 1280 / 9, 720 / 9, true, ALLFLAGS,
+       true, 1)
+
+// Tests interpolated 1/10x scale down, using optimized algorithm.
+TEST_H(TestScaleDown10xHDOptInt, 1280, 720, 1280 / 10, 720 / 10, true, ALLFLAGS,
+       true, 1)