Update talk folder to revision=49713299.

TBR=mallinath@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/1848004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@4380 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/talk/app/webrtc/jsepsessiondescription.cc b/talk/app/webrtc/jsepsessiondescription.cc
index bc65ca5..8ec1458 100644
--- a/talk/app/webrtc/jsepsessiondescription.cc
+++ b/talk/app/webrtc/jsepsessiondescription.cc
@@ -139,7 +139,7 @@
 
   candidate_collection_[mediasection_index].add(
        new JsepIceCandidate(candidate->sdp_mid(),
-                            mediasection_index,
+                            static_cast<int>(mediasection_index),
                             updated_candidate));
   return true;
 }
diff --git a/talk/app/webrtc/peerconnection_unittest.cc b/talk/app/webrtc/peerconnection_unittest.cc
index eed6936..250e60f 100644
--- a/talk/app/webrtc/peerconnection_unittest.cc
+++ b/talk/app/webrtc/peerconnection_unittest.cc
@@ -147,7 +147,8 @@
 
   void AddMediaStream(bool audio, bool video) {
     std::string label = kStreamLabelBase +
-        talk_base::ToString<int>(peer_connection_->local_streams()->count());
+        talk_base::ToString<int>(
+            static_cast<int>(peer_connection_->local_streams()->count()));
     talk_base::scoped_refptr<webrtc::MediaStreamInterface> stream =
         peer_connection_factory_->CreateLocalMediaStream(label);
 
@@ -306,11 +307,11 @@
           desc->GetTransportDescriptionByName(contents[index].name);
 
       std::map<int, IceUfragPwdPair>::const_iterator ufragpair_it =
-          ice_ufrag_pwd_.find(index);
+          ice_ufrag_pwd_.find(static_cast<int>(index));
       if (ufragpair_it == ice_ufrag_pwd_.end()) {
         ASSERT_FALSE(ExpectIceRestart());
-        ice_ufrag_pwd_[index] = IceUfragPwdPair(transport_desc->ice_ufrag,
-                                                transport_desc->ice_pwd);
+        ice_ufrag_pwd_[static_cast<int>(index)] =
+            IceUfragPwdPair(transport_desc->ice_ufrag, transport_desc->ice_pwd);
       } else if (ExpectIceRestart()) {
         const IceUfragPwdPair& ufrag_pwd = ufragpair_it->second;
         EXPECT_NE(ufrag_pwd.first, transport_desc->ice_ufrag);
@@ -1007,13 +1008,13 @@
 
   ASSERT_LE(0, initializing_client()->rendered_height());
   double initiating_video_ratio =
-      static_cast<double> (initializing_client()->rendered_width()) /
+      static_cast<double>(initializing_client()->rendered_width()) /
       initializing_client()->rendered_height();
   EXPECT_LE(requested_ratio, initiating_video_ratio);
 
   ASSERT_LE(0, receiving_client()->rendered_height());
   double receiving_video_ratio =
-      static_cast<double> (receiving_client()->rendered_width()) /
+      static_cast<double>(receiving_client()->rendered_width()) /
       receiving_client()->rendered_height();
   EXPECT_LE(requested_ratio, receiving_video_ratio);
 }
diff --git a/talk/app/webrtc/peerconnectionfactory.cc b/talk/app/webrtc/peerconnectionfactory.cc
index 7ae5a3b..316b044 100644
--- a/talk/app/webrtc/peerconnectionfactory.cc
+++ b/talk/app/webrtc/peerconnectionfactory.cc
@@ -189,7 +189,7 @@
 void PeerConnectionFactory::OnMessage(talk_base::Message* msg) {
   switch (msg->message_id) {
     case MSG_INIT_FACTORY: {
-     InitMessageData* pdata = static_cast<InitMessageData*> (msg->pdata);
+     InitMessageData* pdata = static_cast<InitMessageData*>(msg->pdata);
      pdata->data() = Initialize_s();
      break;
     }
@@ -199,7 +199,7 @@
     }
     case MSG_CREATE_PEERCONNECTION: {
       CreatePeerConnectionParams* pdata =
-          static_cast<CreatePeerConnectionParams*> (msg->pdata);
+          static_cast<CreatePeerConnectionParams*>(msg->pdata);
       pdata->peerconnection = CreatePeerConnection_s(pdata->configuration,
                                                      pdata->constraints,
                                                      pdata->allocator_factory,
@@ -214,7 +214,7 @@
     }
     case MSG_CREATE_VIDEOSOURCE: {
       CreateVideoSourceParams* pdata =
-          static_cast<CreateVideoSourceParams*> (msg->pdata);
+          static_cast<CreateVideoSourceParams*>(msg->pdata);
       pdata->source = CreateVideoSource_s(pdata->capturer, pdata->constraints);
       break;
     }
diff --git a/talk/app/webrtc/peerconnectioninterface_unittest.cc b/talk/app/webrtc/peerconnectioninterface_unittest.cc
index 782bba1..698de3e 100644
--- a/talk/app/webrtc/peerconnectioninterface_unittest.cc
+++ b/talk/app/webrtc/peerconnectioninterface_unittest.cc
@@ -94,7 +94,7 @@
     return false;
   }
   const cricket::MediaContentDescription* media_desc =
-      static_cast<const cricket::MediaContentDescription*> (
+      static_cast<const cricket::MediaContentDescription*>(
           content_info->description);
   if (!media_desc || media_desc->streams().empty()) {
     return false;
diff --git a/talk/app/webrtc/webrtcsdp.cc b/talk/app/webrtc/webrtcsdp.cc
index f91db8d..8d9fba1 100644
--- a/talk/app/webrtc/webrtcsdp.cc
+++ b/talk/app/webrtc/webrtcsdp.cc
@@ -632,7 +632,7 @@
 void GetMediaStreamLabels(const ContentInfo* content,
                           std::set<std::string>* labels) {
   const MediaContentDescription* media_desc =
-      static_cast<const MediaContentDescription*> (
+      static_cast<const MediaContentDescription*>(
           content->description);
   const cricket::StreamParamsVec& streams =  media_desc->streams();
   for (cricket::StreamParamsVec::const_iterator it = streams.begin();
@@ -1123,7 +1123,7 @@
   // trunk/cppguide.xml?showone=Streams#Streams
   std::ostringstream os;
   const MediaContentDescription* media_desc =
-      static_cast<const MediaContentDescription*> (
+      static_cast<const MediaContentDescription*>(
           content_info->description);
   ASSERT(media_desc != NULL);
 
@@ -1928,7 +1928,7 @@
   if (!media_desc) {
     return;
   }
-  int preference = fmts.size();
+  int preference = static_cast<int>(fmts.size());
   std::vector<int>::const_iterator it = fmts.begin();
   bool add_new_codec = false;
   for (; it != fmts.end(); ++it) {
diff --git a/talk/app/webrtc/webrtcsdp_unittest.cc b/talk/app/webrtc/webrtcsdp_unittest.cc
index 9c3debd..4892f58 100644
--- a/talk/app/webrtc/webrtcsdp_unittest.cc
+++ b/talk/app/webrtc/webrtcsdp_unittest.cc
@@ -1240,7 +1240,7 @@
   int position = 0;
   for (size_t i = 0; i < string1.length() && i < string2.length(); ++i) {
     if (string1.c_str()[i] != string2.c_str()[i]) {
-      position = i;
+      position = static_cast<int>(i);
       break;
     }
   }
diff --git a/talk/app/webrtc/webrtcsession.cc b/talk/app/webrtc/webrtcsession.cc
index fee8d42..840cb3d 100644
--- a/talk/app/webrtc/webrtcsession.cc
+++ b/talk/app/webrtc/webrtcsession.cc
@@ -629,25 +629,24 @@
 
 bool WebRtcSession::SetLocalDescription(SessionDescriptionInterface* desc,
                                         std::string* err_desc) {
+  // Takes the ownership of |desc| regardless of the result.
+  talk_base::scoped_ptr<SessionDescriptionInterface> desc_temp(desc);
+
   if (error() != cricket::BaseSession::ERROR_NONE) {
-    delete desc;
     return BadLocalSdp(SessionErrorMsg(error()), err_desc);
   }
 
   if (!desc || !desc->description()) {
-    delete desc;
     return BadLocalSdp(kInvalidSdp, err_desc);
   }
   Action action = GetAction(desc->type());
   if (!ExpectSetLocalDescription(action)) {
     std::string type = desc->type();
-    delete desc;
     return BadLocalSdp(BadStateErrMsg(type, state()), err_desc);
   }
 
   if (session_desc_factory_.secure() == cricket::SEC_REQUIRED &&
       !VerifyCrypto(desc->description())) {
-    delete desc;
     return BadLocalSdp(kSdpWithoutCrypto, err_desc);
   }
 
@@ -665,10 +664,10 @@
   UpdateSessionDescriptionSecurePolicy(desc->description());
 
   set_local_description(desc->description()->Copy());
-  local_desc_.reset(desc);
+  local_desc_.reset(desc_temp.release());
 
   // Transport and Media channels will be created only when offer is set.
-  if (action == kOffer && !CreateChannels(desc->description())) {
+  if (action == kOffer && !CreateChannels(local_desc_->description())) {
     // TODO(mallinath) - Handle CreateChannel failure, as new local description
     // is applied. Restore back to old description.
     return BadLocalSdp(kCreateChannelFailed, err_desc);
@@ -676,10 +675,10 @@
 
   // Remove channel and transport proxies, if MediaContentDescription is
   // rejected.
-  RemoveUnusedChannelsAndTransports(desc->description());
+  RemoveUnusedChannelsAndTransports(local_desc_->description());
 
   if (!UpdateSessionState(action, cricket::CS_LOCAL,
-                          desc->description(), err_desc)) {
+                          local_desc_->description(), err_desc)) {
     return false;
   }
   // Kick starting the ice candidates allocation.
@@ -697,19 +696,19 @@
 
 bool WebRtcSession::SetRemoteDescription(SessionDescriptionInterface* desc,
                                          std::string* err_desc) {
+  // Takes the ownership of |desc| regardless of the result.
+  talk_base::scoped_ptr<SessionDescriptionInterface> desc_temp(desc);
+
   if (error() != cricket::BaseSession::ERROR_NONE) {
-    delete desc;
     return BadRemoteSdp(SessionErrorMsg(error()), err_desc);
   }
 
   if (!desc || !desc->description()) {
-    delete desc;
     return BadRemoteSdp(kInvalidSdp, err_desc);
   }
   Action action = GetAction(desc->type());
   if (!ExpectSetRemoteDescription(action)) {
     std::string type = desc->type();
-    delete desc;
     return BadRemoteSdp(BadStateErrMsg(type, state()), err_desc);
   }
 
@@ -720,7 +719,6 @@
 
   if (session_desc_factory_.secure() == cricket::SEC_REQUIRED &&
       !VerifyCrypto(desc->description())) {
-    delete desc;
     return BadRemoteSdp(kSdpWithoutCrypto, err_desc);
   }
 
@@ -746,7 +744,6 @@
   // Update remote MediaStreams.
   mediastream_signaling_->OnRemoteDescriptionChanged(desc);
   if (local_description() && !UseCandidatesInSessionDescription(desc)) {
-    delete desc;
     return BadRemoteSdp(kInvalidCandidates, err_desc);
   }
 
@@ -758,7 +755,7 @@
   // that indicates the remote peer requests ice restart.
   ice_restart_latch_->CheckForRemoteIceRestart(remote_desc_.get(),
                                                desc);
-  remote_desc_.reset(desc);
+  remote_desc_.reset(desc_temp.release());
   if (error() != cricket::BaseSession::ERROR_NONE) {
     return BadRemoteSdp(SessionErrorMsg(error()), err_desc);
   }
@@ -1245,7 +1242,7 @@
   const ContentInfos& contents = BaseSession::local_description()->contents();
   for (size_t index = 0; index < contents.size(); ++index) {
     if (contents[index].name == content_name) {
-      *sdp_mline_index = index;
+      *sdp_mline_index = static_cast<int>(index);
       content_found = true;
       break;
     }
@@ -1428,7 +1425,7 @@
        iter != sdesc->contents().end(); ++iter) {
     if (cricket::IsMediaContent(&*iter)) {
       MediaContentDescription* mdesc =
-          static_cast<MediaContentDescription*> (iter->description);
+          static_cast<MediaContentDescription*>(iter->description);
       if (mdesc) {
         mdesc->set_crypto_required(
             session_desc_factory_.secure() == cricket::SEC_REQUIRED);
diff --git a/talk/app/webrtc/webrtcsession.h b/talk/app/webrtc/webrtcsession.h
index 045d347..f4fe202 100644
--- a/talk/app/webrtc/webrtcsession.h
+++ b/talk/app/webrtc/webrtcsession.h
@@ -140,8 +140,10 @@
   SessionDescriptionInterface* CreateAnswer(
       const MediaConstraintsInterface* constraints);
 
+  // The ownership of |desc| will be transferred after this call.
   bool SetLocalDescription(SessionDescriptionInterface* desc,
                            std::string* err_desc);
+  // The ownership of |desc| will be transferred after this call.
   bool SetRemoteDescription(SessionDescriptionInterface* desc,
                             std::string* err_desc);
   bool ProcessIceMessage(const IceCandidateInterface* ice_candidate);
diff --git a/talk/app/webrtc/webrtcsession_unittest.cc b/talk/app/webrtc/webrtcsession_unittest.cc
index 78619c6..48cccf4 100644
--- a/talk/app/webrtc/webrtcsession_unittest.cc
+++ b/talk/app/webrtc/webrtcsession_unittest.cc
@@ -496,12 +496,12 @@
     // Create a SDP without Crypto.
     cricket::MediaSessionOptions options;
     options.has_video = true;
-    scoped_ptr<JsepSessionDescription> offer(
+    JsepSessionDescription* offer(
         CreateRemoteOffer(options, cricket::SEC_DISABLED));
-    ASSERT_TRUE(offer.get() != NULL);
+    ASSERT_TRUE(offer != NULL);
     VerifyNoCryptoParams(offer->description(), false);
     SetRemoteDescriptionExpectError("Called with a SDP without crypto enabled",
-                                    offer.release());
+                                    offer);
     const webrtc::SessionDescriptionInterface* answer =
         session_->CreateAnswer(NULL);
     // Answer should be NULL as no crypto params in offer.
@@ -832,7 +832,7 @@
     const cricket::ContentDescription* description = content->description;
     ASSERT(description != NULL);
     const cricket::AudioContentDescription* audio_content_desc =
-        static_cast<const cricket::AudioContentDescription*> (description);
+        static_cast<const cricket::AudioContentDescription*>(description);
     ASSERT(audio_content_desc != NULL);
     for (size_t i = 0; i < audio_content_desc->codecs().size(); ++i) {
       if (audio_content_desc->codecs()[i].name == "CN")
@@ -2184,16 +2184,16 @@
   SetLocalDescriptionWithoutError(ice_only_offer);
   std::string original_offer_sdp;
   EXPECT_TRUE(offer->ToString(&original_offer_sdp));
-  talk_base::scoped_ptr<SessionDescriptionInterface> pranswer_with_gice(
+  SessionDescriptionInterface* pranswer_with_gice =
       CreateSessionDescription(JsepSessionDescription::kPrAnswer,
-                               original_offer_sdp, NULL));
+                               original_offer_sdp, NULL);
   SetRemoteDescriptionExpectError(kPushDownPranswerTDFailed,
-                                  pranswer_with_gice.get());
-  talk_base::scoped_ptr<SessionDescriptionInterface> answer_with_gice(
+                                  pranswer_with_gice);
+  SessionDescriptionInterface* answer_with_gice =
       CreateSessionDescription(JsepSessionDescription::kAnswer,
-                               original_offer_sdp, NULL));
+                               original_offer_sdp, NULL);
   SetRemoteDescriptionExpectError(kPushDownAnswerTDFailed,
-                                  answer_with_gice.get());
+                                  answer_with_gice);
 }
 
 // Verifing local offer and remote answer have matching m-lines as per RFC 3264.
@@ -2207,13 +2207,13 @@
 
   cricket::SessionDescription* answer_copy = answer->description()->Copy();
   answer_copy->RemoveContentByName("video");
-  talk_base::scoped_ptr<JsepSessionDescription> modified_answer(
-      new JsepSessionDescription(JsepSessionDescription::kAnswer));
+  JsepSessionDescription* modified_answer =
+      new JsepSessionDescription(JsepSessionDescription::kAnswer);
 
   EXPECT_TRUE(modified_answer->Initialize(answer_copy,
                                           answer->session_id(),
                                           answer->session_version()));
-  SetRemoteDescriptionExpectError(kMlineMismatch, modified_answer.get());
+  SetRemoteDescriptionExpectError(kMlineMismatch, modified_answer);
 
   // Modifying content names.
   std::string sdp;
@@ -2227,9 +2227,9 @@
                              kAudioMidReplaceStr.length(),
                              &sdp);
 
-  talk_base::scoped_ptr<SessionDescriptionInterface> modified_answer1(
-      CreateSessionDescription(JsepSessionDescription::kAnswer, sdp, NULL));
-  SetRemoteDescriptionExpectError(kMlineMismatch, modified_answer1.get());
+  SessionDescriptionInterface* modified_answer1 =
+      CreateSessionDescription(JsepSessionDescription::kAnswer, sdp, NULL);
+  SetRemoteDescriptionExpectError(kMlineMismatch, modified_answer1);
 
   SetRemoteDescriptionWithoutError(answer.release());
 }
@@ -2245,13 +2245,13 @@
 
   cricket::SessionDescription* answer_copy = answer->description()->Copy();
   answer_copy->RemoveContentByName("video");
-  talk_base::scoped_ptr<JsepSessionDescription> modified_answer(
-      new JsepSessionDescription(JsepSessionDescription::kAnswer));
+  JsepSessionDescription* modified_answer =
+      new JsepSessionDescription(JsepSessionDescription::kAnswer);
 
   EXPECT_TRUE(modified_answer->Initialize(answer_copy,
                                           answer->session_id(),
                                           answer->session_version()));
-  SetLocalDescriptionExpectError(kMlineMismatch, modified_answer.get());
+  SetLocalDescriptionExpectError(kMlineMismatch, modified_answer);
   SetLocalDescriptionWithoutError(answer);
 }
 
@@ -2388,9 +2388,9 @@
   video_channel_->set_fail_set_send_codecs(true);
 
   mediastream_signaling_.SendAudioVideoStream2();
-  talk_base::scoped_ptr<SessionDescriptionInterface> answer(
-      CreateRemoteAnswer(session_->local_description()));
-  SetRemoteDescriptionExpectError("ERROR_CONTENT", answer.get());
+  SessionDescriptionInterface* answer =
+      CreateRemoteAnswer(session_->local_description());
+  SetRemoteDescriptionExpectError("ERROR_CONTENT", answer);
 }
 
 // Runs the loopback call test with BUNDLE and STUN disabled.
diff --git a/talk/base/common.cc b/talk/base/common.cc
index 842d925..3c0c352 100644
--- a/talk/base/common.cc
+++ b/talk/base/common.cc
@@ -25,6 +25,7 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <signal.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <memory.h>
@@ -51,14 +52,14 @@
 void Break() {
 #if WIN32
   ::DebugBreak();
-#elif OSX  // !WIN32
-  __asm__("int $3");
-#else // !OSX && !WIN32
-#if _DEBUG_HAVE_BACKTRACE
-  OutputTrace();
+#else  // !WIN32
+  // On POSIX systems, SIGTRAP signals debuggers to break without killing the
+  // process. If a debugger isn't attached, the uncaught SIGTRAP will crash the
+  // app.
+  raise(SIGTRAP);
 #endif
-  abort();
-#endif // !OSX && !WIN32
+  // If a debugger wasn't attached, we will have crashed by this point. If a
+  // debugger is attached, we'll continue from here.
 }
 
 static AssertLogger custom_assert_logger_ = NULL;
diff --git a/talk/base/common.h b/talk/base/common.h
index d624ddc..b9aeca4 100644
--- a/talk/base/common.h
+++ b/talk/base/common.h
@@ -112,7 +112,8 @@
 
 namespace talk_base {
 
-// Break causes the debugger to stop executing, or the program to abort.
+// If a debugger is attached, triggers a debugger breakpoint. If a debugger is
+// not attached, forces program termination.
 void Break();
 
 inline bool Assert(bool result, const char* function, const char* file,
diff --git a/talk/base/helpers.cc b/talk/base/helpers.cc
index c2d989b..bda940b 100644
--- a/talk/base/helpers.cc
+++ b/talk/base/helpers.cc
@@ -270,7 +270,7 @@
 }
 
 uint64 CreateRandomId64() {
-  return static_cast<uint64> (CreateRandomId()) << 32 | CreateRandomId();
+  return static_cast<uint64>(CreateRandomId()) << 32 | CreateRandomId();
 }
 
 uint32 CreateRandomNonZeroId() {
diff --git a/talk/base/md5.cc b/talk/base/md5.cc
index 84e06f9..7adb12d 100644
--- a/talk/base/md5.cc
+++ b/talk/base/md5.cc
@@ -53,7 +53,7 @@
   if ((ctx->bits[0] = t + (static_cast<uint32>(len) << 3)) < t) {
     ctx->bits[1]++;  // Carry from low to high.
   }
-  ctx->bits[1] += len >> 29;
+  ctx->bits[1] += static_cast<uint32>(len >> 29);
   t = (t >> 3) & 0x3f;  // Bytes already in shsInfo->data.
 
   // Handle any leading odd-sized chunks.
diff --git a/talk/base/sslstreamadapter_unittest.cc b/talk/base/sslstreamadapter_unittest.cc
index 3b08baf..1fe1a66 100644
--- a/talk/base/sslstreamadapter_unittest.cc
+++ b/talk/base/sslstreamadapter_unittest.cc
@@ -207,6 +207,7 @@
   ~SSLStreamAdapterTestBase() {
     // Put it back for the next test.
     talk_base::SetRandomTestMode(false);
+    talk_base::CleanupSSL();
   }
 
   static void SetUpTestCase() {
@@ -571,14 +572,13 @@
   }
 
   virtual void ReadData(talk_base::StreamInterface *stream) {
-    unsigned char *buffer = new unsigned char[2000];
+    unsigned char buffer[2000];
     size_t bread;
     int err2;
     talk_base::StreamResult r;
 
     for (;;) {
-      r = stream->Read(buffer, 2000,
-                       &bread, &err2);
+      r = stream->Read(buffer, 2000, &bread, &err2);
 
       if (r == talk_base::SR_ERROR) {
         // Unfortunately, errors are the way that the stream adapter
@@ -595,7 +595,8 @@
 
       // Now parse the datagram
       ASSERT_EQ(packet_size_, bread);
-      uint32_t packet_num = *(reinterpret_cast<uint32_t *>(buffer));
+      unsigned char* ptr_to_buffer = buffer;
+      uint32_t packet_num = *(reinterpret_cast<uint32_t *>(ptr_to_buffer));
 
       for (size_t i = 4; i < packet_size_; i++) {
         ASSERT_EQ((packet_num & 0xff), buffer[i]);
diff --git a/talk/build/common.gypi b/talk/build/common.gypi
index f2322b1..1e9df34 100644
--- a/talk/build/common.gypi
+++ b/talk/build/common.gypi
@@ -36,7 +36,9 @@
     # flood of chromium-style warnings.
     'clang_use_chrome_plugins%': 0,
     'libpeer_target_type%': 'static_library',
-    'java_home%': '<!(python -c "import os; print os.getenv(\'JAVA_HOME\');")',
+    # TODO(henrike): make sure waterfall bots have $JAVA_HOME configured
+    # properly and remove the default value below. See issue 2113.
+    'java_home%': '<!(python -c "import os; print os.getenv(\'JAVA_HOME\', \'/usr/lib/jvm/java-6-sun\');")',
     # Whether or not to build the ObjectiveC PeerConnection API & tests.
     'libjingle_objc%' : 0,
   },
diff --git a/talk/examples/peerconnection/client/main_wnd.cc b/talk/examples/peerconnection/client/main_wnd.cc
index 0a22d03..6211e99 100644
--- a/talk/examples/peerconnection/client/main_wnd.cc
+++ b/talk/examples/peerconnection/client/main_wnd.cc
@@ -499,7 +499,9 @@
     size_t y = rc.bottom / 2;
     for (size_t i = 0; i < ARRAYSIZE(windows); ++i) {
       size_t top = y - (windows[i].height / 2);
-      ::MoveWindow(windows[i].wnd, x, top, windows[i].width, windows[i].height,
+      ::MoveWindow(windows[i].wnd, static_cast<int>(x), static_cast<int>(top),
+                   static_cast<int>(windows[i].width),
+                   static_cast<int>(windows[i].height),
                    TRUE);
       x += kSeparator + windows[i].width;
       if (windows[i].text[0] != 'X')
diff --git a/talk/examples/peerconnection/client/peer_connection_client.cc b/talk/examples/peerconnection/client/peer_connection_client.cc
index dc66946..403fabd 100644
--- a/talk/examples/peerconnection/client/peer_connection_client.cc
+++ b/talk/examples/peerconnection/client/peer_connection_client.cc
@@ -264,7 +264,7 @@
   char buffer[1024];
   sprintfn(buffer, sizeof(buffer),
            "GET /wait?peer_id=%i HTTP/1.0\r\n\r\n", my_id_);
-  int len = strlen(buffer);
+  int len = static_cast<int>(strlen(buffer));
   int sent = socket->Send(buffer, len);
   ASSERT(sent == len);
   UNUSED2(sent, len);
@@ -357,7 +357,7 @@
       if (my_id_ == -1) {
         // First response.  Let's store our server assigned ID.
         ASSERT(state_ == SIGNING_IN);
-        my_id_ = peer_id;
+        my_id_ = static_cast<int>(peer_id);
         ASSERT(my_id_ != -1);
 
         // The body of the response will be a list of already connected peers.
@@ -427,7 +427,8 @@
           }
         }
       } else {
-        OnMessageFromPeer(peer_id, notification_data_.substr(pos));
+        OnMessageFromPeer(static_cast<int>(peer_id),
+                          notification_data_.substr(pos));
       }
     }
 
diff --git a/talk/examples/peerconnection/server/data_socket.cc b/talk/examples/peerconnection/server/data_socket.cc
index 58370b4..37e7cd5 100644
--- a/talk/examples/peerconnection/server/data_socket.cc
+++ b/talk/examples/peerconnection/server/data_socket.cc
@@ -27,6 +27,7 @@
 
 #include "talk/examples/peerconnection/server/data_socket.h"
 
+#include <ctype.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -131,7 +132,8 @@
 }
 
 bool DataSocket::Send(const std::string& data) const {
-  return send(socket_, data.data(), data.length(), 0) != SOCKET_ERROR;
+  return send(socket_, data.data(), static_cast<int>(data.length()), 0) !=
+      SOCKET_ERROR;
 }
 
 bool DataSocket::Send(const std::string& status, bool connection_close,
@@ -151,7 +153,8 @@
   if (!content_type.empty())
     buffer += "Content-Type: " + content_type + "\r\n";
 
-  buffer += "Content-Length: " + int2str(data.size()) + "\r\n";
+  buffer += "Content-Length: " + int2str(static_cast<int>(data.size())) +
+            "\r\n";
 
   if (!extra_headers.empty()) {
     buffer += extra_headers;
diff --git a/talk/libjingle.gyp b/talk/libjingle.gyp
index 270a55d..9207d0e 100755
--- a/talk/libjingle.gyp
+++ b/talk/libjingle.gyp
@@ -679,17 +679,15 @@
             'base/winping.cc',
             'base/winping.h',
           ],
+          'link_settings': {
+            'libraries': [
+              '-lcrypt32.lib',
+              '-liphlpapi.lib',
+              '-lsecur32.lib',
+            ],
+          },
           # Suppress warnings about WIN32_LEAN_AND_MEAN.
           'msvs_disabled_warnings': [4005],
-          'msvs_settings': {
-            'VCLibrarianTool': {
-              'AdditionalDependencies': [
-                'crypt32.lib',
-                'iphlpapi.lib',
-                'secur32.lib',
-              ],
-            },
-          },
         }],
         ['os_posix==1', {
           'sources': [
diff --git a/talk/media/base/capturerenderadapter.cc b/talk/media/base/capturerenderadapter.cc
index 8fbf34e..d7b3995 100644
--- a/talk/media/base/capturerenderadapter.cc
+++ b/talk/media/base/capturerenderadapter.cc
@@ -39,11 +39,8 @@
 }
 
 CaptureRenderAdapter::~CaptureRenderAdapter() {
-  // Have to disconnect here since |video_capturer_| lives on past the
-  // destruction of this object.
-  if (video_capturer_) {
-    video_capturer_->SignalVideoFrame.disconnect(this);
-  }
+  // has_slots destructor will disconnect us from any signals we may be
+  // connected to.
 }
 
 CaptureRenderAdapter* CaptureRenderAdapter::Create(
@@ -111,7 +108,8 @@
     const bool new_resolution = iter->render_width != frame->GetWidth() ||
         iter->render_height != frame->GetHeight();
     if (new_resolution) {
-      if (iter->renderer->SetSize(frame->GetWidth(), frame->GetHeight(), 0)) {
+      if (iter->renderer->SetSize(static_cast<int>(frame->GetWidth()),
+                                  static_cast<int>(frame->GetHeight()), 0)) {
         iter->render_width = frame->GetWidth();
         iter->render_height = frame->GetHeight();
       } else {
diff --git a/talk/media/base/fakenetworkinterface.h b/talk/media/base/fakenetworkinterface.h
index 25016844..2fdd1d4 100644
--- a/talk/media/base/fakenetworkinterface.h
+++ b/talk/media/base/fakenetworkinterface.h
@@ -69,7 +69,7 @@
     talk_base::CritScope cs(&crit_);
     int bytes = 0;
     for (size_t i = 0; i < rtp_packets_.size(); ++i) {
-      bytes += rtp_packets_[i].length();
+      bytes += static_cast<int>(rtp_packets_[i].length());
     }
     return bytes;
   }
@@ -83,7 +83,7 @@
 
   int NumRtpPackets() {
     talk_base::CritScope cs(&crit_);
-    return rtp_packets_.size();
+    return static_cast<int>(rtp_packets_.size());
   }
 
   int NumRtpPackets(uint32 ssrc) {
@@ -95,7 +95,7 @@
 
   int NumSentSsrcs() {
     talk_base::CritScope cs(&crit_);
-    return sent_ssrcs_.size();
+    return static_cast<int>(sent_ssrcs_.size());
   }
 
   // Note: callers are responsible for deleting the returned buffer.
@@ -109,7 +109,7 @@
 
   int NumRtcpPackets() {
     talk_base::CritScope cs(&crit_);
-    return rtcp_packets_.size();
+    return static_cast<int>(rtcp_packets_.size());
   }
 
   // Note: callers are responsible for deleting the returned buffer.
@@ -218,7 +218,7 @@
       }
       if (ssrc == cur_ssrc) {
         if (bytes) {
-          *bytes += rtp_packets_[i].length();
+          *bytes += static_cast<int>(rtp_packets_[i].length());
         }
         if (packets) {
           ++(*packets);
diff --git a/talk/media/base/fakevideocapturer.h b/talk/media/base/fakevideocapturer.h
index 4f51b66..5a33265 100644
--- a/talk/media/base/fakevideocapturer.h
+++ b/talk/media/base/fakevideocapturer.h
@@ -84,7 +84,7 @@
     if (fourcc == cricket::FOURCC_ARGB) {
       size = width * 4 * height;
     } else if (fourcc == cricket::FOURCC_I420) {
-      size = cricket::VideoFrame::SizeOf(width, height);
+      size = static_cast<uint32>(cricket::VideoFrame::SizeOf(width, height));
     } else {
       return false;  // Unsupported FOURCC.
     }
diff --git a/talk/media/base/filemediaengine_unittest.cc b/talk/media/base/filemediaengine_unittest.cc
index a2c91a1..703fc11 100644
--- a/talk/media/base/filemediaengine_unittest.cc
+++ b/talk/media/base/filemediaengine_unittest.cc
@@ -163,7 +163,8 @@
     for (size_t i = 0; i < ssrc_count; ++i) {
       ret &= RtpTestUtility::WriteTestPackets(
           RtpTestUtility::GetTestPacketCount(), false,
-          RtpTestUtility::kDefaultSsrc + i, &writer);
+          static_cast<uint32>(RtpTestUtility::kDefaultSsrc + i),
+          &writer);
     }
     return ret;
   }
diff --git a/talk/media/base/hybridvideoengine.h b/talk/media/base/hybridvideoengine.h
index 1e43a30b7..ab638c9 100644
--- a/talk/media/base/hybridvideoengine.h
+++ b/talk/media/base/hybridvideoengine.h
@@ -225,18 +225,12 @@
   bool SetCaptureDevice(const Device* device) {
     return video2_.SetCaptureDevice(device);
   }
-  bool SetVideoCapturer(VideoCapturer* capturer) {
-    return video2_.SetVideoCapturer(capturer);
-  }
   VideoCapturer* GetVideoCapturer() const {
     return video2_.GetVideoCapturer();
   }
   bool SetLocalRenderer(VideoRenderer* renderer) {
     return video2_.SetLocalRenderer(renderer);
   }
-  bool SetCapture(bool capture) {
-    return video2_.SetCapture(capture);
-  }
   sigslot::repeater2<VideoCapturer*, CaptureState> SignalCaptureStateChange;
 
   virtual bool HasCodec1(const VideoCodec& codec) {
diff --git a/talk/media/base/mediachannel.h b/talk/media/base/mediachannel.h
index 441fbff..82b9ddb 100644
--- a/talk/media/base/mediachannel.h
+++ b/talk/media/base/mediachannel.h
@@ -234,6 +234,7 @@
   void SetAll(const VideoOptions& change) {
     adapt_input_to_encoder.SetFrom(change.adapt_input_to_encoder);
     adapt_input_to_cpu_usage.SetFrom(change.adapt_input_to_cpu_usage);
+    adapt_cpu_with_smoothing.SetFrom(change.adapt_cpu_with_smoothing);
     adapt_view_switch.SetFrom(change.adapt_view_switch);
     video_noise_reduction.SetFrom(change.video_noise_reduction);
     video_three_layers.SetFrom(change.video_three_layers);
@@ -256,6 +257,7 @@
   bool operator==(const VideoOptions& o) const {
     return adapt_input_to_encoder == o.adapt_input_to_encoder &&
         adapt_input_to_cpu_usage == o.adapt_input_to_cpu_usage &&
+        adapt_cpu_with_smoothing == o.adapt_cpu_with_smoothing &&
         adapt_view_switch == o.adapt_view_switch &&
         video_noise_reduction == o.video_noise_reduction &&
         video_three_layers == o.video_three_layers &&
@@ -279,6 +281,7 @@
     ost << "VideoOptions {";
     ost << ToStringIfSet("encoder adaption", adapt_input_to_encoder);
     ost << ToStringIfSet("cpu adaption", adapt_input_to_cpu_usage);
+    ost << ToStringIfSet("cpu adaptation smoothing", adapt_cpu_with_smoothing);
     ost << ToStringIfSet("adapt view switch", adapt_view_switch);
     ost << ToStringIfSet("noise reduction", video_noise_reduction);
     ost << ToStringIfSet("3 layers", video_three_layers);
@@ -303,6 +306,8 @@
   Settable<bool> adapt_input_to_encoder;
   // Enable CPU adaptation?
   Settable<bool> adapt_input_to_cpu_usage;
+  // Enable CPU adaptation smoothing?
+  Settable<bool> adapt_cpu_with_smoothing;
   // Enable Adapt View Switch?
   Settable<bool> adapt_view_switch;
   // Enable denoising?
diff --git a/talk/media/base/mediaengine.h b/talk/media/base/mediaengine.h
index 5cfcb4d..7a1244d 100644
--- a/talk/media/base/mediaengine.h
+++ b/talk/media/base/mediaengine.h
@@ -125,10 +125,6 @@
   // TODO(tschmelcher): Add method for selecting the soundclip device.
   virtual bool SetSoundDevices(const Device* in_device,
                                const Device* out_device) = 0;
-  // Sets the externally provided video capturer. The ssrc is the ssrc of the
-  // (video) stream for which the video capturer should be set.
-  virtual bool SetVideoCapturer(VideoCapturer* capturer) = 0;
-  virtual VideoCapturer* GetVideoCapturer() const = 0;
 
   // Device configuration
   // Gets the current speaker volume, as a value between 0 and 255.
@@ -145,8 +141,6 @@
   virtual bool SetLocalMonitor(bool enable) = 0;
   // Installs a callback for raw frames from the local camera.
   virtual bool SetLocalRenderer(VideoRenderer* renderer) = 0;
-  // Starts/stops local camera.
-  virtual bool SetVideoCapture(bool capture) = 0;
 
   virtual const std::vector<AudioCodec>& audio_codecs() = 0;
   virtual const std::vector<RtpHeaderExtension>&
@@ -233,12 +227,6 @@
                                const Device* out_device) {
     return voice_.SetDevices(in_device, out_device);
   }
-  virtual bool SetVideoCapturer(VideoCapturer* capturer) {
-    return video_.SetVideoCapturer(capturer);
-  }
-  virtual VideoCapturer* GetVideoCapturer() const {
-    return video_.GetVideoCapturer();
-  }
 
   virtual bool GetOutputVolume(int* level) {
     return voice_.GetOutputVolume(level);
@@ -256,9 +244,6 @@
   virtual bool SetLocalRenderer(VideoRenderer* renderer) {
     return video_.SetLocalRenderer(renderer);
   }
-  virtual bool SetVideoCapture(bool capture) {
-    return video_.SetCapture(capture);
-  }
 
   virtual const std::vector<AudioCodec>& audio_codecs() {
     return voice_.codecs();
@@ -364,15 +349,12 @@
     return true;
   }
   bool SetLocalRenderer(VideoRenderer* renderer) { return true; }
-  bool SetCapture(bool capture) { return true;  }
   const std::vector<VideoCodec>& codecs() { return codecs_; }
   const std::vector<RtpHeaderExtension>& rtp_header_extensions() {
     return rtp_header_extensions_;
   }
   void SetLogging(int min_sev, const char* filter) {}
   VideoFormat GetStartCaptureFormat() const { return VideoFormat(); }
-  bool SetVideoCapturer(VideoCapturer* capturer) { return true; }
-  VideoCapturer* GetVideoCapturer() const { return NULL; }
 
   sigslot::signal2<VideoCapturer*, CaptureState> SignalCaptureStateChange;
  private:
diff --git a/talk/media/base/testutils.cc b/talk/media/base/testutils.cc
index a5e2df9..3edb5c7 100644
--- a/talk/media/base/testutils.cc
+++ b/talk/media/base/testutils.cc
@@ -186,9 +186,10 @@
       result &= rtp_packet.ReadFromByteBuffer(&buf);
       result &= rtp_packet.SameExceptSeqNumTimestampSsrc(
           kTestRawRtpPackets[index],
-          kTestRawRtpPackets[index].sequence_number +
-              loop * GetTestPacketCount(),
-          kTestRawRtpPackets[index].timestamp + loop * kRtpTimestampIncrease,
+          static_cast<uint16>(kTestRawRtpPackets[index].sequence_number +
+                              loop * GetTestPacketCount()),
+          static_cast<uint32>(kTestRawRtpPackets[index].timestamp +
+                              loop * kRtpTimestampIncrease),
           ssrc);
     }
   }
diff --git a/talk/media/base/videoadapter.cc b/talk/media/base/videoadapter.cc
index 1e5918a..cef4248 100644
--- a/talk/media/base/videoadapter.cc
+++ b/talk/media/base/videoadapter.cc
@@ -27,16 +27,23 @@
 
 #include <limits.h>  // For INT_MAX
 
-#include "talk/media/base/constants.h"
 #include "talk/base/logging.h"
 #include "talk/base/timeutils.h"
+#include "talk/media/base/constants.h"
 #include "talk/media/base/videoframe.h"
 
 namespace cricket {
 
 // TODO(fbarchard): Make downgrades settable
 static const int kMaxCpuDowngrades = 2;  // Downgrade at most 2 times for CPU.
-static const int kDefaultDowngradeWaitTimeMs = 2000;
+// The number of milliseconds of data to require before acting on cpu sampling
+// information.
+static const size_t kCpuLoadMinSampleTime = 5000;
+// The amount of weight to give to each new cpu load sample. The lower the
+// value, the slower we'll adapt to changing cpu conditions.
+static const float kCpuLoadWeightCoefficient = 0.4f;
+// The seed value for the cpu load moving average.
+static const float kCpuLoadInitialAverage = 0.5f;
 
 // TODO(fbarchard): Consider making scale factor table settable, to allow
 // application to select quality vs performance tradeoff.
@@ -150,8 +157,8 @@
 
 void VideoAdapter::SetInputFormat(const VideoFrame& in_frame) {
   talk_base::CritScope cs(&critical_section_);
-  input_format_.width = in_frame.GetWidth();
-  input_format_.height = in_frame.GetHeight();
+  input_format_.width = static_cast<int>(in_frame.GetWidth());
+  input_format_.height = static_cast<int>(in_frame.GetHeight());
 }
 
 void VideoAdapter::SetInputFormat(const VideoFormat& format) {
@@ -230,9 +237,10 @@
   }
 
   if (output_num_pixels_) {
-    float scale = VideoAdapter::FindClosestScale(in_frame->GetWidth(),
-                                                 in_frame->GetHeight(),
-                                                 output_num_pixels_);
+    float scale = VideoAdapter::FindClosestScale(
+        static_cast<int>(in_frame->GetWidth()),
+        static_cast<int>(in_frame->GetHeight()),
+        output_num_pixels_);
     output_format_.width = static_cast<int>(in_frame->GetWidth() * scale + .5f);
     output_format_.height = static_cast<int>(in_frame->GetHeight() * scale +
                                              .5f);
@@ -291,11 +299,12 @@
 // Implementation of CoordinatedVideoAdapter
 CoordinatedVideoAdapter::CoordinatedVideoAdapter()
     : cpu_adaptation_(false),
+      cpu_smoothing_(false),
       gd_adaptation_(true),
       view_adaptation_(true),
       view_switch_(false),
       cpu_downgrade_count_(0),
-      cpu_downgrade_wait_time_(0),
+      cpu_adapt_wait_time_(0),
       high_system_threshold_(kHighSystemCpuThreshold),
       low_system_threshold_(kLowSystemCpuThreshold),
       process_threshold_(kProcessCpuThreshold),
@@ -303,7 +312,8 @@
       view_desired_interval_(0),
       encoder_desired_num_pixels_(INT_MAX),
       cpu_desired_num_pixels_(INT_MAX),
-      adapt_reason_(0) {
+      adapt_reason_(0),
+      system_load_average_(kCpuLoadInitialAverage) {
 }
 
 // Helper function to UPGRADE or DOWNGRADE a number of pixels
@@ -406,28 +416,40 @@
   if (!cpu_adaptation_) {
     return;
   }
+  // Update the moving average of system load. Even if we aren't smoothing,
+  // we'll still calculate this information, in case smoothing is later enabled.
+  system_load_average_ = kCpuLoadWeightCoefficient * system_load +
+      (1.0f - kCpuLoadWeightCoefficient) * system_load_average_;
+  if (cpu_smoothing_) {
+    system_load = system_load_average_;
+  }
+  // If we haven't started taking samples yet, wait until we have at least
+  // the correct number of samples per the wait time.
+  if (cpu_adapt_wait_time_ == 0) {
+    cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime);
+  }
   AdaptRequest request = FindCpuRequest(current_cpus, max_cpus,
                                         process_load, system_load);
+  // Make sure we're not adapting too quickly.
+  if (request != KEEP) {
+    if (talk_base::TimeIsLater(talk_base::Time(),
+                               cpu_adapt_wait_time_)) {
+      LOG(LS_VERBOSE) << "VAdapt CPU load high/low but do not adapt until "
+                      << talk_base::TimeUntil(cpu_adapt_wait_time_) << " ms";
+      request = KEEP;
+    }
+  }
+
   // Update how many times we have downgraded due to the cpu load.
   switch (request) {
     case DOWNGRADE:
+      // Ignore downgrades if we have downgraded the maximum times.
       if (cpu_downgrade_count_ < kMaxCpuDowngrades) {
-        // Ignore downgrades if we have downgraded the maximum times or we just
-        // downgraded in a short time.
-        if (cpu_downgrade_wait_time_ != 0 &&
-            talk_base::TimeIsLater(talk_base::Time(),
-                                   cpu_downgrade_wait_time_)) {
-          LOG(LS_VERBOSE) << "VAdapt CPU load high but do not downgrade until "
-                          << talk_base::TimeUntil(cpu_downgrade_wait_time_)
-                          << " ms.";
-          request = KEEP;
-        } else {
-          ++cpu_downgrade_count_;
-        }
+        ++cpu_downgrade_count_;
       } else {
-          LOG(LS_VERBOSE) << "VAdapt CPU load high but do not downgrade "
-                             "because maximum downgrades reached";
-          SignalCpuAdaptationUnable();
+        LOG(LS_VERBOSE) << "VAdapt CPU load high but do not downgrade "
+                           "because maximum downgrades reached";
+        SignalCpuAdaptationUnable();
       }
       break;
     case UPGRADE:
@@ -517,9 +539,6 @@
   if (cpu_adaptation_ && cpu_desired_num_pixels_ &&
       (cpu_desired_num_pixels_ < min_num_pixels)) {
     min_num_pixels = cpu_desired_num_pixels_;
-    // Update the cpu_downgrade_wait_time_ if we are going to downgrade video.
-    cpu_downgrade_wait_time_ =
-      talk_base::TimeAfter(kDefaultDowngradeWaitTimeMs);
   }
 
   // Determine which factors are keeping adapter resolution low.
@@ -582,6 +601,14 @@
                   << "x" << new_output.height
                   << " Changed: " << (changed ? "true" : "false")
                   << " Reason: " << kReasons[adapt_reason_];
+
+  if (changed) {
+    // When any adaptation occurs, historic CPU load levels are no longer
+    // accurate. Clear out our state so we can re-learn at the new normal.
+    cpu_adapt_wait_time_ = talk_base::TimeAfter(kCpuLoadMinSampleTime);
+    system_load_average_ = kCpuLoadInitialAverage;
+  }
+
   return changed;
 }
 
diff --git a/talk/media/base/videoadapter.h b/talk/media/base/videoadapter.h
index 14829ab..c41ac6e 100644
--- a/talk/media/base/videoadapter.h
+++ b/talk/media/base/videoadapter.h
@@ -105,6 +105,15 @@
   // Enable or disable video adaptation due to the change of the CPU load.
   void set_cpu_adaptation(bool enable) { cpu_adaptation_ = enable; }
   bool cpu_adaptation() const { return cpu_adaptation_; }
+  // Enable or disable smoothing when doing CPU adaptation. When smoothing is
+  // enabled, system CPU load is tracked using an exponential weighted
+  // average.
+  void set_cpu_smoothing(bool enable) {
+    LOG(LS_INFO) << "CPU smoothing is now "
+                 << (enable ? "enabled" : "disabled");
+    cpu_smoothing_ = enable;
+  }
+  bool cpu_smoothing() const { return cpu_smoothing_; }
   // Enable or disable video adaptation due to the change of the GD
   void set_gd_adaptation(bool enable) { gd_adaptation_ = enable; }
   bool gd_adaptation() const { return gd_adaptation_; }
@@ -121,12 +130,12 @@
 
   // When the video is decreased, set the waiting time for CPU adaptation to
   // decrease video again.
-  void set_cpu_downgrade_wait_time(uint32 cpu_downgrade_wait_time) {
-    if (cpu_downgrade_wait_time_ != static_cast<int>(cpu_downgrade_wait_time)) {
-      LOG(LS_INFO) << "VAdapt Change Cpu Downgrade Wait Time from: "
-                   << cpu_downgrade_wait_time_ << " to "
-                   << cpu_downgrade_wait_time;
-      cpu_downgrade_wait_time_ = static_cast<int>(cpu_downgrade_wait_time);
+  void set_cpu_adapt_wait_time(uint32 cpu_adapt_wait_time) {
+    if (cpu_adapt_wait_time_ != static_cast<int>(cpu_adapt_wait_time)) {
+      LOG(LS_INFO) << "VAdapt Change Cpu Adapt Wait Time from: "
+                   << cpu_adapt_wait_time_ << " to "
+                   << cpu_adapt_wait_time;
+      cpu_adapt_wait_time_ = static_cast<int>(cpu_adapt_wait_time);
     }
   }
   // CPU system load high threshold for reducing resolution.  e.g. 0.85f
@@ -175,7 +184,7 @@
 
  private:
   // Adapt to the minimum of the formats the server requests, the CPU wants, and
-  // the encoder wants.  Returns true if resolution changed.
+  // the encoder wants. Returns true if resolution changed.
   bool AdaptToMinimumFormat(int* new_width, int* new_height);
   bool IsMinimumFormat(int pixels);
   void StepPixelCount(CoordinatedVideoAdapter::AdaptRequest request,
@@ -185,11 +194,12 @@
     float process_load, float system_load);
 
   bool cpu_adaptation_;  // True if cpu adaptation is enabled.
+  bool cpu_smoothing_;  // True if cpu smoothing is enabled (with adaptation).
   bool gd_adaptation_;  // True if gd adaptation is enabled.
   bool view_adaptation_;  // True if view adaptation is enabled.
   bool view_switch_;  // True if view switch is enabled.
   int cpu_downgrade_count_;
-  int cpu_downgrade_wait_time_;
+  int cpu_adapt_wait_time_;
   // cpu system load thresholds relative to max cpus.
   float high_system_threshold_;
   float low_system_threshold_;
@@ -205,6 +215,10 @@
   // The critical section to protect handling requests.
   talk_base::CriticalSection request_critical_section_;
 
+  // The weighted average of cpu load over time. It's always updated (if cpu
+  // adaptation is on), but only used if cpu_smoothing_ is set.
+  float system_load_average_;
+
   DISALLOW_COPY_AND_ASSIGN(CoordinatedVideoAdapter);
 };
 
diff --git a/talk/media/base/videoengine_unittest.h b/talk/media/base/videoengine_unittest.h
index dcef4ac..0679f6a 100644
--- a/talk/media/base/videoengine_unittest.h
+++ b/talk/media/base/videoengine_unittest.h
@@ -76,7 +76,7 @@
 }
 
 inline int TimeBetweenSend(const cricket::VideoCodec& codec) {
-  return static_cast<int> (
+  return static_cast<int>(
       cricket::VideoFormat::FpsToInterval(codec.framerate) /
       talk_base::kNumNanosecsPerMillisec);
 }
@@ -95,12 +95,12 @@
   }
   bool is_camera_on() const { return T::GetVideoCapturer()->IsRunning(); }
   void set_has_senders(bool has_senders) {
+    cricket::VideoCapturer* video_capturer = T::GetVideoCapturer();
     if (has_senders) {
-      this->RegisterSender(this,
-                           &VideoEngineOverride<T>::OnLocalFrame,
-                           &VideoEngineOverride<T>::OnLocalFrameFormat);
+      video_capturer->SignalVideoFrame.connect(this,
+          &VideoEngineOverride<T>::OnLocalFrame);
     } else {
-      this->UnregisterSender(this);
+      video_capturer->SignalVideoFrame.disconnect(this);
     }
   }
   void OnLocalFrame(cricket::VideoCapturer*,
@@ -164,39 +164,6 @@
   }
 #endif
 
-  // Tests starting and stopping the capturer.
-  void SetCapture() {
-    EXPECT_FALSE(engine_.GetVideoCapturer());
-    EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
-    ResetCapturer();
-    EXPECT_TRUE(engine_.GetVideoCapturer() != NULL);
-    EXPECT_FALSE(engine_.is_camera_on());
-    EXPECT_TRUE(engine_.SetCapture(true));
-    EXPECT_TRUE(engine_.is_camera_on());
-    EXPECT_TRUE(engine_.SetCapture(false));
-    EXPECT_FALSE(engine_.is_camera_on());
-    engine_.set_has_senders(true);
-    EXPECT_TRUE(engine_.is_camera_on());
-    EXPECT_TRUE(engine_.SetCapture(true));
-    EXPECT_TRUE(engine_.is_camera_on());
-    EXPECT_TRUE(engine_.SetCapture(false));
-    EXPECT_TRUE(engine_.is_camera_on());
-    engine_.set_has_senders(false);
-    EXPECT_FALSE(engine_.is_camera_on());
-    EXPECT_TRUE(engine_.SetCapture(true));
-    EXPECT_TRUE(engine_.is_camera_on());
-    EXPECT_TRUE(engine_.SetCapture(false));
-    EXPECT_FALSE(engine_.is_camera_on());
-    EXPECT_TRUE(engine_.SetVideoCapturer(NULL));
-    EXPECT_TRUE(engine_.GetVideoCapturer() == NULL);
-    engine_.Terminate();
-  }
-  void ResetCapturer() {
-    cricket::Device device("test", "device");
-    video_capturer_.reset(new cricket::FakeVideoCapturer);
-    EXPECT_TRUE(engine_.SetVideoCapturer(video_capturer_.get()));
-  }
-
   void ConstrainNewCodecBody() {
     cricket::VideoCodec empty, in, out;
     cricket::VideoCodec max_settings(engine_.codecs()[0].id,
@@ -482,9 +449,6 @@
   virtual void SetUp() {
     cricket::Device device("test", "device");
     EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
-    video_capturer_.reset(new cricket::FakeVideoCapturer);
-    EXPECT_TRUE(video_capturer_.get() != NULL);
-    EXPECT_TRUE(engine_.SetVideoCapturer(video_capturer_.get()));
     channel_.reset(engine_.CreateChannel(NULL));
     EXPECT_TRUE(channel_.get() != NULL);
     ConnectVideoChannelError();
@@ -494,19 +458,34 @@
     media_error_ = cricket::VideoMediaChannel::ERROR_NONE;
     channel_->SetRecvCodecs(engine_.codecs());
     EXPECT_TRUE(channel_->AddSendStream(DefaultSendStreamParams()));
+
+    video_capturer_.reset(new cricket::FakeVideoCapturer);
+    cricket::VideoFormat format(640, 480,
+                                cricket::VideoFormat::FpsToInterval(30),
+                                cricket::FOURCC_I420);
+    EXPECT_EQ(cricket::CS_RUNNING, video_capturer_->Start(format));
+    EXPECT_TRUE(channel_->SetCapturer(kSsrc, video_capturer_.get()));
   }
   void SetUpSecondStream() {
     EXPECT_TRUE(channel_->AddRecvStream(
         cricket::StreamParams::CreateLegacy(kSsrc)));
     EXPECT_TRUE(channel_->AddRecvStream(
-        cricket::StreamParams::CreateLegacy(kSsrc+2)));
+        cricket::StreamParams::CreateLegacy(kSsrc + 2)));
     // SetUp() already added kSsrc make sure duplicate SSRCs cant be added.
     EXPECT_FALSE(channel_->AddSendStream(
         cricket::StreamParams::CreateLegacy(kSsrc)));
     EXPECT_TRUE(channel_->AddSendStream(
-        cricket::StreamParams::CreateLegacy(kSsrc+2)));
+        cricket::StreamParams::CreateLegacy(kSsrc + 2)));
+
+    video_capturer_2_.reset(new cricket::FakeVideoCapturer());
+    cricket::VideoFormat format(640, 480,
+                                cricket::VideoFormat::FpsToInterval(30),
+                                cricket::FOURCC_I420);
+    EXPECT_EQ(cricket::CS_RUNNING, video_capturer_2_->Start(format));
+
+    EXPECT_TRUE(channel_->SetCapturer(kSsrc + 2, video_capturer_2_.get()));
     // Make the second renderer available for use by a new stream.
-    EXPECT_TRUE(channel_->SetRenderer(kSsrc+2, &renderer2_));
+    EXPECT_TRUE(channel_->SetRenderer(kSsrc + 2, &renderer2_));
   }
   virtual void TearDown() {
     channel_.reset();
@@ -529,6 +508,19 @@
   bool SetOneCodec(const cricket::VideoCodec& codec) {
     std::vector<cricket::VideoCodec> codecs;
     codecs.push_back(codec);
+
+    cricket::VideoFormat capture_format(codec.width, codec.height,
+        cricket::VideoFormat::FpsToInterval(codec.framerate),
+        cricket::FOURCC_I420);
+
+    if (video_capturer_) {
+      EXPECT_EQ(cricket::CS_RUNNING, video_capturer_->Start(capture_format));
+    }
+
+    if (video_capturer_2_) {
+      EXPECT_EQ(cricket::CS_RUNNING, video_capturer_2_->Start(capture_format));
+    }
+
     bool sending = channel_->sending();
     bool success = SetSend(false);
     if (success)
@@ -550,6 +542,9 @@
     return NumRtpPackets();
   }
   bool SendFrame() {
+    if (video_capturer_2_) {
+      video_capturer_2_->CaptureFrame();
+    }
     return video_capturer_.get() &&
         video_capturer_->CaptureFrame();
   }
@@ -705,6 +700,7 @@
   // Test that SetSend works.
   void SetSend() {
     EXPECT_FALSE(channel_->sending());
+    EXPECT_TRUE(channel_->SetCapturer(kSsrc, video_capturer_.get()));
     EXPECT_TRUE(SetOneCodec(DefaultCodec()));
     EXPECT_FALSE(channel_->sending());
     EXPECT_TRUE(SetSend(true));
@@ -877,6 +873,7 @@
     EXPECT_TRUE(channel_->SetOptions(vmo));
     EXPECT_TRUE(channel_->AddRecvStream(
         cricket::StreamParams::CreateLegacy(1234)));
+    channel_->UpdateAspectRatio(640, 400);
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(channel_->SetRender(true));
     EXPECT_TRUE(SendFrame());
@@ -948,6 +945,7 @@
     EXPECT_TRUE(SetDefaultCodec());
     EXPECT_TRUE(channel_->AddSendStream(
         cricket::StreamParams::CreateLegacy(999)));
+    EXPECT_TRUE(channel_->SetCapturer(999u, video_capturer_.get()));
     EXPECT_TRUE(SetSend(true));
     EXPECT_TRUE(WaitAndSendFrame(0));
     EXPECT_TRUE_WAIT(NumRtpPackets() > 0, kTimeout);
@@ -1012,6 +1010,7 @@
 
     EXPECT_TRUE(channel_->AddSendStream(
         cricket::StreamParams::CreateLegacy(789u)));
+    EXPECT_TRUE(channel_->SetCapturer(789u, video_capturer_.get()));
     EXPECT_EQ(rtp_packets, NumRtpPackets());
     // Wait 30ms to guarantee the engine does not drop the frame.
     EXPECT_TRUE(WaitAndSendFrame(30));
@@ -1236,13 +1235,25 @@
       EXPECT_TRUE(capturer->CaptureCustomFrame(format.width, format.height,
                                                cricket::FOURCC_I420));
       ++captured_frames;
-      EXPECT_FRAME_WAIT(captured_frames, format.width, format.height, kTimeout);
+      // Wait until frame of right size is captured.
+      EXPECT_TRUE_WAIT(renderer_.num_rendered_frames() >= captured_frames &&
+                       format.width == renderer_.width() &&
+                       format.height == renderer_.height(), kTimeout);
+      EXPECT_GE(renderer_.num_rendered_frames(), captured_frames);
+      EXPECT_EQ(format.width, renderer_.width());
+      EXPECT_EQ(format.height, renderer_.height());
+      captured_frames = renderer_.num_rendered_frames() + 1;
       EXPECT_FALSE(renderer_.black_frame());
       EXPECT_TRUE(channel_->SetCapturer(kSsrc, NULL));
-      // Make sure a black frame is generated as no new frame is captured.
-      // A black frame should be the resolution of the send codec.
-      ++captured_frames;
-      EXPECT_FRAME_WAIT(captured_frames, codec.width, codec.height, kTimeout);
+      // Make sure a black frame is generated within the specified timeout.
+      // The black frame should be the resolution of the send codec.
+      EXPECT_TRUE_WAIT(renderer_.num_rendered_frames() >= captured_frames &&
+                       codec.width == renderer_.width() &&
+                       codec.height == renderer_.height() &&
+                       renderer_.black_frame(), kTimeout);
+      EXPECT_GE(renderer_.num_rendered_frames(), captured_frames);
+      EXPECT_EQ(codec.width, renderer_.width());
+      EXPECT_EQ(codec.height, renderer_.height());
       EXPECT_TRUE(renderer_.black_frame());
 
       // The black frame has the same timestamp as the next frame since it's
@@ -1263,13 +1274,18 @@
     EXPECT_EQ(0, renderer_.num_rendered_frames());
     EXPECT_TRUE(SendFrame());
     EXPECT_FRAME_WAIT(1, 640, 400, kTimeout);
+    // Remove the capturer.
+    EXPECT_TRUE(channel_->SetCapturer(kSsrc, NULL));
     // No capturer was added, so this RemoveCapturer should
     // fail.
     EXPECT_FALSE(channel_->SetCapturer(kSsrc, NULL));
-    // Wait for kTimeout, to make sure no frames are sent
-    WAIT(renderer_.num_rendered_frames() != 1, kTimeout);
-    // Still a single frame, from the original SendFrame() call.
-    EXPECT_EQ(1, renderer_.num_rendered_frames());
+    // Wait for frames to stop flowing.
+    talk_base::Thread::Current()->ProcessMessages(300);
+    int num_frames = renderer_.num_rendered_frames();
+    // Verify that no additional frames arrive during this second window.
+    WAIT(renderer_.num_rendered_frames() != num_frames, 300);
+    // Verify no more frames were sent.
+    EXPECT_EQ(num_frames, renderer_.num_rendered_frames());
   }
 
   // Tests that we can add and remove capturer as unique sources.
@@ -1328,6 +1344,9 @@
     // Capture a frame with additional capturer2, frames should be received
     EXPECT_TRUE(capturer2->CaptureCustomFrame(1024, 768, cricket::FOURCC_I420));
     EXPECT_FRAME_ON_RENDERER_WAIT(renderer2, 1, 1024, 768, kTimeout);
+    // Successfully remove the capturer.
+    EXPECT_TRUE(channel_->SetCapturer(kSsrc, NULL));
+    // Fail to re-remove the capturer.
     EXPECT_FALSE(channel_->SetCapturer(kSsrc, NULL));
     // The capturers must be unregistered here as it runs out of it's scope
     // next.
@@ -1372,8 +1391,9 @@
     EXPECT_TRUE(capturer->CaptureCustomFrame(kWidth, kHeight,
                                              cricket::FOURCC_ARGB));
     EXPECT_TRUE(capturer->CaptureFrame());
-    EXPECT_FRAME_ON_RENDERER_WAIT(renderer, 2, kScaledWidth, kScaledHeight,
-                                  kTimeout);
+    EXPECT_EQ_WAIT(2, renderer.num_rendered_frames(), kTimeout);
+    EXPECT_TRUE_WAIT(kScaledWidth == renderer.width() &&
+                     kScaledHeight == renderer.height(), kTimeout);
     EXPECT_TRUE(channel_->SetCapturer(kSsrc, NULL));
   }
 
@@ -1626,12 +1646,15 @@
     EXPECT_FALSE(channel_->AddRecvStream(
         cricket::StreamParams::CreateLegacy(kSsrc)));
 
+    EXPECT_TRUE(channel_->SetCapturer(kSsrc, video_capturer_.get()));
+
     SendAndReceive(codec);
     EXPECT_TRUE(channel_->RemoveSendStream(0));
   }
 
   VideoEngineOverride<E> engine_;
   talk_base::scoped_ptr<cricket::FakeVideoCapturer> video_capturer_;
+  talk_base::scoped_ptr<cricket::FakeVideoCapturer> video_capturer_2_;
   talk_base::scoped_ptr<C> channel_;
   cricket::FakeNetworkInterface network_interface_;
   cricket::FakeVideoRenderer renderer_;
diff --git a/talk/media/base/videoframe.cc b/talk/media/base/videoframe.cc
index f5b0b6d..7a82305 100644
--- a/talk/media/base/videoframe.cc
+++ b/talk/media/base/videoframe.cc
@@ -87,8 +87,8 @@
     uint8* dst_y, uint8* dst_u, uint8* dst_v,
     int32 dst_pitch_y, int32 dst_pitch_u, int32 dst_pitch_v) const {
 #if !defined(DISABLE_YUV)
-  int32 src_width = GetWidth();
-  int32 src_height = GetHeight();
+  int32 src_width = static_cast<int>(GetWidth());
+  int32 src_height = static_cast<int>(GetHeight());
   return libyuv::I420Copy(GetYPlane(), GetYPitch(),
                           GetUPlane(), GetUPitch(),
                           GetVPlane(), GetVPitch(),
@@ -147,7 +147,8 @@
     } else if (src_width * height < src_height * width) {
       // Reduce the input height.
       src_height = src_width * height / width;
-      int32 iheight_offset = (GetHeight() - src_height) >> 2;
+      int32 iheight_offset = static_cast<int32>(
+          (GetHeight() - src_height) >> 2);
       iheight_offset <<= 1;  // Ensure that iheight_offset is even.
       src_y += iheight_offset * GetYPitch();
       src_u += iheight_offset / 2 * GetUPitch();
@@ -160,9 +161,9 @@
   // Scale to the output I420 frame.
   libyuv::Scale(src_y, src_u, src_v,
                 GetYPitch(), GetUPitch(), GetVPitch(),
-                src_width, src_height,
+                static_cast<int>(src_width), static_cast<int>(src_height),
                 dst_y, dst_u, dst_v, dst_pitch_y, dst_pitch_u, dst_pitch_v,
-                width, height, interpolate);
+                static_cast<int>(width), static_cast<int>(height), interpolate);
 #endif
 }
 
@@ -180,7 +181,9 @@
     uint8* dst_u = dst_y + dst_width * dst_height;
     uint8* dst_v = dst_u + ((dst_width + 1) >> 1) * ((dst_height + 1) >> 1);
     StretchToPlanes(dst_y, dst_u, dst_v,
-                    dst_width, (dst_width + 1) >> 1, (dst_width + 1) >> 1,
+                    static_cast<int32>(dst_width),
+                    static_cast<int32>((dst_width + 1) >> 1),
+                    static_cast<int32>((dst_width + 1) >> 1),
                     dst_width, dst_height, interpolate, vert_crop);
   }
   return needed;
@@ -203,7 +206,8 @@
 
 VideoFrame* VideoFrame::Stretch(size_t dst_width, size_t dst_height,
                                 bool interpolate, bool vert_crop) const {
-  VideoFrame* dest = CreateEmptyFrame(dst_width, dst_height,
+  VideoFrame* dest = CreateEmptyFrame(static_cast<int>(dst_width),
+                                      static_cast<int>(dst_height),
                                       GetPixelWidth(), GetPixelHeight(),
                                       GetElapsedTime(), GetTimeStamp());
   if (dest) {
@@ -217,7 +221,9 @@
   return libyuv::I420Rect(GetYPlane(), GetYPitch(),
                           GetUPlane(), GetUPitch(),
                           GetVPlane(), GetVPitch(),
-                          0, 0, GetWidth(), GetHeight(),
+                          0, 0,
+                          static_cast<int>(GetWidth()),
+                          static_cast<int>(GetHeight()),
                           16, 128, 128) == 0;
 #else
   int uv_size = GetUPitch() * GetChromaHeight();
diff --git a/talk/media/devices/fakedevicemanager.h b/talk/media/devices/fakedevicemanager.h
index 7000ef9..0dbed43 100644
--- a/talk/media/devices/fakedevicemanager.h
+++ b/talk/media/devices/fakedevicemanager.h
@@ -152,21 +152,24 @@
   void SetAudioInputDevices(const std::vector<std::string>& devices) {
     input_devices_.clear();
     for (size_t i = 0; i < devices.size(); ++i) {
-      input_devices_.push_back(Device(devices[i], i));
+      input_devices_.push_back(Device(devices[i],
+                                      static_cast<int>(i)));
     }
     SignalDevicesChange();
   }
   void SetAudioOutputDevices(const std::vector<std::string>& devices) {
     output_devices_.clear();
     for (size_t i = 0; i < devices.size(); ++i) {
-      output_devices_.push_back(Device(devices[i], i));
+      output_devices_.push_back(Device(devices[i],
+                                       static_cast<int>(i)));
     }
     SignalDevicesChange();
   }
   void SetVideoCaptureDevices(const std::vector<std::string>& devices) {
     vidcap_devices_.clear();
     for (size_t i = 0; i < devices.size(); ++i) {
-      vidcap_devices_.push_back(Device(devices[i], i));
+      vidcap_devices_.push_back(Device(devices[i],
+                                       static_cast<int>(i)));
     }
     SignalDevicesChange();
   }
diff --git a/talk/media/devices/filevideocapturer.cc b/talk/media/devices/filevideocapturer.cc
index 8946fea..1747816 100644
--- a/talk/media/devices/filevideocapturer.cc
+++ b/talk/media/devices/filevideocapturer.cc
@@ -157,7 +157,7 @@
 
 FileVideoCapturer::~FileVideoCapturer() {
   Stop();
-  delete[] static_cast<char*> (captured_frame_.data);
+  delete[] static_cast<char*>(captured_frame_.data);
 }
 
 bool FileVideoCapturer::Init(const Device& device) {
@@ -330,7 +330,7 @@
   // 2.2 Reallocate memory for the frame data if necessary.
   if (frame_buffer_size_ < captured_frame_.data_size) {
     frame_buffer_size_ = captured_frame_.data_size;
-    delete[] static_cast<char*> (captured_frame_.data);
+    delete[] static_cast<char*>(captured_frame_.data);
     captured_frame_.data = new char[frame_buffer_size_];
   }
   // 2.3 Read the frame adata.
diff --git a/talk/media/sctp/sctpdataengine.cc b/talk/media/sctp/sctpdataengine.cc
index cb70182..935ccb8 100644
--- a/talk/media/sctp/sctpdataengine.cc
+++ b/talk/media/sctp/sctpdataengine.cc
@@ -27,7 +27,6 @@
 
 #include "talk/media/sctp/sctpdataengine.h"
 
-#include <errno.h>
 #include <stdarg.h>
 #include <stdio.h>
 #include <vector>
@@ -40,14 +39,6 @@
 #include "talk/media/base/streamparams.h"
 #include "usrsctplib/usrsctp.h"
 
-#ifdef _WIN32
-// EINPROGRESS gets #defined to WSAEINPROGRESS in some headers above, which
-// is not 112.  112 is the value defined in <errno.h>.  usrsctp uses 112 for
-// EINPROGRESS.
-#undef EINPROGRESS
-#define EINPROGRESS (112)
-#endif
-
 namespace cricket {
 
 // This is the SCTP port to use. It is passed along the wire and the listener
@@ -343,8 +334,9 @@
   sockaddr_conn remote_sconn = GetSctpSockAddr(remote_port_);
   int connect_result = usrsctp_connect(
       sock_, reinterpret_cast<sockaddr *>(&remote_sconn), sizeof(remote_sconn));
-  if (connect_result < 0 && errno != EINPROGRESS) {
-    LOG_ERRNO(LS_ERROR) << debug_name_ << "Failed usrsctp_connect";
+  if (connect_result < 0 && errno != SCTP_EINPROGRESS) {
+    LOG_ERRNO(LS_ERROR) << debug_name_ << "Failed usrsctp_connect. got errno="
+                        << errno << ", but wanted " << SCTP_EINPROGRESS;
     CloseSctpSocket();
     return false;
   }
diff --git a/talk/media/sctp/sctpdataengine.h b/talk/media/sctp/sctpdataengine.h
index 9f95666..b0d44d3 100644
--- a/talk/media/sctp/sctpdataengine.h
+++ b/talk/media/sctp/sctpdataengine.h
@@ -28,9 +28,19 @@
 #ifndef TALK_MEDIA_SCTP_SCTPDATAENGINE_H_
 #define TALK_MEDIA_SCTP_SCTPDATAENGINE_H_
 
+#include <errno.h>
 #include <string>
 #include <vector>
 
+namespace cricket {
+// Some ERRNO values get re-#defined to WSA* equivalents in some talk/
+// headers.  We save the original ones in an enum.
+enum PreservedErrno {
+  SCTP_EINPROGRESS = EINPROGRESS,
+  SCTP_EWOULDBLOCK = EWOULDBLOCK
+};
+}  // namespace cricket
+
 #include "talk/base/buffer.h"
 #include "talk/base/scoped_ptr.h"
 #include "talk/media/base/codec.h"
diff --git a/talk/media/webrtc/fakewebrtcdeviceinfo.h b/talk/media/webrtc/fakewebrtcdeviceinfo.h
index 210792a..585f31e 100644
--- a/talk/media/webrtc/fakewebrtcdeviceinfo.h
+++ b/talk/media/webrtc/fakewebrtcdeviceinfo.h
@@ -53,7 +53,7 @@
     dev->caps.push_back(cap);
   }
   virtual uint32_t NumberOfDevices() {
-    return devices_.size();
+    return static_cast<int>(devices_.size());
   }
   virtual int32_t GetDeviceName(uint32_t device_num,
                                 char* device_name,
@@ -77,7 +77,7 @@
   virtual int32_t NumberOfCapabilities(const char* device_id) {
     Device* dev = GetDeviceById(device_id);
     if (!dev) return -1;
-    return dev->caps.size();
+    return static_cast<int32_t>(dev->caps.size());
   }
   virtual int32_t GetCapability(const char* device_id,
                                 const uint32_t device_cap_num,
diff --git a/talk/media/webrtc/fakewebrtcvideoengine.h b/talk/media/webrtc/fakewebrtcvideoengine.h
index 68963f7..a64beb9 100644
--- a/talk/media/webrtc/fakewebrtcvideoengine.h
+++ b/talk/media/webrtc/fakewebrtcvideoengine.h
@@ -392,7 +392,7 @@
     return -1;
   }
 
-  int GetNumChannels() const { return channels_.size(); }
+  int GetNumChannels() const { return static_cast<int>(channels_.size()); }
   bool IsChannel(int channel) const {
     return (channels_.find(channel) != channels_.end());
   }
@@ -401,7 +401,7 @@
   }
 
   int GetLastCapturer() const { return last_capturer_; }
-  int GetNumCapturers() const { return capturers_.size(); }
+  int GetNumCapturers() const { return static_cast<int>(capturers_.size()); }
   void set_fail_alloc_capturer(bool fail_alloc_capturer) {
     fail_alloc_capturer_ = fail_alloc_capturer;
   }
@@ -497,7 +497,8 @@
   }
   int GetNumSsrcs(int channel) const {
     WEBRTC_ASSERT_CHANNEL(channel);
-    return channels_.find(channel)->second->ssrcs_.size();
+    return static_cast<int>(
+        channels_.find(channel)->second->ssrcs_.size());
   }
   bool GetIsTransmitting(int channel) const {
     WEBRTC_ASSERT_CHANNEL(channel);
@@ -518,7 +519,8 @@
   };
   int GetNumExternalDecoderRegistered(int channel) const {
     WEBRTC_ASSERT_CHANNEL(channel);
-    return channels_.find(channel)->second->ext_decoder_pl_types_.size();
+    return static_cast<int>(
+        channels_.find(channel)->second->ext_decoder_pl_types_.size());
   };
   bool ExternalEncoderRegistered(int channel,
                                  unsigned int pl_type) const {
@@ -528,13 +530,15 @@
   };
   int GetNumExternalEncoderRegistered(int channel) const {
     WEBRTC_ASSERT_CHANNEL(channel);
-    return channels_.find(channel)->second->ext_encoder_pl_types_.size();
+    return static_cast<int>(
+        channels_.find(channel)->second->ext_encoder_pl_types_.size());
   };
   int GetTotalNumExternalEncoderRegistered() const {
     std::map<int, Channel*>::const_iterator it;
     int total_num_registered = 0;
     for (it = channels_.begin(); it != channels_.end(); ++it)
-      total_num_registered += it->second->ext_encoder_pl_types_.size();
+      total_num_registered +=
+          static_cast<int>(it->second->ext_encoder_pl_types_.size());
     return total_num_registered;
   }
   void SetSendBitrates(int channel, unsigned int video_bitrate,
@@ -708,10 +712,8 @@
   WEBRTC_STUB(DeregisterDecoderObserver, (const int));
   WEBRTC_STUB(SendKeyFrame, (const int));
   WEBRTC_STUB(WaitForFirstKeyFrame, (const int, const bool));
-#ifdef USE_WEBRTC_DEV_BRANCH
   WEBRTC_STUB(StartDebugRecording, (int, const char*));
   WEBRTC_STUB(StopDebugRecording, (int));
-#endif
 
   // webrtc::ViECapture
   WEBRTC_STUB(NumberOfCaptureDevices, ());
@@ -783,12 +785,10 @@
   // Not using WEBRTC_STUB due to bool return value
   virtual bool IsIPv6Enabled(int channel) { return true; }
   WEBRTC_STUB(SetMTU, (int, unsigned int));
-#ifndef USE_WEBRTC_DEV_BRANCH
   WEBRTC_STUB(SetPacketTimeoutNotification, (const int, bool, int));
   WEBRTC_STUB(RegisterObserver, (const int, webrtc::ViENetworkObserver&));
   WEBRTC_STUB(SetPeriodicDeadOrAliveStatus, (const int, const bool,
     const unsigned int));
-#endif
 
   // webrtc::ViERender
   WEBRTC_STUB(RegisterVideoRenderModule, (webrtc::VideoRender&));
diff --git a/talk/media/webrtc/fakewebrtcvoiceengine.h b/talk/media/webrtc/fakewebrtcvoiceengine.h
index 65139aa..7202e15 100644
--- a/talk/media/webrtc/fakewebrtcvoiceengine.h
+++ b/talk/media/webrtc/fakewebrtcvoiceengine.h
@@ -609,7 +609,6 @@
   }
   WEBRTC_STUB(ReceivedRTCPPacket, (int channel, const void* data,
                                    unsigned int length));
-#ifndef USE_WEBRTC_DEV_BRANCH
   // Not using WEBRTC_STUB due to bool return value
   WEBRTC_STUB(SetPacketTimeoutNotification, (int channel, bool enable,
                                              int timeoutSeconds));
@@ -622,7 +621,6 @@
                                              int& sampleTimeSeconds));
   WEBRTC_STUB(SetPeriodicDeadOrAliveStatus, (int channel, bool enable,
                                              int sampleTimeSeconds));
-#endif
 
   // webrtc::VoERTP_RTCP
   WEBRTC_STUB(RegisterRTPObserver, (int channel,
@@ -743,11 +741,7 @@
   // webrtc::VoEVideoSync
   WEBRTC_STUB(GetPlayoutBufferSize, (int& bufferMs));
   WEBRTC_STUB(GetPlayoutTimestamp, (int channel, unsigned int& timestamp));
-#ifdef USE_WEBRTC_DEV_BRANCH
-  WEBRTC_STUB(GetRtpRtcp, (int, webrtc::RtpRtcp**, webrtc::RtpReceiver**));
-#else
   WEBRTC_STUB(GetRtpRtcp, (int, webrtc::RtpRtcp*&));
-#endif
   WEBRTC_STUB(SetInitTimestamp, (int channel, unsigned int timestamp));
   WEBRTC_STUB(SetInitSequenceNumber, (int channel, short sequenceNumber));
   WEBRTC_STUB(SetMinimumPlayoutDelay, (int channel, int delayMs));
diff --git a/talk/media/webrtc/webrtcpassthroughrender.h b/talk/media/webrtc/webrtcpassthroughrender.h
index 967a29b..e09182f 100644
--- a/talk/media/webrtc/webrtcpassthroughrender.h
+++ b/talk/media/webrtc/webrtcpassthroughrender.h
@@ -92,7 +92,7 @@
   }
 
   virtual uint32_t GetNumIncomingRenderStreams() const {
-    return stream_render_map_.size();
+    return static_cast<uint32_t>(stream_render_map_.size());
   }
 
   virtual bool HasIncomingRenderStream(const uint32_t stream_id) const;
diff --git a/talk/media/webrtc/webrtcvideoengine.cc b/talk/media/webrtc/webrtcvideoengine.cc
index 5047974..e95e142 100644
--- a/talk/media/webrtc/webrtcvideoengine.cc
+++ b/talk/media/webrtc/webrtcvideoengine.cc
@@ -257,7 +257,7 @@
   }
   int framerate() {
     talk_base::CritScope cs(&crit_);
-    return frame_rate_tracker_.units_second();
+    return static_cast<int>(frame_rate_tracker_.units_second());
   }
   VideoRenderer* renderer() {
     talk_base::CritScope cs(&crit_);
@@ -356,7 +356,7 @@
   }
   int framerate() {
     talk_base::CritScope cs(&crit_);
-    return rate_tracker_.units_second();
+    return static_cast<int>(rate_tracker_.units_second());
   }
   void GetLastFrameInfo(
       size_t* width, size_t* height, int64* elapsed_time) const {
@@ -525,17 +525,27 @@
     if (video_capturer && !video_capturer->IsScreencast()) {
       const VideoFormat* capture_format = video_capturer->GetCaptureFormat();
       if (capture_format) {
+        // TODO(thorcarpenter): This is broken. Video capturer doesn't have
+        // a capture format until the capturer is started. So, if
+        // the capturer is started immediately after calling set_video_capturer
+        // video adapter may not have the input format set, the interval may
+        // be zero, and all frames may be dropped.
+        // Consider fixing this by having video_adapter keep a pointer to the
+        // video capturer.
         video_adapter_->SetInputFormat(*capture_format);
       }
     }
   }
 
   void ApplyCpuOptions(const VideoOptions& options) {
-    bool cpu_adapt;
+    bool cpu_adapt, cpu_smoothing;
     float low, med, high;
     if (options.adapt_input_to_cpu_usage.Get(&cpu_adapt)) {
       video_adapter_->set_cpu_adaptation(cpu_adapt);
     }
+    if (options.adapt_cpu_with_smoothing.Get(&cpu_smoothing)) {
+      video_adapter_->set_cpu_smoothing(cpu_smoothing);
+    }
     if (options.process_adaptation_threshhold.Get(&med)) {
       video_adapter_->set_process_threshold(med);
     }
@@ -552,8 +562,9 @@
       *processed_frame = original_frame.Copy();
     } else {
       WebRtcVideoFrame* black_frame = new WebRtcVideoFrame();
-      black_frame->InitToBlack(original_frame.GetWidth(),
-                               original_frame.GetHeight(), 1, 1,
+      black_frame->InitToBlack(static_cast<int>(original_frame.GetWidth()),
+                               static_cast<int>(original_frame.GetHeight()),
+                               1, 1,
                                original_frame.GetElapsedTime(),
                                original_frame.GetTimeStamp());
       *processed_frame = black_frame;
@@ -675,8 +686,6 @@
   render_module_.reset(new WebRtcPassthroughRender());
   local_renderer_w_ = local_renderer_h_ = 0;
   local_renderer_ = NULL;
-  video_capturer_ = NULL;
-  frame_listeners_ = 0;
   capture_started_ = false;
   decoder_factory_ = NULL;
   encoder_factory_ = NULL;
@@ -712,7 +721,6 @@
 }
 
 WebRtcVideoEngine::~WebRtcVideoEngine() {
-  ClearCapturer();
   LOG(LS_INFO) << "WebRtcVideoEngine::~WebRtcVideoEngine";
   if (initialized_) {
     Terminate();
@@ -791,7 +799,6 @@
 void WebRtcVideoEngine::Terminate() {
   LOG(LS_INFO) << "WebRtcVideoEngine::Terminate";
   initialized_ = false;
-  SetCapture(false);
 
   if (vie_wrapper_->render()->DeRegisterVideoRenderModule(
       *render_module_.get()) != 0) {
@@ -847,132 +854,12 @@
   return channel;
 }
 
-bool WebRtcVideoEngine::SetVideoCapturer(VideoCapturer* capturer) {
-  return SetCapturer(capturer);
-}
-
-VideoCapturer* WebRtcVideoEngine::GetVideoCapturer() const {
-  return video_capturer_;
-}
-
 bool WebRtcVideoEngine::SetLocalRenderer(VideoRenderer* renderer) {
   local_renderer_w_ = local_renderer_h_ = 0;
   local_renderer_ = renderer;
   return true;
 }
 
-bool WebRtcVideoEngine::SetCapture(bool capture) {
-  bool old_capture = capture_started_;
-  capture_started_ = capture;
-  CaptureState result = UpdateCapturingState();
-  if (result == CS_FAILED || result == CS_NO_DEVICE) {
-    capture_started_ = old_capture;
-    return false;
-  }
-  return true;
-}
-
-CaptureState WebRtcVideoEngine::UpdateCapturingState() {
-  bool capture = capture_started_ && frame_listeners_;
-  CaptureState result = CS_RUNNING;
-  if (!IsCapturing() && capture) {  // Start capturing.
-    if (video_capturer_ == NULL) {
-      return CS_NO_DEVICE;
-    }
-
-    VideoFormat capture_format;
-    if (!video_capturer_->GetBestCaptureFormat(default_codec_format_,
-                                               &capture_format)) {
-      LOG(LS_WARNING) << "Unsupported format:"
-                      << " width=" << default_codec_format_.width
-                      << " height=" << default_codec_format_.height
-                      << ". Supported formats are:";
-      const std::vector<VideoFormat>* formats =
-          video_capturer_->GetSupportedFormats();
-      if (formats) {
-        for (std::vector<VideoFormat>::const_iterator i = formats->begin();
-             i != formats->end(); ++i) {
-          const VideoFormat& format = *i;
-          LOG(LS_WARNING) << "  " << GetFourccName(format.fourcc) << ":"
-                          << format.width << "x" << format.height << "x"
-                          << format.framerate();
-        }
-      }
-      return CS_FAILED;
-    }
-
-    // Start the video capturer.
-    result = video_capturer_->Start(capture_format);
-    if (CS_RUNNING != result && CS_STARTING != result) {
-      LOG(LS_ERROR) << "Failed to start the video capturer";
-      return result;
-    }
-  } else if (IsCapturing() && !capture) {  // Stop capturing.
-    video_capturer_->Stop();
-    result = CS_STOPPED;
-  }
-
-  return result;
-}
-
-bool WebRtcVideoEngine::IsCapturing() const {
-  return (video_capturer_ != NULL) && video_capturer_->IsRunning();
-}
-
-// TODO(thorcarpenter): Remove this fn, it's only used for unittests!
-void WebRtcVideoEngine::OnFrameCaptured(VideoCapturer* capturer,
-                                        const CapturedFrame* frame) {
-  // Crop to desired aspect ratio.
-  int cropped_width, cropped_height;
-  ComputeCrop(default_codec_format_.width, default_codec_format_.height,
-              frame->width, abs(frame->height),
-              frame->pixel_width, frame->pixel_height,
-              frame->rotation, &cropped_width, &cropped_height);
-
-  // This CapturedFrame* will already be in I420. In the future, when
-  // WebRtcVideoFrame has support for independent planes, we can just attach
-  // to it and update the pointers when cropping.
-  WebRtcVideoFrame i420_frame;
-  if (!i420_frame.Init(frame, cropped_width, cropped_height)) {
-    LOG(LS_ERROR) << "Couldn't convert to I420! "
-                  << cropped_width << " x " << cropped_height;
-    return;
-  }
-
-  // TODO(janahan): This is the trigger point for Tx video processing.
-  // Once the capturer refactoring is done, we will move this into the
-  // capturer...it's not there right now because that image is in not in the
-  // I420 color space.
-  // The clients that subscribe will obtain meta info from the frame.
-  // When this trigger is switched over to capturer, need to pass in the real
-  // ssrc.
-  bool drop_frame = false;
-  {
-    talk_base::CritScope cs(&signal_media_critical_);
-    SignalMediaFrame(kDummyVideoSsrc, &i420_frame, &drop_frame);
-  }
-  if (drop_frame) {
-    LOG(LS_VERBOSE) << "Media Effects dropped a frame.";
-    return;
-  }
-
-  // Send I420 frame to the local renderer.
-  if (local_renderer_) {
-    if (local_renderer_w_ != static_cast<int>(i420_frame.GetWidth()) ||
-        local_renderer_h_ != static_cast<int>(i420_frame.GetHeight())) {
-      local_renderer_->SetSize(local_renderer_w_ = i420_frame.GetWidth(),
-                               local_renderer_h_ = i420_frame.GetHeight(), 0);
-    }
-    local_renderer_->RenderFrame(&i420_frame);
-  }
-  // Send I420 frame to the registered senders.
-  talk_base::CritScope cs(&channels_crit_);
-  for (VideoChannels::iterator it = channels_.begin();
-      it != channels_.end(); ++it) {
-    if ((*it)->sending()) (*it)->SendFrame(capturer, &i420_frame);
-  }
-}
-
 const std::vector<VideoCodec>& WebRtcVideoEngine::codecs() const {
   return video_codecs_;
 }
@@ -1004,7 +891,7 @@
         const std::vector<WebRtcVideoEncoderFactory::VideoCodec>& codecs =
             encoder_factory_->codecs();
         for (size_t j = 0; j < codecs.size(); ++j) {
-          VideoCodec codec(GetExternalVideoPayloadType(j),
+          VideoCodec codec(GetExternalVideoPayloadType(static_cast<int>(j)),
                            codecs[j].name, 0, 0, 0, 0);
           if (codec.Matches(in))
             return true;
@@ -1136,7 +1023,7 @@
     for (size_t i = 0; i < codecs.size(); ++i) {
       if (_stricmp(in_codec.name.c_str(), codecs[i].name.c_str()) == 0) {
         out_codec->codecType = codecs[i].type;
-        out_codec->plType = GetExternalVideoPayloadType(i);
+        out_codec->plType = GetExternalVideoPayloadType(static_cast<int>(i));
         talk_base::strcpyn(out_codec->plName, sizeof(out_codec->plName),
                            codecs[i].name.c_str(), codecs[i].name.length());
         found = true;
@@ -1267,12 +1154,13 @@
     for (size_t i = 0; i < codecs.size(); ++i) {
       if (!found)
         found = (in_codec.name == codecs[i].name);
-      VideoCodec codec(GetExternalVideoPayloadType(i),
-                       codecs[i].name,
-                       codecs[i].max_width,
-                       codecs[i].max_height,
-                       codecs[i].max_fps,
-                       codecs.size() + ARRAY_SIZE(kVideoCodecPrefs) - i);
+      VideoCodec codec(
+          GetExternalVideoPayloadType(static_cast<int>(i)),
+          codecs[i].name,
+          codecs[i].max_width,
+          codecs[i].max_height,
+          codecs[i].max_fps,
+          static_cast<int>(codecs.size() + ARRAY_SIZE(kVideoCodecPrefs) - i));
       AddDefaultFeedbackParams(&codec);
       video_codecs_.push_back(codec);
       external_codec_names.insert(codecs[i].name);
@@ -1287,7 +1175,7 @@
     if (found && !is_external_codec) {
       VideoCodec codec(pref.payload_type, pref.name,
                        in_codec.width, in_codec.height, in_codec.framerate,
-                       ARRAY_SIZE(kVideoCodecPrefs) - i);
+                       static_cast<int>(ARRAY_SIZE(kVideoCodecPrefs) - i));
       if (_stricmp(kVp8PayloadName, codec.name.c_str()) == 0) {
         AddDefaultFeedbackParams(&codec);
       }
@@ -1298,32 +1186,6 @@
   return true;
 }
 
-bool WebRtcVideoEngine::SetCapturer(VideoCapturer* capturer) {
-  if (capturer == NULL) {
-    // Stop capturing before clearing the capturer.
-    if (!SetCapture(false)) {
-      LOG(LS_WARNING) << "Camera failed to stop";
-      return false;
-    }
-    ClearCapturer();
-    return true;
-  }
-
-  // Hook up signals and install the supplied capturer.
-  SignalCaptureStateChange.repeat(capturer->SignalStateChange);
-  capturer->SignalFrameCaptured.connect(this,
-      &WebRtcVideoEngine::OnFrameCaptured);
-  ClearCapturer();
-  video_capturer_ = capturer;
-  // Possibly restart the capturer if it is supposed to be running.
-  CaptureState result = UpdateCapturingState();
-  if (result == CS_FAILED || result == CS_NO_DEVICE) {
-    LOG(LS_WARNING) << "Camera failed to restart";
-    return false;
-  }
-  return true;
-}
-
 // Ignore spammy trace messages, mostly from the stats API when we haven't
 // gotten RTCP info yet from the remote side.
 bool WebRtcVideoEngine::ShouldIgnoreTrace(const std::string& trace) {
@@ -1340,22 +1202,7 @@
 
 int WebRtcVideoEngine::GetNumOfChannels() {
   talk_base::CritScope cs(&channels_crit_);
-  return channels_.size();
-}
-
-void WebRtcVideoEngine::IncrementFrameListeners() {
-  if (++frame_listeners_ == 1) {
-    UpdateCapturingState();
-  }
-  // In the unlikely event of wrapparound.
-  ASSERT(frame_listeners_ >= 0);
-}
-
-void WebRtcVideoEngine::DecrementFrameListeners() {
-  if (--frame_listeners_ == 0) {
-    UpdateCapturingState();
-  }
-  ASSERT(frame_listeners_ >= 0);
+  return static_cast<int>(channels_.size());
 }
 
 void WebRtcVideoEngine::Print(webrtc::TraceLevel level, const char* trace,
@@ -1384,20 +1231,6 @@
   }
 }
 
-bool WebRtcVideoEngine::RegisterProcessor(
-    VideoProcessor* video_processor) {
-  talk_base::CritScope cs(&signal_media_critical_);
-  SignalMediaFrame.connect(video_processor,
-                           &VideoProcessor::OnFrame);
-  return true;
-}
-bool WebRtcVideoEngine::UnregisterProcessor(
-    VideoProcessor* video_processor) {
-  talk_base::CritScope cs(&signal_media_critical_);
-  SignalMediaFrame.disconnect(video_processor);
-  return true;
-}
-
 webrtc::VideoDecoder* WebRtcVideoEngine::CreateExternalDecoder(
     webrtc::VideoCodecType type) {
   if (decoder_factory_ == NULL) {
@@ -1442,10 +1275,6 @@
   return false;
 }
 
-void WebRtcVideoEngine::ClearCapturer() {
-  video_capturer_ = NULL;
-}
-
 void WebRtcVideoEngine::SetExternalDecoderFactory(
     WebRtcVideoDecoderFactory* decoder_factory) {
   decoder_factory_ = decoder_factory;
@@ -1996,9 +1825,6 @@
   }
 
   send_channel->set_sending(true);
-  if (!send_channel->video_capturer()) {
-    engine_->IncrementFrameListeners();
-  }
   return true;
 }
 
@@ -2022,9 +1848,6 @@
     return false;
   }
   send_channel->set_sending(false);
-  if (!send_channel->video_capturer()) {
-    engine_->DecrementFrameListeners();
-  }
   return true;
 }
 
@@ -2185,9 +2008,6 @@
   }
   capturer->SignalVideoFrame.disconnect(this);
   send_channel->set_video_capturer(NULL);
-  if (send_channel->sending()) {
-    engine_->IncrementFrameListeners();
-  }
   const int64 timestamp = send_channel->local_stream_info()->time_stamp();
   if (send_codec_) {
     QueueBlackFrame(ssrc, timestamp, send_codec_->maxFramerate);
@@ -2261,8 +2081,8 @@
       sinfo.firs_rcvd = -1;
       sinfo.nacks_rcvd = -1;
       sinfo.rtt_ms = -1;
-      sinfo.frame_width = channel_stream_info->width();
-      sinfo.frame_height = channel_stream_info->height();
+      sinfo.frame_width = static_cast<int>(channel_stream_info->width());
+      sinfo.frame_height = static_cast<int>(channel_stream_info->height());
       sinfo.framerate_input = channel_stream_info->framerate();
       sinfo.framerate_sent = send_channel->encoder_observer()->framerate();
       sinfo.nominal_bitrate = send_channel->encoder_observer()->bitrate();
@@ -2415,9 +2235,6 @@
     return false;
   }
   VideoCapturer* old_capturer = send_channel->video_capturer();
-  if (send_channel->sending() && !old_capturer) {
-    engine_->DecrementFrameListeners();
-  }
   if (old_capturer) {
     old_capturer->SignalVideoFrame.disconnect(this);
   }
@@ -2454,9 +2271,10 @@
     which_channel = video_channel();
   }
 
-  engine()->vie()->network()->ReceivedRTPPacket(which_channel,
-                                                packet->data(),
-                                                packet->length());
+  engine()->vie()->network()->ReceivedRTPPacket(
+      which_channel,
+      packet->data(),
+      static_cast<int>(packet->length()));
 }
 
 void WebRtcVideoMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
@@ -2480,9 +2298,10 @@
   if (type == kRtcpTypeSR) {
     int which_channel = GetRecvChannelNum(ssrc);
     if (which_channel != -1 && !IsDefaultChannel(which_channel)) {
-      engine_->vie()->network()->ReceivedRTCPPacket(which_channel,
-                                                    packet->data(),
-                                                    packet->length());
+      engine_->vie()->network()->ReceivedRTCPPacket(
+          which_channel,
+          packet->data(),
+          static_cast<int>(packet->length()));
     }
   }
   // SR may continue RR and any RR entry may correspond to any one of the send
@@ -2492,9 +2311,10 @@
        iter != send_channels_.end(); ++iter) {
     WebRtcVideoChannelSendInfo* send_channel = iter->second;
     int channel_id = send_channel->channel_id();
-    engine_->vie()->network()->ReceivedRTCPPacket(channel_id,
-                                                  packet->data(),
-                                                  packet->length());
+    engine_->vie()->network()->ReceivedRTCPPacket(
+        channel_id,
+        packet->data(),
+        static_cast<int>(packet->length()));
   }
 }
 
@@ -2765,6 +2585,9 @@
     SendFrame(capturer, frame);
     return;
   }
+  // TODO(thorcarpenter): This is broken. One capturer registered on two ssrc
+  // will not send any video to the second ssrc send channel. We should remove
+  // GetSendChannel(capturer) and pass in an ssrc here.
   WebRtcVideoChannelSendInfo* send_channel = GetSendChannel(capturer);
   if (!send_channel) {
     SendFrame(capturer, frame);
@@ -2819,8 +2642,10 @@
   }
 
   // Checks if we need to reset vie send codec.
-  if (!MaybeResetVieSendCodec(send_channel, frame->GetWidth(),
-                              frame->GetHeight(), is_screencast, NULL)) {
+  if (!MaybeResetVieSendCodec(send_channel,
+                              static_cast<int>(frame->GetWidth()),
+                              static_cast<int>(frame->GetHeight()),
+                              is_screencast, NULL)) {
     LOG(LS_ERROR) << "MaybeResetVieSendCodec failed with "
                   << frame->GetWidth() << "x" << frame->GetHeight();
     return false;
@@ -2843,8 +2668,8 @@
   frame_i420.y_pitch = frame_out->GetYPitch();
   frame_i420.u_pitch = frame_out->GetUPitch();
   frame_i420.v_pitch = frame_out->GetVPitch();
-  frame_i420.width = frame_out->GetWidth();
-  frame_i420.height = frame_out->GetHeight();
+  frame_i420.width = static_cast<unsigned short>(frame_out->GetWidth());
+  frame_i420.height = static_cast<unsigned short>(frame_out->GetHeight());
 
   int64 timestamp_ntp_ms = 0;
   // TODO(justinlin): Reenable after Windows issues with clock drift are fixed.
@@ -3518,7 +3343,7 @@
 
 void WebRtcVideoMediaChannel::OnMessage(talk_base::Message* msg) {
   FlushBlackFrameData* black_frame_data =
-      static_cast<FlushBlackFrameData*> (msg->pdata);
+      static_cast<FlushBlackFrameData*>(msg->pdata);
   FlushBlackFrame(black_frame_data->ssrc, black_frame_data->timestamp);
   delete black_frame_data;
 }
@@ -3548,7 +3373,7 @@
     FlushBlackFrameData* black_frame_data = new FlushBlackFrameData(
         ssrc,
         timestamp);
-    const int delay_ms = static_cast<int> (
+    const int delay_ms = static_cast<int>(
         2 * cricket::VideoFormat::FpsToInterval(framerate) *
         talk_base::kNumMillisecsPerSec / talk_base::kNumNanosecsPerSec);
     worker_thread()->PostDelayed(delay_ms, this, 0, black_frame_data);
diff --git a/talk/media/webrtc/webrtcvideoengine.h b/talk/media/webrtc/webrtcvideoengine.h
index f2dc18c..2f0fd3e 100644
--- a/talk/media/webrtc/webrtcvideoengine.h
+++ b/talk/media/webrtc/webrtcvideoengine.h
@@ -113,16 +113,8 @@
   const std::vector<RtpHeaderExtension>& rtp_header_extensions() const;
   void SetLogging(int min_sev, const char* filter);
 
-  // If capturer is NULL, unregisters the capturer and stops capturing.
-  // Otherwise sets the capturer and starts capturing.
-  bool SetVideoCapturer(VideoCapturer* capturer);
-  VideoCapturer* GetVideoCapturer() const;
   bool SetLocalRenderer(VideoRenderer* renderer);
-  bool SetCapture(bool capture);
   sigslot::repeater2<VideoCapturer*, CaptureState> SignalCaptureStateChange;
-  CaptureState UpdateCapturingState();
-  bool IsCapturing() const;
-  void OnFrameCaptured(VideoCapturer* capturer, const CapturedFrame* frame);
 
   // Set the VoiceEngine for A/V sync. This can only be called before Init.
   bool SetVoiceEngine(WebRtcVoiceEngine* voice_engine);
@@ -137,9 +129,6 @@
   // Enable the render module with timing control.
   bool EnableTimedRender();
 
-  bool RegisterProcessor(VideoProcessor* video_processor);
-  bool UnregisterProcessor(VideoProcessor* video_processor);
-
   // Returns an external decoder for the given codec type. The return value
   // can be NULL if decoder factory is not given or it does not support the
   // codec type. The caller takes the ownership of the returned object.
@@ -175,9 +164,6 @@
   bool ShouldIgnoreTrace(const std::string& trace);
   int GetNumOfChannels();
 
-  void IncrementFrameListeners();
-  void DecrementFrameListeners();
-
   VideoFormat GetStartCaptureFormat() const { return default_codec_format_; }
 
   talk_base::CpuMonitor* cpu_monitor() { return cpu_monitor_.get(); }
@@ -209,11 +195,9 @@
   void SetTraceFilter(int filter);
   void SetTraceOptions(const std::string& options);
   bool InitVideoEngine();
-  bool SetCapturer(VideoCapturer* capturer);
 
   // webrtc::TraceCallback implementation.
   virtual void Print(webrtc::TraceLevel level, const char* trace, int length);
-  void ClearCapturer();
 
   // WebRtcVideoEncoderFactory::Observer implementation.
   virtual void OnCodecsAvailable();
@@ -234,8 +218,6 @@
   talk_base::CriticalSection channels_crit_;
   VideoChannels channels_;
 
-  VideoCapturer* video_capturer_;
-  int frame_listeners_;
   bool capture_started_;
   int local_renderer_w_;
   int local_renderer_h_;
diff --git a/talk/media/webrtc/webrtcvideoengine_unittest.cc b/talk/media/webrtc/webrtcvideoengine_unittest.cc
index 37b212f..376f295 100644
--- a/talk/media/webrtc/webrtcvideoengine_unittest.cc
+++ b/talk/media/webrtc/webrtcvideoengine_unittest.cc
@@ -202,11 +202,8 @@
   virtual cricket::VideoCodec DefaultCodec() { return kVP8Codec; }
   virtual void SetUp() {
     Base::SetUp();
-    // Need to start the capturer to allow us to pump in frames.
-    engine_.SetCapture(true);
   }
   virtual void TearDown() {
-    engine_.SetCapture(false);
     Base::TearDown();
   }
 };
@@ -1356,41 +1353,6 @@
   delete channel;
 }
 
-TEST_F(WebRtcVideoMediaChannelTest, TestVideoProcessor_DropFrames) {
-  // Connect a video processor.
-  cricket::FakeMediaProcessor vp;
-  vp.set_drop_frames(false);
-  EXPECT_TRUE(engine_.RegisterProcessor(&vp));
-  EXPECT_EQ(0, vp.dropped_frame_count());
-  // Send the first frame with default codec.
-  int packets = NumRtpPackets();
-  cricket::VideoCodec codec(DefaultCodec());
-  EXPECT_TRUE(SetOneCodec(codec));
-  EXPECT_TRUE(SetSend(true));
-  EXPECT_TRUE(channel_->SetRender(true));
-  EXPECT_EQ(0, renderer_.num_rendered_frames());
-  EXPECT_TRUE(WaitAndSendFrame(30));
-  EXPECT_FRAME_WAIT(1, codec.width, codec.height, kTimeout);
-  // Verify frame was sent.
-  EXPECT_TRUE_WAIT(NumRtpPackets() > packets, kTimeout);
-  packets = NumRtpPackets();
-  EXPECT_EQ(0, vp.dropped_frame_count());
-  // Send another frame and expect it to be sent.
-  EXPECT_TRUE(WaitAndSendFrame(30));
-  EXPECT_FRAME_WAIT(2, codec.width, codec.height, kTimeout);
-  EXPECT_TRUE_WAIT(NumRtpPackets() > packets, kTimeout);
-  packets = NumRtpPackets();
-  EXPECT_EQ(0, vp.dropped_frame_count());
-  // Attempt to send a frame and expect it to be dropped.
-  vp.set_drop_frames(true);
-  EXPECT_TRUE(WaitAndSendFrame(30));
-  DrainOutgoingPackets();
-  EXPECT_FRAME_WAIT(2, codec.width, codec.height, kTimeout);
-  EXPECT_EQ(packets, NumRtpPackets());
-  EXPECT_EQ(1, vp.dropped_frame_count());
-  // Disconnect video processor.
-  EXPECT_TRUE(engine_.UnregisterProcessor(&vp));
-}
 TEST_F(WebRtcVideoMediaChannelTest, SetRecvCodecs) {
   std::vector<cricket::VideoCodec> codecs;
   codecs.push_back(kVP8Codec);
@@ -1433,7 +1395,7 @@
 }
 
 TEST_F(WebRtcVideoMediaChannelTest, SendVp8HdAndReceiveAdaptedVp8Vga) {
-  EXPECT_TRUE(engine_.SetVideoCapturer(NULL));
+  EXPECT_TRUE(channel_->SetCapturer(kSsrc, NULL));
   channel_->UpdateAspectRatio(1280, 720);
   video_capturer_.reset(new cricket::FakeVideoCapturer);
   const std::vector<cricket::VideoFormat>* formats =
@@ -1502,33 +1464,6 @@
   Base::AddRemoveSendStreams();
 }
 
-TEST_F(WebRtcVideoMediaChannelTest, SetVideoCapturer) {
-  // Use 123 to verify there's no assumption to the module id
-  FakeWebRtcVideoCaptureModule* vcm =
-      new FakeWebRtcVideoCaptureModule(NULL, 123);
-  talk_base::scoped_ptr<cricket::WebRtcVideoCapturer> capturer(
-      new cricket::WebRtcVideoCapturer);
-  EXPECT_TRUE(capturer->Init(vcm));
-  EXPECT_TRUE(engine_.SetVideoCapturer(capturer.get()));
-  EXPECT_FALSE(engine_.IsCapturing());
-  EXPECT_TRUE(engine_.SetCapture(true));
-  cricket::VideoCodec codec(DefaultCodec());
-  EXPECT_TRUE(SetOneCodec(codec));
-  EXPECT_TRUE(channel_->SetSend(true));
-  EXPECT_TRUE(engine_.IsCapturing());
-
-  EXPECT_EQ(engine_.default_codec_format().width, vcm->cap().width);
-  EXPECT_EQ(engine_.default_codec_format().height, vcm->cap().height);
-  EXPECT_EQ(cricket::VideoFormat::IntervalToFps(
-      engine_.default_codec_format().interval),
-            vcm->cap().maxFPS);
-  EXPECT_EQ(webrtc::kVideoI420, vcm->cap().rawType);
-  EXPECT_EQ(webrtc::kVideoCodecUnknown, vcm->cap().codecType);
-
-  EXPECT_TRUE(engine_.SetVideoCapturer(NULL));
-  EXPECT_FALSE(engine_.IsCapturing());
-}
-
 TEST_F(WebRtcVideoMediaChannelTest, SimulateConference) {
   Base::SimulateConference();
 }
diff --git a/talk/media/webrtc/webrtcvideoframe.cc b/talk/media/webrtc/webrtcvideoframe.cc
index 80f2481..584aac0 100644
--- a/talk/media/webrtc/webrtcvideoframe.cc
+++ b/talk/media/webrtc/webrtcvideoframe.cc
@@ -62,8 +62,8 @@
   data_.reset(data);
   length_ = length;
   uint8_t* new_memory = reinterpret_cast<uint8_t*>(data);
-  uint32_t new_length = length;
-  uint32_t new_size = length;
+  uint32_t new_length = static_cast<int>(length);
+  uint32_t new_size = static_cast<int>(length);
   video_frame_.Swap(new_memory, new_length, new_size);
 }
 
@@ -150,7 +150,7 @@
 const uint8* WebRtcVideoFrame::GetVPlane() const {
   uint8_t* buffer = frame()->Buffer();
   if (buffer) {
-    int uv_size = GetChromaSize();
+    int uv_size = static_cast<int>(GetChromaSize());
     buffer += frame()->Width() * frame()->Height() + uv_size;
   }
   return buffer;
@@ -172,7 +172,7 @@
 uint8* WebRtcVideoFrame::GetVPlane() {
   uint8_t* buffer = frame()->Buffer();
   if (buffer) {
-    int uv_size = GetChromaSize();
+    int uv_size = static_cast<int>(GetChromaSize());
     buffer += frame()->Width() * frame()->Height() + uv_size;
   }
   return buffer;
@@ -192,7 +192,7 @@
 }
 
 bool WebRtcVideoFrame::MakeExclusive() {
-  const int length = video_buffer_->length();
+  const int length = static_cast<int>(video_buffer_->length());
   RefCountedBuffer* exclusive_buffer = new RefCountedBuffer(length);
   memcpy(exclusive_buffer->data(), video_buffer_->data(), length);
   Attach(exclusive_buffer, length, frame()->Width(), frame()->Height(),
@@ -228,7 +228,10 @@
 
   if (libyuv::ConvertFromI420(GetYPlane(), GetYPitch(), GetUPlane(),
                               GetUPitch(), GetVPlane(), GetVPitch(), buffer,
-                              stride_rgb, width, height, to_fourcc)) {
+                              stride_rgb,
+                              static_cast<int>(width),
+                              static_cast<int>(height),
+                              to_fourcc)) {
     LOG(LS_WARNING) << "RGB type not supported: " << to_fourcc;
     return 0;  // 0 indicates error
   }
diff --git a/talk/media/webrtc/webrtcvoiceengine.cc b/talk/media/webrtc/webrtcvoiceengine.cc
index f392751..c0522c0 100644
--- a/talk/media/webrtc/webrtcvoiceengine.cc
+++ b/talk/media/webrtc/webrtcvoiceengine.cc
@@ -2344,9 +2344,10 @@
   }
 
   // Pass it off to the decoder.
-  engine()->voe()->network()->ReceivedRTPPacket(which_channel,
-                                                   packet->data(),
-                                                   packet->length());
+  engine()->voe()->network()->ReceivedRTPPacket(
+      which_channel,
+      packet->data(),
+      static_cast<unsigned int>(packet->length()));
 }
 
 void WebRtcVoiceMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
@@ -2357,9 +2358,10 @@
     which_channel = voe_channel();
   }
 
-  engine()->voe()->network()->ReceivedRTCPPacket(which_channel,
-                                                    packet->data(),
-                                                    packet->length());
+  engine()->voe()->network()->ReceivedRTCPPacket(
+      which_channel,
+      packet->data(),
+      static_cast<unsigned int>(packet->length()));
 }
 
 bool WebRtcVoiceMediaChannel::MuteStream(uint32 ssrc, bool muted) {
@@ -2543,7 +2545,7 @@
         rinfo.jitter_buffer_ms = ns.currentBufferSize;
         rinfo.jitter_buffer_preferred_ms = ns.preferredBufferSize;
         rinfo.expand_rate =
-            static_cast<float> (ns.currentExpandRate) / (1 << 14);
+            static_cast<float>(ns.currentExpandRate) / (1 << 14);
       }
       if (engine()->voe()->sync()) {
         int playout_buffer_delay_ms = 0;
@@ -2757,7 +2759,7 @@
 int WebRtcSoundclipStream::Read(void *buf, int len) {
   size_t res = 0;
   mem_.Read(buf, len, &res, NULL);
-  return res;
+  return static_cast<int>(res);
 }
 
 int WebRtcSoundclipStream::Rewind() {
diff --git a/talk/p2p/base/dtlstransportchannel_unittest.cc b/talk/p2p/base/dtlstransportchannel_unittest.cc
index 8de3b07..cbc3abb 100644
--- a/talk/p2p/base/dtlstransportchannel_unittest.cc
+++ b/talk/p2p/base/dtlstransportchannel_unittest.cc
@@ -206,7 +206,8 @@
       // against, and make sure that it doesn't look like DTLS.
       memset(packet.get(), sent & 0xff, size);
       packet[0] = (srtp) ? 0x80 : 0x00;
-      talk_base::SetBE32(packet.get() + kPacketNumOffset, sent);
+      talk_base::SetBE32(packet.get() + kPacketNumOffset,
+                         static_cast<uint32>(sent));
 
       // Only set the bypass flag if we've activated DTLS.
       int flags = (identity_.get() && srtp) ? cricket::PF_SRTP_BYPASS : 0;
@@ -342,7 +343,7 @@
   }
 
   void SetChannelCount(size_t channel_ct) {
-    channel_ct_ = channel_ct;
+    channel_ct_ = static_cast<int>(channel_ct);
   }
   void PrepareDtls(bool c1, bool c2) {
     if (c1) {
diff --git a/talk/p2p/base/fakesession.h b/talk/p2p/base/fakesession.h
index 3a825dd..2146abd 100644
--- a/talk/p2p/base/fakesession.h
+++ b/talk/p2p/base/fakesession.h
@@ -175,7 +175,7 @@
     } else {
       talk_base::Thread::Current()->Send(this, 0, packet);
     }
-    return len;
+    return static_cast<int>(len);
   }
   virtual int SetOption(talk_base::Socket::Option opt, int value) {
     return true;
diff --git a/talk/p2p/base/port.cc b/talk/p2p/base/port.cc
index b310fea..0f12122 100644
--- a/talk/p2p/base/port.cc
+++ b/talk/p2p/base/port.cc
@@ -473,7 +473,7 @@
       return false;
     }
   } else if (IsGoogleIce()) {
-    int remote_frag_len = username_attr_str.size();
+    int remote_frag_len = static_cast<int>(username_attr_str.size());
     remote_frag_len -= static_cast<int>(username_fragment().size());
     if (remote_frag_len < 0)
       return false;
@@ -752,8 +752,10 @@
 
     // connection_ already holds this ping, so subtract one from count.
     if (connection_->port()->send_retransmit_count_attribute()) {
-      request->AddAttribute(new StunUInt32Attribute(STUN_ATTR_RETRANSMIT_COUNT,
-          connection_->pings_since_last_response_.size() - 1));
+      request->AddAttribute(new StunUInt32Attribute(
+          STUN_ATTR_RETRANSMIT_COUNT,
+          static_cast<uint32>(
+              connection_->pings_since_last_response_.size() - 1)));
     }
 
     // Adding ICE-specific attributes to the STUN request message.
diff --git a/talk/p2p/base/port_unittest.cc b/talk/p2p/base/port_unittest.cc
index d6aa92d..bb6b22f 100644
--- a/talk/p2p/base/port_unittest.cc
+++ b/talk/p2p/base/port_unittest.cc
@@ -187,7 +187,7 @@
       last_stun_buf_.reset(buf);
       last_stun_msg_.reset(msg);
     }
-    return size;
+    return static_cast<int>(size);
   }
   virtual int SetOption(talk_base::Socket::Option opt, int value) {
     return 0;
@@ -789,10 +789,10 @@
 
   // Send a packet.
   virtual int Send(const void *pv, size_t cb) {
-    return cb;
+    return static_cast<int>(cb);
   }
   virtual int SendTo(const void *pv, size_t cb, const SocketAddress& addr) {
-    return cb;
+    return static_cast<int>(cb);
   }
   virtual int Close() {
     return 0;
@@ -2258,4 +2258,3 @@
   EXPECT_TRUE(msg->GetByteString(STUN_ATTR_USE_CANDIDATE) != NULL);
   ch1.Stop();
 }
-
diff --git a/talk/p2p/base/portallocatorsessionproxy.cc b/talk/p2p/base/portallocatorsessionproxy.cc
index 1a201d3..d804bdc 100644
--- a/talk/p2p/base/portallocatorsessionproxy.cc
+++ b/talk/p2p/base/portallocatorsessionproxy.cc
@@ -115,7 +115,7 @@
 }
 
 void PortAllocatorSessionMuxer::OnMessage(talk_base::Message *pmsg) {
-  ProxyObjData* proxy = static_cast<ProxyObjData*> (pmsg->pdata);
+  ProxyObjData* proxy = static_cast<ProxyObjData*>(pmsg->pdata);
   switch (pmsg->message_id) {
     case MSG_SEND_ALLOCATION_DONE:
       SendAllocationDone_w(proxy->data());
diff --git a/talk/p2p/base/portallocatorsessionproxy_unittest.cc b/talk/p2p/base/portallocatorsessionproxy_unittest.cc
index fc6dc59..2d3ec3d 100644
--- a/talk/p2p/base/portallocatorsessionproxy_unittest.cc
+++ b/talk/p2p/base/portallocatorsessionproxy_unittest.cc
@@ -63,7 +63,7 @@
   void OnCandidatesReady(PortAllocatorSession* session,
                          const std::vector<Candidate>& candidates) {
     EXPECT_EQ(proxy_session_, session);
-    candidates_count_ += candidates.size();
+    candidates_count_ += static_cast<int>(candidates.size());
   }
   void OnCandidatesAllocationDone(PortAllocatorSession* session) {
     EXPECT_EQ(proxy_session_, session);
diff --git a/talk/p2p/base/pseudotcp.cc b/talk/p2p/base/pseudotcp.cc
index 2cf2799..6a2c1d2 100644
--- a/talk/p2p/base/pseudotcp.cc
+++ b/talk/p2p/base/pseudotcp.cc
@@ -428,7 +428,7 @@
 uint32 PseudoTcp::GetBytesBufferedNotSent() const {
   size_t buffered_bytes = 0;
   m_sbuf.GetBuffered(&buffered_bytes);
-  return m_snd_una + buffered_bytes - m_snd_nxt;
+  return static_cast<uint32>(m_snd_una + buffered_bytes - m_snd_nxt);
 }
 
 uint32 PseudoTcp::GetRoundTripTimeEstimateMs() const {
@@ -461,15 +461,16 @@
 
   if (uint32(available_space) - m_rcv_wnd >=
       talk_base::_min<uint32>(m_rbuf_len / 2, m_mss)) {
-    bool bWasClosed = (m_rcv_wnd == 0); // !?! Not sure about this was closed business
-    m_rcv_wnd = available_space;
+    // TODO(jbeda): !?! Not sure about this was closed business
+    bool bWasClosed = (m_rcv_wnd == 0);
+    m_rcv_wnd = static_cast<uint32>(available_space);
 
     if (bWasClosed) {
       attemptSend(sfImmediateAck);
     }
   }
 
-  return read;
+  return static_cast<int>(read);
 }
 
 int PseudoTcp::Send(const char* buffer, size_t len) {
@@ -516,18 +517,19 @@
 
   // We can concatenate data if the last segment is the same type
   // (control v. regular data), and has not been transmitted yet
-  if (!m_slist.empty() && (m_slist.back().bCtrl == bCtrl) && (m_slist.back().xmit == 0)) {
+  if (!m_slist.empty() && (m_slist.back().bCtrl == bCtrl) &&
+      (m_slist.back().xmit == 0)) {
     m_slist.back().len += len;
   } else {
     size_t snd_buffered = 0;
     m_sbuf.GetBuffered(&snd_buffered);
-    SSegment sseg(m_snd_una + snd_buffered, len, bCtrl);
+    SSegment sseg(static_cast<uint32>(m_snd_una + snd_buffered), len, bCtrl);
     m_slist.push_back(sseg);
   }
 
   size_t written = 0;
   m_sbuf.Write(data, len, &written, NULL);
-  return written;
+  return static_cast<uint32>(written);
 }
 
 IPseudoTcpNotify::WriteResult PseudoTcp::packet(uint32 seq, uint8 flags,
@@ -1184,8 +1186,8 @@
     buf.WriteUInt8(1);
     buf.WriteUInt8(m_rwnd_scale);
   }
-  m_snd_wnd = buf.Length();
-  queue(buf.Data(), buf.Length(), true);
+  m_snd_wnd = static_cast<uint32>(buf.Length());
+  queue(buf.Data(), static_cast<uint32>(buf.Length()), true);
 }
 
 void
@@ -1290,7 +1292,7 @@
 
   size_t available_space = 0;
   m_rbuf.GetWriteRemaining(&available_space);
-  m_rcv_wnd = available_space;
+  m_rcv_wnd = static_cast<uint32>(available_space);
 }
 
 }  // namespace cricket
diff --git a/talk/p2p/base/pseudotcp_unittest.cc b/talk/p2p/base/pseudotcp_unittest.cc
index 09eac16..b9a672e 100644
--- a/talk/p2p/base/pseudotcp_unittest.cc
+++ b/talk/p2p/base/pseudotcp_unittest.cc
@@ -319,7 +319,7 @@
           LOG(LS_VERBOSE) << "Flow Controlled";
         }
       } else {
-        sent = tosend = 0;
+        sent = static_cast<int>(tosend = 0);
       }
     } while (sent > 0);
     *done = (tosend == 0);
@@ -439,7 +439,7 @@
           LOG(LS_VERBOSE) << "Flow Controlled";
         }
       } else {
-        sent = tosend = 0;
+        sent = static_cast<int>(tosend = 0);
       }
     } while (sent > 0);
   }
@@ -507,11 +507,11 @@
   }
 
   uint32 EstimateReceiveWindowSize() const {
-    return recv_position_[0];
+    return static_cast<uint32>(recv_position_[0]);
   }
 
   uint32 EstimateSendWindowSize() const {
-    return send_position_[0] - recv_position_[0];
+    return static_cast<uint32>(send_position_[0] - recv_position_[0]);
   }
 
  private:
@@ -566,12 +566,13 @@
           LOG(LS_VERBOSE) << "Flow Controlled";
         }
       } else {
-        sent = tosend = 0;
+        sent = static_cast<int>(tosend = 0);
       }
     } while (sent > 0);
     // At this point, we've filled up the available space in the send queue.
 
-    int message_queue_size = talk_base::Thread::Current()->size();
+    int message_queue_size =
+        static_cast<int>(talk_base::Thread::Current()->size());
     // The message queue will always have at least 2 messages, an RCLOCK and
     // an LCLOCK, since they are added back on the delay queue at the same time
     // they are pulled off and therefore are never really removed.
diff --git a/talk/p2p/base/relayport.cc b/talk/p2p/base/relayport.cc
index 7abe942..0cd40e5 100644
--- a/talk/p2p/base/relayport.cc
+++ b/talk/p2p/base/relayport.cc
@@ -349,7 +349,7 @@
   }
   // The caller of the function is expecting the number of user data bytes,
   // rather than the size of the packet.
-  return size;
+  return static_cast<int>(size);
 }
 
 int RelayPort::SetOption(talk_base::Socket::Option opt, int value) {
diff --git a/talk/p2p/base/relayserver.cc b/talk/p2p/base/relayserver.cc
index 0470e9e..2c05f23 100644
--- a/talk/p2p/base/relayserver.cc
+++ b/talk/p2p/base/relayserver.cc
@@ -162,7 +162,7 @@
 }
 
 int RelayServer::GetConnectionCount() const {
-  return connections_.size();
+  return static_cast<int>(connections_.size());
 }
 
 talk_base::SocketAddressPair RelayServer::GetConnection(int connection) const {
diff --git a/talk/p2p/base/relayserver_unittest.cc b/talk/p2p/base/relayserver_unittest.cc
index 349fe08..7580e45 100644
--- a/talk/p2p/base/relayserver_unittest.cc
+++ b/talk/p2p/base/relayserver_unittest.cc
@@ -95,12 +95,12 @@
   void Send1(const StunMessage* msg) {
     talk_base::ByteBuffer buf;
     msg->Write(&buf);
-    SendRaw1(buf.Data(), buf.Length());
+    SendRaw1(buf.Data(), static_cast<int>(buf.Length()));
   }
   void Send2(const StunMessage* msg) {
     talk_base::ByteBuffer buf;
     msg->Write(&buf);
-    SendRaw2(buf.Data(), buf.Length());
+    SendRaw2(buf.Data(), static_cast<int>(buf.Length()));
   }
   void SendRaw1(const char* data, int len) {
     return Send(client1_.get(), data, len, server_int_addr);
@@ -192,7 +192,7 @@
 TEST_F(RelayServerTest, TestBadRequest) {
   talk_base::scoped_ptr<StunMessage> res;
 
-  SendRaw1(bad, std::strlen(bad));
+  SendRaw1(bad, static_cast<int>(std::strlen(bad)));
   res.reset(Receive1());
 
   ASSERT_TRUE(!res);
@@ -335,7 +335,7 @@
   Allocate();
   Bind();
 
-  SendRaw1(bad, std::strlen(bad));
+  SendRaw1(bad, static_cast<int>(std::strlen(bad)));
   EXPECT_TRUE(Receive1() == NULL);
   EXPECT_TRUE(Receive2() == NULL);
 }
@@ -481,7 +481,7 @@
 
     Send1(req.get());
     EXPECT_EQ(msg1, ReceiveRaw2());
-    SendRaw2(msg2, std::strlen(msg2));
+    SendRaw2(msg2, static_cast<int>(std::strlen(msg2)));
     res.reset(Receive1());
 
     ASSERT_TRUE(res);
@@ -534,6 +534,6 @@
   EXPECT_EQ("Operation Not Supported", err->reason());
 
   // Also verify that traffic from the external client is ignored.
-  SendRaw2(msg2, std::strlen(msg2));
+  SendRaw2(msg2, static_cast<int>(std::strlen(msg2)));
   EXPECT_TRUE(ReceiveRaw1().empty());
 }
diff --git a/talk/p2p/base/session_unittest.cc b/talk/p2p/base/session_unittest.cc
index 73933bb..69028ac 100644
--- a/talk/p2p/base/session_unittest.cc
+++ b/talk/p2p/base/session_unittest.cc
@@ -921,7 +921,7 @@
   }
 
   uint32 sent_stanza_count() const {
-    return sent_stanzas.size();
+    return static_cast<uint32>(sent_stanzas.size());
   }
 
   const buzz::XmlElement* stanza() const {
diff --git a/talk/p2p/base/stun.cc b/talk/p2p/base/stun.cc
index 06a71a1..2a0f6d9 100644
--- a/talk/p2p/base/stun.cc
+++ b/talk/p2p/base/stun.cc
@@ -98,7 +98,7 @@
   if (attr_length % 4 != 0) {
     attr_length += (4 - (attr_length % 4));
   }
-  length_ += attr_length + 4;
+  length_ += static_cast<uint16>(attr_length + 4);
   return true;
 }
 
@@ -203,7 +203,8 @@
     //     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     //     |0 0|     STUN Message Type     |         Message Length        |
     //     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-    talk_base::SetBE16(temp_data.get() + 2, new_adjusted_len);
+    talk_base::SetBE16(temp_data.get() + 2,
+                       static_cast<uint16>(new_adjusted_len));
   }
 
   char hmac[kStunMessageIntegritySize];
@@ -238,8 +239,8 @@
   if (!Write(&buf))
     return false;
 
-  int msg_len_for_hmac = buf.Length() -
-      kStunAttributeHeaderSize - msg_integrity_attr->length();
+  int msg_len_for_hmac = static_cast<int>(
+      buf.Length() - kStunAttributeHeaderSize - msg_integrity_attr->length());
   char hmac[kStunMessageIntegritySize];
   size_t ret = talk_base::ComputeHmac(talk_base::DIGEST_SHA_1,
                                       key, keylen,
@@ -299,8 +300,8 @@
   if (!Write(&buf))
     return false;
 
-  int msg_len_for_crc32 = buf.Length() -
-      kStunAttributeHeaderSize - fingerprint_attr->length();
+  int msg_len_for_crc32 = static_cast<int>(
+      buf.Length() - kStunAttributeHeaderSize - fingerprint_attr->length());
   uint32 c = talk_base::ComputeCrc32(buf.Data(), msg_len_for_crc32);
 
   // Insert the correct CRC-32, XORed with a constant, into the attribute.
@@ -380,7 +381,7 @@
 
   for (size_t i = 0; i < attrs_->size(); ++i) {
     buf->WriteUInt16((*attrs_)[i]->type());
-    buf->WriteUInt16((*attrs_)[i]->length());
+    buf->WriteUInt16(static_cast<uint16>((*attrs_)[i]->length()));
     if (!(*attrs_)[i]->Write(buf))
       return false;
   }
@@ -408,7 +409,8 @@
 
 StunAttribute* StunMessage::CreateAttribute(int type, size_t length) /*const*/ {
   StunAttributeValueType value_type = GetAttributeValueType(type);
-  return StunAttribute::Create(value_type, type, length, this);
+  return StunAttribute::Create(value_type, type,
+                               static_cast<uint16>(length), this);
 }
 
 const StunAttribute* StunMessage::GetAttribute(int type) const {
@@ -767,7 +769,7 @@
 void StunByteStringAttribute::SetBytes(char* bytes, size_t length) {
   delete [] bytes_;
   bytes_ = bytes;
-  SetLength(length);
+  SetLength(static_cast<uint16>(length));
 }
 
 StunErrorCodeAttribute::StunErrorCodeAttribute(uint16 type, int code,
diff --git a/talk/p2p/base/stun_unittest.cc b/talk/p2p/base/stun_unittest.cc
index 43db959..6a5bcd9 100644
--- a/talk/p2p/base/stun_unittest.cc
+++ b/talk/p2p/base/stun_unittest.cc
@@ -924,7 +924,7 @@
   talk_base::ByteBuffer out;
   EXPECT_TRUE(msg.Write(&out));
   ASSERT_EQ(out.Length(), sizeof(kStunMessageWithIPv6MappedAddress));
-  int len1 = out.Length();
+  int len1 = static_cast<int>(out.Length());
   std::string bytes;
   out.ReadString(&bytes, len1);
   ASSERT_EQ(0, std::memcmp(bytes.c_str(),
@@ -955,7 +955,7 @@
   talk_base::ByteBuffer out;
   EXPECT_TRUE(msg.Write(&out));
   ASSERT_EQ(out.Length(), sizeof(kStunMessageWithIPv4MappedAddress));
-  int len1 = out.Length();
+  int len1 = static_cast<int>(out.Length());
   std::string bytes;
   out.ReadString(&bytes, len1);
   ASSERT_EQ(0, std::memcmp(bytes.c_str(),
@@ -986,7 +986,7 @@
   talk_base::ByteBuffer out;
   EXPECT_TRUE(msg.Write(&out));
   ASSERT_EQ(out.Length(), sizeof(kStunMessageWithIPv6XorMappedAddress));
-  int len1 = out.Length();
+  int len1 = static_cast<int>(out.Length());
   std::string bytes;
   out.ReadString(&bytes, len1);
   ASSERT_EQ(0, std::memcmp(bytes.c_str(),
@@ -1017,7 +1017,7 @@
   talk_base::ByteBuffer out;
   EXPECT_TRUE(msg.Write(&out));
   ASSERT_EQ(out.Length(), sizeof(kStunMessageWithIPv4XorMappedAddress));
-  int len1 = out.Length();
+  int len1 = static_cast<int>(out.Length());
   std::string bytes;
   out.ReadString(&bytes, len1);
   ASSERT_EQ(0, std::memcmp(bytes.c_str(),
diff --git a/talk/p2p/base/stunserver_unittest.cc b/talk/p2p/base/stunserver_unittest.cc
index 1d2cd0d..7f4db3b 100644
--- a/talk/p2p/base/stunserver_unittest.cc
+++ b/talk/p2p/base/stunserver_unittest.cc
@@ -58,7 +58,7 @@
   void Send(const StunMessage& msg) {
     talk_base::ByteBuffer buf;
     msg.Write(&buf);
-    Send(buf.Data(), buf.Length());
+    Send(buf.Data(), static_cast<int>(buf.Length()));
   }
   void Send(const char* buf, int len) {
     client_->SendTo(buf, len, server_addr);
@@ -113,7 +113,7 @@
   const char* bad = "this is a completely nonsensical message whose only "
                     "purpose is to make the parser go 'ack'.  it doesn't "
                     "look anything like a normal stun message";
-  Send(bad, std::strlen(bad));
+  Send(bad, static_cast<int>(std::strlen(bad)));
 
   StunMessage* msg = Receive();
   ASSERT_TRUE(msg == NULL);
diff --git a/talk/p2p/base/turnport.cc b/talk/p2p/base/turnport.cc
index 9fad274..6c1165d 100644
--- a/talk/p2p/base/turnport.cc
+++ b/talk/p2p/base/turnport.cc
@@ -332,7 +332,7 @@
 
   // The caller of the function is expecting the number of user data bytes,
   // rather than the size of the packet.
-  return size;
+  return static_cast<int>(size);
 }
 
 void TurnPort::OnReadPacket(talk_base::AsyncPacketSocket* socket,
@@ -901,7 +901,7 @@
   } else {
     // If the channel is bound, we can send the data as a Channel Message.
     buf.WriteUInt16(channel_id_);
-    buf.WriteUInt16(size);
+    buf.WriteUInt16(static_cast<uint16>(size));
     buf.WriteBytes(reinterpret_cast<const char*>(data), size);
   }
   return port_->Send(buf.Data(), buf.Length());
diff --git a/talk/p2p/base/turnserver.cc b/talk/p2p/base/turnserver.cc
index e82455a..565a087 100644
--- a/talk/p2p/base/turnserver.cc
+++ b/talk/p2p/base/turnserver.cc
@@ -847,7 +847,7 @@
     // There is a channel bound to this address. Send as a channel message.
     talk_base::ByteBuffer buf;
     buf.WriteUInt16(channel->id());
-    buf.WriteUInt16(size);
+    buf.WriteUInt16(static_cast<uint16>(size));
     buf.WriteBytes(data, size);
     server_->Send(&conn_, buf);
   } else if (HasPermission(addr.ipaddr())) {
diff --git a/talk/session/media/channel.cc b/talk/session/media/channel.cc
index 1048cdf..66f4054 100644
--- a/talk/session/media/channel.cc
+++ b/talk/session/media/channel.cc
@@ -615,7 +615,7 @@
 bool BaseChannel::PacketIsRtcp(const TransportChannel* channel,
                                const char* data, size_t len) {
   return (channel == rtcp_transport_channel_ ||
-          rtcp_mux_filter_.DemuxRtcp(data, len));
+          rtcp_mux_filter_.DemuxRtcp(data, static_cast<int>(len)));
 }
 
 bool BaseChannel::SendPacket(bool rtcp, talk_base::Buffer* packet) {
@@ -669,9 +669,10 @@
   if (srtp_filter_.IsActive()) {
     bool res;
     char* data = packet->data();
-    int len = packet->length();
+    int len = static_cast<int>(packet->length());
     if (!rtcp) {
-      res = srtp_filter_.ProtectRtp(data, len, packet->capacity(), &len);
+      res = srtp_filter_.ProtectRtp(data, len,
+                                    static_cast<int>(packet->capacity()), &len);
       if (!res) {
         int seq_num = -1;
         uint32 ssrc = 0;
@@ -683,7 +684,9 @@
         return false;
       }
     } else {
-      res = srtp_filter_.ProtectRtcp(data, len, packet->capacity(), &len);
+      res = srtp_filter_.ProtectRtcp(data, len,
+                                     static_cast<int>(packet->capacity()),
+                                     &len);
       if (!res) {
         int type = -1;
         GetRtcpType(data, len, &type);
@@ -761,7 +764,7 @@
   // Unprotect the packet, if needed.
   if (srtp_filter_.IsActive()) {
     char* data = packet->data();
-    int len = packet->length();
+    int len = static_cast<int>(packet->length());
     bool res;
     if (!rtcp) {
       res = srtp_filter_.UnprotectRtp(data, len, &len);
@@ -1009,15 +1012,21 @@
   }
 
   if (rtcp_channel) {
-    ret = srtp_filter_.SetRtcpParams(selected_cipher,
-      &(*send_key)[0], send_key->size(),
-      selected_cipher,
-      &(*recv_key)[0], recv_key->size());
+    ret = srtp_filter_.SetRtcpParams(
+        selected_cipher,
+        &(*send_key)[0],
+        static_cast<int>(send_key->size()),
+        selected_cipher,
+        &(*recv_key)[0],
+        static_cast<int>(recv_key->size()));
   } else {
-    ret = srtp_filter_.SetRtpParams(selected_cipher,
-      &(*send_key)[0], send_key->size(),
-      selected_cipher,
-      &(*recv_key)[0], recv_key->size());
+    ret = srtp_filter_.SetRtpParams(
+        selected_cipher,
+        &(*send_key)[0],
+        static_cast<int>(send_key->size()),
+        selected_cipher,
+        &(*recv_key)[0],
+        static_cast<int>(recv_key->size()));
   }
 
   if (!ret)
diff --git a/talk/session/media/channel_unittest.cc b/talk/session/media/channel_unittest.cc
index c9918f8..378f552 100644
--- a/talk/session/media/channel_unittest.cc
+++ b/talk/session/media/channel_unittest.cc
@@ -377,64 +377,78 @@
   }
 
   bool SendRtp1() {
-    return media_channel1_->SendRtp(rtp_packet_.c_str(), rtp_packet_.size());
+    return media_channel1_->SendRtp(rtp_packet_.c_str(),
+                                    static_cast<int>(rtp_packet_.size()));
   }
   bool SendRtp2() {
-    return media_channel2_->SendRtp(rtp_packet_.c_str(), rtp_packet_.size());
+    return media_channel2_->SendRtp(rtp_packet_.c_str(),
+                                    static_cast<int>(rtp_packet_.size()));
   }
   bool SendRtcp1() {
-    return media_channel1_->SendRtcp(rtcp_packet_.c_str(), rtcp_packet_.size());
+    return media_channel1_->SendRtcp(rtcp_packet_.c_str(),
+                                     static_cast<int>(rtcp_packet_.size()));
   }
   bool SendRtcp2() {
-    return media_channel2_->SendRtcp(rtcp_packet_.c_str(), rtcp_packet_.size());
+    return media_channel2_->SendRtcp(rtcp_packet_.c_str(),
+                                     static_cast<int>(rtcp_packet_.size()));
   }
   // Methods to send custom data.
   bool SendCustomRtp1(uint32 ssrc, int sequence_number) {
     std::string data(CreateRtpData(ssrc, sequence_number));
-    return media_channel1_->SendRtp(data.c_str(), data.size());
+    return media_channel1_->SendRtp(data.c_str(),
+                                    static_cast<int>(data.size()));
   }
   bool SendCustomRtp2(uint32 ssrc, int sequence_number) {
     std::string data(CreateRtpData(ssrc, sequence_number));
-    return media_channel2_->SendRtp(data.c_str(), data.size());
+    return media_channel2_->SendRtp(data.c_str(),
+                                    static_cast<int>(data.size()));
   }
   bool SendCustomRtcp1(uint32 ssrc) {
     std::string data(CreateRtcpData(ssrc));
-    return media_channel1_->SendRtcp(data.c_str(), data.size());
+    return media_channel1_->SendRtcp(data.c_str(),
+                                     static_cast<int>(data.size()));
   }
   bool SendCustomRtcp2(uint32 ssrc) {
     std::string data(CreateRtcpData(ssrc));
-    return media_channel2_->SendRtcp(data.c_str(), data.size());
+    return media_channel2_->SendRtcp(data.c_str(),
+                                     static_cast<int>(data.size()));
   }
   bool CheckRtp1() {
-    return media_channel1_->CheckRtp(rtp_packet_.c_str(), rtp_packet_.size());
+    return media_channel1_->CheckRtp(rtp_packet_.c_str(),
+                                     static_cast<int>(rtp_packet_.size()));
   }
   bool CheckRtp2() {
-    return media_channel2_->CheckRtp(rtp_packet_.c_str(), rtp_packet_.size());
+    return media_channel2_->CheckRtp(rtp_packet_.c_str(),
+                                     static_cast<int>(rtp_packet_.size()));
   }
   bool CheckRtcp1() {
     return media_channel1_->CheckRtcp(rtcp_packet_.c_str(),
-                                      rtcp_packet_.size());
+                                      static_cast<int>(rtcp_packet_.size()));
   }
   bool CheckRtcp2() {
     return media_channel2_->CheckRtcp(rtcp_packet_.c_str(),
-                                      rtcp_packet_.size());
+                                      static_cast<int>(rtcp_packet_.size()));
   }
   // Methods to check custom data.
   bool CheckCustomRtp1(uint32 ssrc, int sequence_number) {
     std::string data(CreateRtpData(ssrc, sequence_number));
-    return media_channel1_->CheckRtp(data.c_str(), data.size());
+    return media_channel1_->CheckRtp(data.c_str(),
+                                     static_cast<int>(data.size()));
   }
   bool CheckCustomRtp2(uint32 ssrc, int sequence_number) {
     std::string data(CreateRtpData(ssrc, sequence_number));
-    return media_channel2_->CheckRtp(data.c_str(), data.size());
+    return media_channel2_->CheckRtp(data.c_str(),
+                                     static_cast<int>(data.size()));
   }
   bool CheckCustomRtcp1(uint32 ssrc) {
     std::string data(CreateRtcpData(ssrc));
-    return media_channel1_->CheckRtcp(data.c_str(), data.size());
+    return media_channel1_->CheckRtcp(data.c_str(),
+                                      static_cast<int>(data.size()));
   }
   bool CheckCustomRtcp2(uint32 ssrc) {
     std::string data(CreateRtcpData(ssrc));
-    return media_channel2_->CheckRtcp(data.c_str(), data.size());
+    return media_channel2_->CheckRtcp(data.c_str(),
+                                      static_cast<int>(data.size()));
   }
   std::string CreateRtpData(uint32 ssrc, int sequence_number) {
     std::string data(rtp_packet_);
@@ -1744,7 +1758,7 @@
 
   void TestSrtpError() {
     static const unsigned char kBadPacket[] = {
-      0x90, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+      0x84, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
     };
     CreateChannels(RTCP | SECURE, RTCP | SECURE);
     EXPECT_FALSE(channel1_->secure());
diff --git a/talk/session/media/channelmanager.cc b/talk/session/media/channelmanager.cc
index 063a6c8..88e7f79 100644
--- a/talk/session/media/channelmanager.cc
+++ b/talk/session/media/channelmanager.cc
@@ -713,26 +713,6 @@
   return ret;
 }
 
-bool ChannelManager::SetVideoCapturer(VideoCapturer* capturer) {
-  bool ret = true;
-  if (initialized_) {
-    ret = worker_thread_->Invoke<bool>(
-        Bind(&MediaEngineInterface::SetVideoCapturer,
-             media_engine_.get(), capturer));
-  }
-  return ret;
-}
-
-bool ChannelManager::SetVideoCapture(bool capture) {
-  bool ret = initialized_ && worker_thread_->Invoke<bool>(
-      Bind(&MediaEngineInterface::SetVideoCapture,
-           media_engine_.get(), capture));
-  if (ret) {
-    capturing_ = capture;
-  }
-  return ret;
-}
-
 void ChannelManager::SetVoiceLogging(int level, const char* filter) {
   if (initialized_) {
     worker_thread_->Invoke<void>(
diff --git a/talk/session/media/channelmanager.h b/talk/session/media/channelmanager.h
index 6d53c63..e5e6e44 100644
--- a/talk/session/media/channelmanager.h
+++ b/talk/session/media/channelmanager.h
@@ -166,8 +166,6 @@
   // Sets the externally provided video capturer. The ssrc is the ssrc of the
   // (video) stream for which the video capturer should be set.
   bool SetVideoCapturer(VideoCapturer* capturer);
-  // Starts and stops the local camera and renders it to the local renderer.
-  bool SetVideoCapture(bool capture);
   bool capturing() const { return capturing_; }
 
   // Configures the logging output of the mediaengine(s).
diff --git a/talk/session/media/channelmanager_unittest.cc b/talk/session/media/channelmanager_unittest.cc
index 20db58d..32321eb 100644
--- a/talk/session/media/channelmanager_unittest.cc
+++ b/talk/session/media/channelmanager_unittest.cc
@@ -524,20 +524,6 @@
   EXPECT_STREQ("test-video", fme_->video_logfilter().c_str());
 }
 
-// Test that SetVideoCapture passes through the right value.
-TEST_F(ChannelManagerTest, SetVideoCapture) {
-  // Should fail until we are initialized.
-  EXPECT_FALSE(fme_->capture());
-  EXPECT_FALSE(cm_->SetVideoCapture(true));
-  EXPECT_FALSE(fme_->capture());
-  EXPECT_TRUE(cm_->Init());
-  EXPECT_FALSE(fme_->capture());
-  EXPECT_TRUE(cm_->SetVideoCapture(true));
-  EXPECT_TRUE(fme_->capture());
-  EXPECT_TRUE(cm_->SetVideoCapture(false));
-  EXPECT_FALSE(fme_->capture());
-}
-
 // Test that the Video/Voice Processors register and unregister
 TEST_F(ChannelManagerTest, RegisterProcessors) {
   cricket::FakeMediaProcessor fmp;
diff --git a/talk/session/media/mediasession.cc b/talk/session/media/mediasession.cc
index 3d00418..2c087e9 100644
--- a/talk/session/media/mediasession.cc
+++ b/talk/session/media/mediasession.cc
@@ -92,7 +92,7 @@
 #ifdef HAVE_SRTP
 static bool AddCryptoParams(const std::string& cipher_suite,
                             CryptoParamsVec *out) {
-  int size = out->size();
+  int size = static_cast<int>(out->size());
 
   out->resize(size + 1);
   return CreateCryptoParams(size, cipher_suite, &out->at(size));
diff --git a/talk/session/media/mediasession_unittest.cc b/talk/session/media/mediasession_unittest.cc
index 5b0a859..6e04915 100644
--- a/talk/session/media/mediasession_unittest.cc
+++ b/talk/session/media/mediasession_unittest.cc
@@ -329,11 +329,11 @@
     }
     ASSERT_TRUE(desc.get() != NULL);
     const cricket::MediaContentDescription* audio_media_desc =
-        static_cast<const cricket::MediaContentDescription*> (
+        static_cast<const cricket::MediaContentDescription*>(
             desc.get()->GetContentDescriptionByName("audio"));
     ASSERT_TRUE(audio_media_desc != NULL);
     const cricket::MediaContentDescription* video_media_desc =
-        static_cast<const cricket::MediaContentDescription*> (
+        static_cast<const cricket::MediaContentDescription*>(
             desc.get()->GetContentDescriptionByName("video"));
     ASSERT_TRUE(video_media_desc != NULL);
     EXPECT_TRUE(CompareCryptoParams(audio_media_desc->cryptos(),
@@ -345,7 +345,7 @@
     // Verify the selected crypto is one from the reference audio
     // media content.
     const cricket::MediaContentDescription* ref_audio_media_desc =
-        static_cast<const cricket::MediaContentDescription*> (
+        static_cast<const cricket::MediaContentDescription*>(
             ref_desc.get()->GetContentDescriptionByName("audio"));
     bool found = false;
     for (size_t i = 0; i < ref_audio_media_desc->cryptos().size(); ++i) {
@@ -394,7 +394,7 @@
     const cricket::ContentDescription* description = content->description;
     ASSERT(description != NULL);
     const cricket::AudioContentDescription* audio_content_desc =
-        static_cast<const cricket::AudioContentDescription*> (description);
+        static_cast<const cricket::AudioContentDescription*>(description);
     ASSERT(audio_content_desc != NULL);
     for (size_t i = 0; i < audio_content_desc->codecs().size(); ++i) {
       if (audio_content_desc->codecs()[i].name == "CN")
@@ -1751,7 +1751,7 @@
   audio_media_desc = static_cast<const cricket::MediaContentDescription*>(
       offer->GetContentDescriptionByName("audio"));
   ASSERT_TRUE(audio_media_desc != NULL);
-  video_media_desc = static_cast<const cricket::MediaContentDescription*> (
+  video_media_desc = static_cast<const cricket::MediaContentDescription*>(
       offer->GetContentDescriptionByName("video"));
   ASSERT_TRUE(video_media_desc != NULL);
   EXPECT_EQ(2u, audio_media_desc->cryptos().size());
@@ -1768,10 +1768,10 @@
   answer.reset(f2_.CreateAnswer(offer.get(), options, NULL));
   ASSERT_TRUE(answer.get() != NULL);
 
-  audio_media_desc = static_cast<const cricket::MediaContentDescription*> (
+  audio_media_desc = static_cast<const cricket::MediaContentDescription*>(
       answer->GetContentDescriptionByName("audio"));
   ASSERT_TRUE(audio_media_desc != NULL);
-  video_media_desc = static_cast<const cricket::MediaContentDescription*> (
+  video_media_desc = static_cast<const cricket::MediaContentDescription*>(
       answer->GetContentDescriptionByName("video"));
   ASSERT_TRUE(video_media_desc != NULL);
   EXPECT_EQ(1u, audio_media_desc->cryptos().size());
@@ -1789,10 +1789,10 @@
   answer.reset(f2_.CreateAnswer(offer.get(), options, NULL));
   ASSERT_TRUE(answer.get() != NULL);
 
-  audio_media_desc = static_cast<const cricket::MediaContentDescription*> (
+  audio_media_desc = static_cast<const cricket::MediaContentDescription*>(
       answer->GetContentDescriptionByName("audio"));
   ASSERT_TRUE(audio_media_desc != NULL);
-  video_media_desc = static_cast<const cricket::MediaContentDescription*> (
+  video_media_desc = static_cast<const cricket::MediaContentDescription*>(
       answer->GetContentDescriptionByName("video"));
   ASSERT_TRUE(video_media_desc != NULL);
   EXPECT_TRUE(audio_media_desc->cryptos().empty());
diff --git a/talk/xmpp/jid.cc b/talk/xmpp/jid.cc
index 66d5cde..45838710c 100644
--- a/talk/xmpp/jid.cc
+++ b/talk/xmpp/jid.cc
@@ -322,7 +322,7 @@
     std::string* buf, bool* valid) {
   *valid = false;
 
-  int start_len = buf->length();
+  int start_len = static_cast<int>(buf->length());
   for (std::string::const_iterator i = start; i < end; ++i) {
     bool char_valid = true;
     unsigned char ch = *i;
@@ -338,7 +338,7 @@
     }
   }
 
-  int count = buf->length() - start_len;
+  int count = static_cast<int>(buf->length() - start_len);
   if (count == 0) {
     return;
   }
diff --git a/talk/xmpp/xmppclient.cc b/talk/xmpp/xmppclient.cc
index f7d7cf2..9c49a9c 100644
--- a/talk/xmpp/xmppclient.cc
+++ b/talk/xmpp/xmppclient.cc
@@ -379,7 +379,7 @@
       return;
 
 //#ifdef _DEBUG
-    client_->SignalLogInput(bytes, bytes_read);
+    client_->SignalLogInput(bytes, static_cast<int>(bytes_read));
 //#endif
 
     engine_->HandleInput(bytes, bytes_read);
@@ -403,7 +403,7 @@
 
 void XmppClient::Private::WriteOutput(const char* bytes, size_t len) {
 //#ifdef _DEBUG
-  client_->SignalLogOutput(bytes, len);
+  client_->SignalLogOutput(bytes, static_cast<int>(len));
 //#endif
 
   socket_->Write(bytes, len);