Revert of Add EncodedImageCallback::OnEncodedImage(). (patchset #13 id:280001 of https://codereview.webrtc.org/2089773002/ )

Reason for revert:
Broke internal tests.

Original issue's description:
> Add EncodedImageCallback::OnEncodedImage().
>
> OnEncodedImage() is going to replace Encoded(), which is deprecated now.
> The new OnEncodedImage() returns a Result struct that contains frame_id,
> which tells the encoder the RTP timestamp for the frame.
>
> BUG=chromium:621691
> R=niklas.enbom@webrtc.org, sprang@webrtc.org, stefan@webrtc.org
>
> Committed: https://crrev.com/ad34dbe934d47f88011045671b4aea00dbd5a795
> Cr-Commit-Position: refs/heads/master@{#13613}
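
For reference, below is a minimal standalone C++ sketch of the two callback shapes
involved in this revert, based only on the signatures visible in the diff that
follows. The names used here (FakeEncodedImage, OldStyleCallback, NewStyleCallback,
DemoSink) are simplified illustrative stand-ins, not the real WebRTC classes, and
the Result layout is an assumption inferred from how the reverted code constructed
it (Result(Result::OK, timestamp)).

  #include <cstdint>
  #include <iostream>

  // Simplified stand-in for webrtc::EncodedImage (assumption, not the real class).
  struct FakeEncodedImage {
    uint32_t _timeStamp = 0;
  };

  // Old interface restored by this revert: Encoded() returns an int32_t
  // status code (0 on success, negative on failure).
  class OldStyleCallback {
   public:
    virtual ~OldStyleCallback() = default;
    virtual int32_t Encoded(const FakeEncodedImage& image) = 0;
  };

  // New interface removed by this revert: OnEncodedImage() returns a Result
  // carrying an error code plus frame_id, which the reverted CL used to report
  // the frame's RTP timestamp back to the encoder.
  class NewStyleCallback {
   public:
    struct Result {
      enum Error { OK, ERROR_SEND_FAILED };
      Result(Error error, uint32_t frame_id) : error(error), frame_id(frame_id) {}
      Error error;
      uint32_t frame_id;  // RTP timestamp of the sent frame.
    };
    virtual ~NewStyleCallback() = default;
    virtual Result OnEncodedImage(const FakeEncodedImage& image) = 0;
  };

  // Example sink mirroring how the reverted implementations built their Result.
  class DemoSink : public NewStyleCallback {
   public:
    Result OnEncodedImage(const FakeEncodedImage& image) override {
      return Result(Result::OK, image._timeStamp);
    }
  };

  int main() {
    DemoSink sink;
    FakeEncodedImage image;
    image._timeStamp = 90000;
    NewStyleCallback::Result result = sink.OnEncodedImage(image);
    std::cout << "frame_id (RTP timestamp): " << result.frame_id << "\n";
    return 0;
  }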

TBR=pbos@webrtc.org,mflodman@webrtc.org,sprang@webrtc.org,stefan@webrtc.org,niklas.enbom@webrtc.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:621691

Review-Url: https://codereview.webrtc.org/2206743002
Cr-Commit-Position: refs/heads/master@{#13614}
diff --git a/webrtc/modules/rtp_rtcp/include/rtp_rtcp.h b/webrtc/modules/rtp_rtcp/include/rtp_rtcp.h
index 85d14bd..f0d2342 100644
--- a/webrtc/modules/rtp_rtcp/include/rtp_rtcp.h
+++ b/webrtc/modules/rtp_rtcp/include/rtp_rtcp.h
@@ -225,21 +225,8 @@
   // |payload_size|  - size of payload buffer to send
   // |fragmentation| - fragmentation offset data for fragmented frames such
   //                   as layers or RED
-  // |transport_frame_id_out| - set to RTP timestamp.
-  // Returns true on success.
-
-  virtual bool SendOutgoingData(FrameType frame_type,
-                                int8_t payload_type,
-                                uint32_t timestamp,
-                                int64_t capture_time_ms,
-                                const uint8_t* payload_data,
-                                size_t payload_size,
-                                const RTPFragmentationHeader* fragmentation,
-                                const RTPVideoHeader* rtp_video_header,
-                                uint32_t* transport_frame_id_out) = 0;
-
-  // Deprecated version of the method above.
-  int32_t SendOutgoingData(
+  // Returns -1 on failure else 0.
+  virtual int32_t SendOutgoingData(
       FrameType frame_type,
       int8_t payload_type,
       uint32_t timestamp,
@@ -247,14 +234,7 @@
       const uint8_t* payload_data,
       size_t payload_size,
       const RTPFragmentationHeader* fragmentation = nullptr,
-      const RTPVideoHeader* rtp_video_header = nullptr) {
-    return SendOutgoingData(frame_type, payload_type, timestamp,
-                            capture_time_ms, payload_data, payload_size,
-                            fragmentation, rtp_video_header,
-                            /*frame_id_out=*/nullptr)
-               ? 0
-               : -1;
-  }
+      const RTPVideoHeader* rtp_video_header = nullptr) = 0;
 
   virtual bool TimeToSendPacket(uint32_t ssrc,
                                 uint16_t sequence_number,
diff --git a/webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h b/webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
index 6834da3..8a44159 100644
--- a/webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
+++ b/webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
@@ -98,16 +98,15 @@
   MOCK_CONST_METHOD0(GetVideoBitrateObserver, BitrateStatisticsObserver*(void));
   MOCK_CONST_METHOD1(EstimatedReceiveBandwidth,
                      int(uint32_t* available_bandwidth));
-  MOCK_METHOD9(SendOutgoingData,
-               bool(FrameType frame_type,
-                    int8_t payload_type,
-                    uint32_t timestamp,
-                    int64_t capture_time_ms,
-                    const uint8_t* payload_data,
-                    size_t payload_size,
-                    const RTPFragmentationHeader* fragmentation,
-                    const RTPVideoHeader* rtp_video_header,
-                    uint32_t* frame_id_out));
+  MOCK_METHOD8(SendOutgoingData,
+               int32_t(FrameType frame_type,
+                       int8_t payload_type,
+                       uint32_t timestamp,
+                       int64_t capture_time_ms,
+                       const uint8_t* payload_data,
+                       size_t payload_size,
+                       const RTPFragmentationHeader* fragmentation,
+                       const RTPVideoHeader* rtp_video_header));
   MOCK_METHOD5(TimeToSendPacket,
                bool(uint32_t ssrc,
                     uint16_t sequence_number,
diff --git a/webrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc b/webrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc
index 052060c..a0e0cec 100644
--- a/webrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc
@@ -264,9 +264,9 @@
     uint32_t timestamp = 3000;
     uint16_t nack_list[kVideoNackListSize];
     for (int frame = 0; frame < kNumFrames; ++frame) {
-      EXPECT_TRUE(rtp_rtcp_module_->SendOutgoingData(
-          webrtc::kVideoFrameDelta, kPayloadType, timestamp, timestamp / 90,
-          payload_data, payload_data_length, nullptr, nullptr, nullptr));
+      EXPECT_EQ(0, rtp_rtcp_module_->SendOutgoingData(
+                       webrtc::kVideoFrameDelta, kPayloadType, timestamp,
+                       timestamp / 90, payload_data, payload_data_length));
       // Min required delay until retransmit = 5 + RTT ms (RTT = 0).
       fake_clock.AdvanceTimeMilliseconds(5);
       int length = BuildNackList(nack_list);
@@ -310,9 +310,9 @@
   // Send 30 frames which at the default size is roughly what we need to get
   // enough packets.
   for (int frame = 0; frame < kNumFrames; ++frame) {
-    EXPECT_TRUE(rtp_rtcp_module_->SendOutgoingData(
-        webrtc::kVideoFrameDelta, kPayloadType, timestamp, timestamp / 90,
-        payload_data, payload_data_length, nullptr, nullptr, nullptr));
+    EXPECT_EQ(0, rtp_rtcp_module_->SendOutgoingData(
+                     webrtc::kVideoFrameDelta, kPayloadType, timestamp,
+                     timestamp / 90, payload_data, payload_data_length));
     // Prepare next frame.
     timestamp += 3000;
     fake_clock.AdvanceTimeMilliseconds(33);
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
index e58ac3c..190136d 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
@@ -384,7 +384,7 @@
   return rtp_sender_.SendingMedia();
 }
 
-bool ModuleRtpRtcpImpl::SendOutgoingData(
+int32_t ModuleRtpRtcpImpl::SendOutgoingData(
     FrameType frame_type,
     int8_t payload_type,
     uint32_t time_stamp,
@@ -392,8 +392,7 @@
     const uint8_t* payload_data,
     size_t payload_size,
     const RTPFragmentationHeader* fragmentation,
-    const RTPVideoHeader* rtp_video_header,
-    uint32_t* transport_frame_id_out) {
+    const RTPVideoHeader* rtp_video_header) {
   rtcp_sender_.SetLastRtpTime(time_stamp, capture_time_ms);
   // Make sure an RTCP report isn't queued behind a key frame.
   if (rtcp_sender_.TimeToSendRTCPReport(kVideoFrameKey == frame_type)) {
@@ -401,7 +400,7 @@
   }
   return rtp_sender_.SendOutgoingData(
       frame_type, payload_type, time_stamp, capture_time_ms, payload_data,
-      payload_size, fragmentation, rtp_video_header, transport_frame_id_out);
+      payload_size, fragmentation, rtp_video_header);
 }
 
 bool ModuleRtpRtcpImpl::TimeToSendPacket(uint32_t ssrc,
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
index fd44a59..e8fc545 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
@@ -112,15 +112,15 @@
 
   // Used by the codec module to deliver a video or audio frame for
   // packetization.
-  bool SendOutgoingData(FrameType frame_type,
-                        int8_t payload_type,
-                        uint32_t time_stamp,
-                        int64_t capture_time_ms,
-                        const uint8_t* payload_data,
-                        size_t payload_size,
-                        const RTPFragmentationHeader* fragmentation,
-                        const RTPVideoHeader* rtp_video_header,
-                        uint32_t* transport_frame_id_out) override;
+  int32_t SendOutgoingData(
+      FrameType frame_type,
+      int8_t payload_type,
+      uint32_t time_stamp,
+      int64_t capture_time_ms,
+      const uint8_t* payload_data,
+      size_t payload_size,
+      const RTPFragmentationHeader* fragmentation = NULL,
+      const RTPVideoHeader* rtp_video_header = NULL) override;
 
   bool TimeToSendPacket(uint32_t ssrc,
                         uint16_t sequence_number,
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
index acbe56c..9dfcc13 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
@@ -206,9 +206,14 @@
         kRtpVideoVp8, {vp8_header}};
 
     const uint8_t payload[100] = {0};
-    EXPECT_EQ(true, module->impl_->SendOutgoingData(
-                     kVideoFrameKey, codec_.plType, 0, 0, payload,
-                     sizeof(payload), nullptr, &rtp_video_header, nullptr));
+    EXPECT_EQ(0, module->impl_->SendOutgoingData(kVideoFrameKey,
+                                                 codec_.plType,
+                                                 0,
+                                                 0,
+                                                 payload,
+                                                 sizeof(payload),
+                                                 NULL,
+                                                 &rtp_video_header));
   }
 
   void IncomingRtcpNack(const RtpRtcpModule* module, uint16_t sequence_number) {
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
index 58dbc3e..2fe80ae 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
@@ -448,15 +448,14 @@
   return video_rotation_active_;
 }
 
-bool RTPSender::SendOutgoingData(FrameType frame_type,
-                                 int8_t payload_type,
-                                 uint32_t capture_timestamp,
-                                 int64_t capture_time_ms,
-                                 const uint8_t* payload_data,
-                                 size_t payload_size,
-                                 const RTPFragmentationHeader* fragmentation,
-                                 const RTPVideoHeader* rtp_header,
-                                 uint32_t* transport_frame_id_out) {
+int32_t RTPSender::SendOutgoingData(FrameType frame_type,
+                                    int8_t payload_type,
+                                    uint32_t capture_timestamp,
+                                    int64_t capture_time_ms,
+                                    const uint8_t* payload_data,
+                                    size_t payload_size,
+                                    const RTPFragmentationHeader* fragmentation,
+                                    const RTPVideoHeader* rtp_hdr) {
   uint32_t ssrc;
   uint16_t sequence_number;
   {
@@ -464,35 +463,36 @@
     rtc::CritScope lock(&send_critsect_);
     ssrc = ssrc_;
     sequence_number = sequence_number_;
-    if (!sending_media_)
-      return true;
+    if (!sending_media_) {
+      return 0;
+    }
   }
   RtpVideoCodecTypes video_type = kRtpVideoGeneric;
   if (CheckPayloadType(payload_type, &video_type) != 0) {
     LOG(LS_ERROR) << "Don't send data with unknown payload type: "
                   << static_cast<int>(payload_type) << ".";
-    return false;
+    return -1;
   }
 
-  bool result;
+  int32_t ret_val;
   if (audio_configured_) {
     TRACE_EVENT_ASYNC_STEP1("webrtc", "Audio", capture_timestamp,
                             "Send", "type", FrameTypeToString(frame_type));
     assert(frame_type == kAudioFrameSpeech || frame_type == kAudioFrameCN ||
            frame_type == kEmptyFrame);
 
-    result = audio_->SendAudio(frame_type, payload_type, capture_timestamp,
-                               payload_data, payload_size, fragmentation);
+    ret_val = audio_->SendAudio(frame_type, payload_type, capture_timestamp,
+                                payload_data, payload_size, fragmentation);
   } else {
     TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", capture_time_ms,
                             "Send", "type", FrameTypeToString(frame_type));
     assert(frame_type != kAudioFrameSpeech && frame_type != kAudioFrameCN);
 
     if (frame_type == kEmptyFrame)
-      return true;
+      return 0;
 
-    if (rtp_header) {
-      playout_delay_oracle_.UpdateRequest(ssrc, rtp_header->playout_delay,
+    if (rtp_hdr) {
+      playout_delay_oracle_.UpdateRequest(ssrc, rtp_hdr->playout_delay,
                                           sequence_number);
     }
 
@@ -507,16 +507,9 @@
       }
     }
 
-    result = video_->SendVideo(video_type, frame_type, payload_type,
-                               capture_timestamp, capture_time_ms, payload_data,
-                               payload_size, fragmentation, rtp_header);
-  }
-
-  if (transport_frame_id_out) {
-    rtc::CritScope lock(&send_critsect_);
-    // TODO(sergeyu): Move RTP timestamp calculation from BuildRTPheader() to
-    // SendOutgoingData() and pass it to SendVideo()/SendAudio() calls.
-    *transport_frame_id_out = timestamp_;
+    ret_val = video_->SendVideo(
+        video_type, frame_type, payload_type, capture_timestamp,
+        capture_time_ms, payload_data, payload_size, fragmentation, rtp_hdr);
   }
 
   rtc::CritScope cs(&statistics_crit_);
@@ -530,7 +523,7 @@
     frame_count_observer_->FrameCountUpdated(frame_counts_, ssrc);
   }
 
-  return result;
+  return ret_val;
 }
 
 size_t RTPSender::TrySendRedundantPayloads(size_t bytes_to_send,
@@ -952,12 +945,12 @@
 }
 
 // TODO(pwestin): send in the RtpHeaderParser to avoid parsing it again.
-bool RTPSender::SendToNetwork(uint8_t* buffer,
-                              size_t payload_length,
-                              size_t rtp_header_length,
-                              int64_t capture_time_ms,
-                              StorageType storage,
-                              RtpPacketSender::Priority priority) {
+int32_t RTPSender::SendToNetwork(uint8_t* buffer,
+                                 size_t payload_length,
+                                 size_t rtp_header_length,
+                                 int64_t capture_time_ms,
+                                 StorageType storage,
+                                 RtpPacketSender::Priority priority) {
   size_t length = payload_length + rtp_header_length;
   RtpUtility::RtpHeaderParser rtp_parser(buffer, length);
 
@@ -979,7 +972,7 @@
   // Used for NACK and to spread out the transmission of packets.
   if (packet_history_.PutRTPPacket(buffer, length, capture_time_ms, storage) !=
       0) {
-    return false;
+    return -1;
   }
 
   if (paced_sender_) {
@@ -996,7 +989,7 @@
                                "PacedSend", corrected_time_ms,
                                "capture_time_ms", corrected_time_ms);
     }
-    return true;
+    return 0;
   }
 
   PacketOptions options;
@@ -1017,14 +1010,14 @@
   packet_history_.SetSent(rtp_header.sequenceNumber);
 
   if (!sent)
-    return false;
+    return -1;
 
   {
     rtc::CritScope lock(&send_critsect_);
     media_has_been_sent_ = true;
   }
   UpdateRtpStats(buffer, length, rtp_header, false, false);
-  return true;
+  return 0;
 }
 
 void RTPSender::UpdateDelayStatistics(int64_t capture_time_ms, int64_t now_ms) {
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender.h b/webrtc/modules/rtp_rtcp/source/rtp_sender.h
index f068ae3..a7fab0f 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender.h
@@ -76,12 +76,12 @@
   virtual size_t MaxDataPayloadLength() const = 0;
   virtual uint16_t ActualSendBitrateKbit() const = 0;
 
-  virtual bool SendToNetwork(uint8_t* data_buffer,
-                             size_t payload_length,
-                             size_t rtp_header_length,
-                             int64_t capture_time_ms,
-                             StorageType storage,
-                             RtpPacketSender::Priority priority) = 0;
+  virtual int32_t SendToNetwork(uint8_t* data_buffer,
+                                size_t payload_length,
+                                size_t rtp_header_length,
+                                int64_t capture_time_ms,
+                                StorageType storage,
+                                RtpPacketSender::Priority priority) = 0;
 
   virtual bool UpdateVideoRotation(uint8_t* rtp_packet,
                                    size_t rtp_packet_length,
@@ -154,15 +154,14 @@
 
   void SetMaxPayloadLength(size_t max_payload_length);
 
-  bool SendOutgoingData(FrameType frame_type,
-                        int8_t payload_type,
-                        uint32_t timestamp,
-                        int64_t capture_time_ms,
-                        const uint8_t* payload_data,
-                        size_t payload_size,
-                        const RTPFragmentationHeader* fragmentation,
-                        const RTPVideoHeader* rtp_header,
-                        uint32_t* transport_frame_id_out);
+  int32_t SendOutgoingData(FrameType frame_type,
+                           int8_t payload_type,
+                           uint32_t timestamp,
+                           int64_t capture_time_ms,
+                           const uint8_t* payload_data,
+                           size_t payload_size,
+                           const RTPFragmentationHeader* fragmentation,
+                           const RTPVideoHeader* rtp_header);
 
   // RTP header extension
   int32_t SetTransmissionTimeOffset(int32_t transmission_time_offset);
@@ -277,12 +276,12 @@
   uint32_t Timestamp() const override;
   uint32_t SSRC() const override;
 
-  bool SendToNetwork(uint8_t* data_buffer,
-                     size_t payload_length,
-                     size_t rtp_header_length,
-                     int64_t capture_time_ms,
-                     StorageType storage,
-                     RtpPacketSender::Priority priority) override;
+  int32_t SendToNetwork(uint8_t* data_buffer,
+                        size_t payload_length,
+                        size_t rtp_header_length,
+                        int64_t capture_time_ms,
+                        StorageType storage,
+                        RtpPacketSender::Priority priority) override;
 
   // Audio.
 
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
index d358c5e..4ff61ab 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
@@ -145,7 +145,7 @@
   return marker_bit;
 }
 
-bool RTPSenderAudio::SendAudio(FrameType frame_type,
+int32_t RTPSenderAudio::SendAudio(FrameType frame_type,
                                   int8_t payload_type,
                                   uint32_t capture_timestamp,
                                   const uint8_t* payload_data,
@@ -195,7 +195,7 @@
       if (packet_size_samples >
           (capture_timestamp - dtmf_timestamp_last_sent_)) {
         // not time to send yet
-        return true;
+        return 0;
       }
     }
     dtmf_timestamp_last_sent_ = capture_timestamp;
@@ -228,24 +228,24 @@
             ended, dtmf_payload_type, dtmf_timestamp_,
             static_cast<uint16_t>(dtmf_duration_samples), false);
       } else {
-        if (!SendTelephoneEventPacket(ended, dtmf_payload_type, dtmf_timestamp_,
-                                      dtmf_duration_samples,
-                                      !dtmf_event_first_packet_sent_)) {
-          return false;
+        if (SendTelephoneEventPacket(ended, dtmf_payload_type, dtmf_timestamp_,
+                                     dtmf_duration_samples,
+                                     !dtmf_event_first_packet_sent_) != 0) {
+          return -1;
         }
         dtmf_event_first_packet_sent_ = true;
-        return true;
+        return 0;
       }
     }
-    return true;
+    return 0;
   }
   if (payload_size == 0 || payload_data == NULL) {
     if (frame_type == kEmptyFrame) {
       // we don't send empty audio RTP packets
       // no error since we use it to drive DTMF when we use VAD
-      return true;
+      return 0;
     }
-    return false;
+    return -1;
   }
   uint8_t data_buffer[IP_PACKET_SIZE];
   bool marker_bit = MarkerBit(frame_type, payload_type);
@@ -269,11 +269,11 @@
                                                   clock_->TimeInMilliseconds());
   }
   if (rtpHeaderLength <= 0) {
-    return false;
+    return -1;
   }
   if (max_payload_length < (rtpHeaderLength + payload_size)) {
     // Too large payload buffer.
-    return false;
+    return -1;
   }
   if (red_payload_type >= 0 &&  // Have we configured RED?
       fragmentation && fragmentation->fragmentationVectorSize > 1 &&
@@ -281,7 +281,7 @@
     if (timestampOffset <= 0x3fff) {
       if (fragmentation->fragmentationVectorSize != 2) {
         // we only support 2 codecs when using RED
-        return false;
+        return -1;
       }
       // only 0x80 if we have multiple blocks
       data_buffer[rtpHeaderLength++] =
@@ -290,7 +290,7 @@
 
       // sanity blockLength
       if (blockLength > 0x3ff) {  // block length 10 bits 1023 bytes
-        return false;
+        return -1;
       }
       uint32_t REDheader = (timestampOffset << 10) + blockLength;
       ByteWriter<uint32_t>::WriteBigEndian(data_buffer + rtpHeaderLength,
@@ -349,7 +349,7 @@
   TRACE_EVENT_ASYNC_END2("webrtc", "Audio", capture_timestamp, "timestamp",
                          rtp_sender_->Timestamp(), "seqnum",
                          rtp_sender_->SequenceNumber());
-  bool send_result = rtp_sender_->SendToNetwork(
+  int32_t send_result = rtp_sender_->SendToNetwork(
       data_buffer, payload_size, rtpHeaderLength, rtc::TimeMillis(),
       kAllowRetransmission, RtpPacketSender::kHighPriority);
   if (first_packet_sent_()) {
@@ -403,18 +403,18 @@
   return AddDTMF(key, time_ms, level);
 }
 
-bool RTPSenderAudio::SendTelephoneEventPacket(bool ended,
-                                              int8_t dtmf_payload_type,
-                                              uint32_t dtmf_timestamp,
-                                              uint16_t duration,
-                                              bool marker_bit) {
+int32_t RTPSenderAudio::SendTelephoneEventPacket(bool ended,
+                                                 int8_t dtmf_payload_type,
+                                                 uint32_t dtmf_timestamp,
+                                                 uint16_t duration,
+                                                 bool marker_bit) {
   uint8_t dtmfbuffer[IP_PACKET_SIZE];
-  uint8_t send_count = 1;
-  bool result = 0;
+  uint8_t sendCount = 1;
+  int32_t retVal = 0;
 
   if (ended) {
     // resend last packet in an event 3 times
-    send_count = 3;
+    sendCount = 3;
   }
   do {
     // Send DTMF data
@@ -422,7 +422,7 @@
         dtmfbuffer, dtmf_payload_type, marker_bit, dtmf_timestamp,
         clock_->TimeInMilliseconds());
     if (header_length <= 0)
-      return false;
+      return -1;
 
     // reset CSRC and X bit
     dtmfbuffer[0] &= 0xe0;
@@ -451,12 +451,12 @@
     TRACE_EVENT_INSTANT2(
         TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"), "Audio::SendTelephoneEvent",
         "timestamp", dtmf_timestamp, "seqnum", rtp_sender_->SequenceNumber());
-    result = rtp_sender_->SendToNetwork(dtmfbuffer, 4, 12, rtc::TimeMillis(),
+    retVal = rtp_sender_->SendToNetwork(dtmfbuffer, 4, 12, rtc::TimeMillis(),
                                         kAllowRetransmission,
                                         RtpPacketSender::kHighPriority);
-    send_count--;
-  } while (send_count > 0 && result == 0);
+    sendCount--;
+  } while (sendCount > 0 && retVal == 0);
 
-  return result;
+  return retVal;
 }
 }  // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
index d540593..cb3ddb2 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
@@ -34,12 +34,12 @@
                                uint32_t rate,
                                RtpUtility::Payload** payload);
 
-  bool SendAudio(FrameType frame_type,
-                 int8_t payload_type,
-                 uint32_t capture_timestamp,
-                 const uint8_t* payload_data,
-                 size_t payload_size,
-                 const RTPFragmentationHeader* fragmentation);
+  int32_t SendAudio(FrameType frame_type,
+                    int8_t payload_type,
+                    uint32_t capture_timestamp,
+                    const uint8_t* payload_data,
+                    size_t payload_size,
+                    const RTPFragmentationHeader* fragmentation);
 
   // set audio packet size, used to determine when it's time to send a DTMF
   // packet in silence (CNG)
@@ -62,7 +62,7 @@
   int32_t RED(int8_t* payload_type) const;
 
  protected:
-  bool SendTelephoneEventPacket(
+  int32_t SendTelephoneEventPacket(
       bool ended,
       int8_t dtmf_payload_type,
       uint32_t dtmf_timestamp,
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc
index ce032eca..fed767b 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc
@@ -190,9 +190,9 @@
     ASSERT_GE(rtp_length, 0);
 
     // Packet should be stored in a send bucket.
-    EXPECT_TRUE(rtp_sender_->SendToNetwork(
-        packet_, payload_length, rtp_length, capture_time_ms,
-        kAllowRetransmission, RtpPacketSender::kNormalPriority));
+    EXPECT_EQ(0, rtp_sender_->SendToNetwork(
+                     packet_, payload_length, rtp_length, capture_time_ms,
+                     kAllowRetransmission, RtpPacketSender::kNormalPriority));
   }
 
   void SendGenericPayload() {
@@ -204,9 +204,9 @@
     EXPECT_EQ(0, rtp_sender_->RegisterPayload(payload_name, kPayloadType, 90000,
                                               0, 1500));
 
-    EXPECT_TRUE(rtp_sender_->SendOutgoingData(
-        kVideoFrameKey, kPayloadType, kTimestamp, kCaptureTimeMs, kPayload,
-        sizeof(kPayload), nullptr, nullptr, nullptr));
+    EXPECT_EQ(0, rtp_sender_->SendOutgoingData(
+                     kVideoFrameKey, kPayloadType, kTimestamp, kCaptureTimeMs,
+                     kPayload, sizeof(kPayload), nullptr, nullptr));
   }
 };
 
@@ -753,9 +753,9 @@
   size_t rtp_length = static_cast<size_t>(rtp_length_int);
 
   // Packet should be stored in a send bucket.
-  EXPECT_TRUE(rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
-                                         capture_time_ms, kAllowRetransmission,
-                                         RtpPacketSender::kNormalPriority));
+  EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
+                                          capture_time_ms, kAllowRetransmission,
+                                          RtpPacketSender::kNormalPriority));
 
   EXPECT_EQ(0, transport_.packets_sent_);
 
@@ -806,9 +806,9 @@
   size_t rtp_length = static_cast<size_t>(rtp_length_int);
 
   // Packet should be stored in a send bucket.
-  EXPECT_TRUE(rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
-                                         capture_time_ms, kAllowRetransmission,
-                                         RtpPacketSender::kNormalPriority));
+  EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
+                                          capture_time_ms, kAllowRetransmission,
+                                          RtpPacketSender::kNormalPriority));
 
   EXPECT_EQ(0, transport_.packets_sent_);
 
@@ -888,9 +888,9 @@
   size_t rtp_length = static_cast<size_t>(rtp_length_int);
 
   // Packet should be stored in a send bucket.
-  EXPECT_TRUE(rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
-                                         capture_time_ms, kAllowRetransmission,
-                                         RtpPacketSender::kNormalPriority));
+  EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
+                                          capture_time_ms, kAllowRetransmission,
+                                          RtpPacketSender::kNormalPriority));
 
   int total_packets_sent = 0;
   EXPECT_EQ(total_packets_sent, transport_.packets_sent_);
@@ -948,9 +948,9 @@
               InsertPacket(RtpPacketSender::kNormalPriority, _, _, _, _, _));
 
   // Packet should be stored in a send bucket.
-  EXPECT_TRUE(rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
-                                         capture_time_ms, kAllowRetransmission,
-                                         RtpPacketSender::kNormalPriority));
+  EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
+                                          capture_time_ms, kAllowRetransmission,
+                                          RtpPacketSender::kNormalPriority));
 
   rtp_sender_->TimeToSendPacket(seq_num, capture_time_ms, false,
                                 PacketInfo::kNotAProbe);
@@ -1115,9 +1115,9 @@
   uint8_t payload[] = {47, 11, 32, 93, 89};
 
   // Send keyframe
-  ASSERT_TRUE(rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
-                                            4321, payload, sizeof(payload),
-                                            nullptr, nullptr, nullptr));
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
+                                             4321, payload, sizeof(payload),
+                                             nullptr, nullptr));
 
   RtpUtility::RtpHeaderParser rtp_parser(transport_.last_sent_packet_,
                                          transport_.last_sent_packet_len_);
@@ -1141,9 +1141,9 @@
   payload[1] = 42;
   payload[4] = 13;
 
-  ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-      kVideoFrameDelta, payload_type, 1234, 4321, payload, sizeof(payload),
-      nullptr, nullptr, nullptr));
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(
+                   kVideoFrameDelta, payload_type, 1234, 4321, payload,
+                   sizeof(payload), nullptr, nullptr));
 
   RtpUtility::RtpHeaderParser rtp_parser2(transport_.last_sent_packet_,
                                           transport_.last_sent_packet_len_);
@@ -1195,18 +1195,18 @@
   EXPECT_CALL(mock_paced_sender_, InsertPacket(_, _, _, _, _, _))
       .Times(::testing::AtLeast(2));
 
-  ASSERT_TRUE(rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
-                                            4321, payload, sizeof(payload),
-                                            nullptr, nullptr, nullptr));
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
+                                             4321, payload, sizeof(payload),
+                                             nullptr, nullptr));
 
   EXPECT_EQ(1U, callback.num_calls_);
   EXPECT_EQ(ssrc, callback.ssrc_);
   EXPECT_EQ(1, callback.frame_counts_.key_frames);
   EXPECT_EQ(0, callback.frame_counts_.delta_frames);
 
-  ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-      kVideoFrameDelta, payload_type, 1234, 4321, payload, sizeof(payload),
-      nullptr, nullptr, nullptr));
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(
+                   kVideoFrameDelta, payload_type, 1234, 4321, payload,
+                   sizeof(payload), nullptr, nullptr));
 
   EXPECT_EQ(2U, callback.num_calls_);
   EXPECT_EQ(ssrc, callback.ssrc_);
@@ -1268,9 +1268,9 @@
 
   // Send a few frames.
   for (uint32_t i = 0; i < kNumPackets; ++i) {
-    ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-        kVideoFrameKey, payload_type, 1234, 4321, payload, sizeof(payload),
-        nullptr, nullptr, nullptr));
+    ASSERT_EQ(0, rtp_sender_->SendOutgoingData(
+                     kVideoFrameKey, payload_type, 1234, 4321, payload,
+                     sizeof(payload), nullptr, nullptr));
     fake_clock_.AdvanceTimeMilliseconds(kPacketInterval);
   }
 
@@ -1349,9 +1349,9 @@
   rtp_sender_->RegisterRtpStatisticsCallback(&callback);
 
   // Send a frame.
-  ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-                      kVideoFrameKey, payload_type, 1234, 4321, payload,
-                      sizeof(payload), nullptr, nullptr, nullptr));
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
+                                             4321, payload, sizeof(payload),
+                                             nullptr, nullptr));
   StreamDataCounters expected;
   expected.transmitted.payload_bytes = 6;
   expected.transmitted.header_bytes = 12;
@@ -1391,9 +1391,9 @@
   fec_params.fec_rate = 1;
   fec_params.max_fec_frames = 1;
   rtp_sender_->SetFecParameters(&fec_params, &fec_params);
-  ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-                      kVideoFrameDelta, payload_type, 1234, 4321, payload,
-                      sizeof(payload), nullptr, nullptr, nullptr));
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(
+                   kVideoFrameDelta, payload_type, 1234, 4321, payload,
+                   sizeof(payload), nullptr, nullptr));
   expected.transmitted.payload_bytes = 40;
   expected.transmitted.header_bytes = 60;
   expected.transmitted.packets = 5;
@@ -1410,9 +1410,9 @@
                                             0, 1500));
   uint8_t payload[] = {47, 11, 32, 93, 89};
 
-  ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-                      kAudioFrameCN, payload_type, 1234, 4321, payload,
-                      sizeof(payload), nullptr, nullptr, nullptr));
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kAudioFrameCN, payload_type, 1234,
+                                             4321, payload, sizeof(payload),
+                                             nullptr, nullptr));
 
   RtpUtility::RtpHeaderParser rtp_parser(transport_.last_sent_packet_,
                                          transport_.last_sent_packet_len_);
@@ -1439,9 +1439,9 @@
                                             0, 1500));
   uint8_t payload[] = {47, 11, 32, 93, 89};
 
-  ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-                      kAudioFrameCN, payload_type, 1234, 4321, payload,
-                      sizeof(payload), nullptr, nullptr, nullptr));
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kAudioFrameCN, payload_type, 1234,
+                                             4321, payload, sizeof(payload),
+                                             nullptr, nullptr));
 
   RtpUtility::RtpHeaderParser rtp_parser(transport_.last_sent_packet_,
                                          transport_.last_sent_packet_len_);
@@ -1490,15 +1490,15 @@
   // During start, it takes the starting timestamp as last sent timestamp.
   // The duration is calculated as the difference of current and last sent
   // timestamp. So for first call it will skip since the duration is zero.
-  ASSERT_TRUE(rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
-                                                capture_time_ms, 0, nullptr, 0,
-                                                nullptr, nullptr, nullptr));
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
+                                             capture_time_ms, 0, nullptr, 0,
+                                             nullptr, nullptr));
   // DTMF Sample Length is (Frequency/1000) * Duration.
   // So in this case, it is (8000/1000) * 500 = 4000.
   // Sending it as two packets.
-  ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-                      kEmptyFrame, payload_type, capture_time_ms + 2000, 0,
-                      nullptr, 0, nullptr, nullptr, nullptr));
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
+                                             capture_time_ms + 2000, 0, nullptr,
+                                             0, nullptr, nullptr));
   std::unique_ptr<webrtc::RtpHeaderParser> rtp_parser(
       webrtc::RtpHeaderParser::Create());
   ASSERT_TRUE(rtp_parser.get() != nullptr);
@@ -1508,9 +1508,9 @@
   // Marker Bit should be set to 1 for first packet.
   EXPECT_TRUE(rtp_header.markerBit);
 
-  ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-                      kEmptyFrame, payload_type, capture_time_ms + 4000, 0,
-                      nullptr, 0, nullptr, nullptr, nullptr));
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
+                                             capture_time_ms + 4000, 0, nullptr,
+                                             0, nullptr, nullptr));
   ASSERT_TRUE(rtp_parser->Parse(transport_.last_sent_packet_,
                                 transport_.last_sent_packet_len_, &rtp_header));
   // Marker Bit should be set to 0 for rest of the packets.
@@ -1529,9 +1529,9 @@
                                             0, 1500));
   uint8_t payload[] = {47, 11, 32, 93, 89};
 
-  ASSERT_TRUE(rtp_sender_->SendOutgoingData(
-                      kVideoFrameKey, kPayloadType, 1234, 4321, payload,
-                      sizeof(payload), nullptr, nullptr, nullptr));
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, kPayloadType, 1234,
+                                             4321, payload, sizeof(payload),
+                                             nullptr, nullptr));
 
   // Will send 2 full-size padding packets.
   rtp_sender_->TimeToSendPadding(1, PacketInfo::kNotAProbe);
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
index 5af1b4a..5364a9b 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -79,18 +79,18 @@
                                      uint32_t capture_timestamp,
                                      int64_t capture_time_ms,
                                      StorageType storage) {
-  if (!rtp_sender_->SendToNetwork(data_buffer, payload_length,
-                                  rtp_header_length, capture_time_ms, storage,
-                                  RtpPacketSender::kLowPriority)) {
+  if (rtp_sender_->SendToNetwork(data_buffer, payload_length, rtp_header_length,
+                                 capture_time_ms, storage,
+                                 RtpPacketSender::kLowPriority) == 0) {
+    rtc::CritScope cs(&stats_crit_);
+    video_bitrate_.Update(payload_length + rtp_header_length,
+                          clock_->TimeInMilliseconds());
+    TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
+                         "Video::PacketNormal", "timestamp", capture_timestamp,
+                         "seqnum", seq_num);
+  } else {
     LOG(LS_WARNING) << "Failed to send video packet " << seq_num;
-    return;
   }
-  rtc::CritScope cs(&stats_crit_);
-  video_bitrate_.Update(payload_length + rtp_header_length,
-                        clock_->TimeInMilliseconds());
-  TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
-                       "Video::PacketNormal", "timestamp", capture_timestamp,
-                       "seqnum", seq_num);
 }
 
 void RTPSenderVideo::SendVideoPacketAsRed(uint8_t* data_buffer,
@@ -206,17 +206,18 @@
   }
 }
 
-bool RTPSenderVideo::SendVideo(RtpVideoCodecTypes video_type,
-                               FrameType frame_type,
-                               int8_t payload_type,
-                               uint32_t capture_timestamp,
-                               int64_t capture_time_ms,
-                               const uint8_t* payload_data,
-                               size_t payload_size,
-                               const RTPFragmentationHeader* fragmentation,
-                               const RTPVideoHeader* video_header) {
-  if (payload_size == 0)
-    return false;
+int32_t RTPSenderVideo::SendVideo(RtpVideoCodecTypes video_type,
+                                  FrameType frame_type,
+                                  int8_t payload_type,
+                                  uint32_t capture_timestamp,
+                                  int64_t capture_time_ms,
+                                  const uint8_t* payload_data,
+                                  size_t payload_size,
+                                  const RTPFragmentationHeader* fragmentation,
+                                  const RTPVideoHeader* video_header) {
+  if (payload_size == 0) {
+    return -1;
+  }
 
   std::unique_ptr<RtpPacketizer> packetizer(RtpPacketizer::Create(
       video_type, rtp_sender_->MaxDataPayloadLength(),
@@ -261,14 +262,14 @@
 
     if (!packetizer->NextPacket(&dataBuffer[rtp_header_length],
                                 &payload_bytes_in_packet, &last)) {
-      return false;
+      return -1;
     }
 
     // Write RTP header.
     int32_t header_length = rtp_sender_->BuildRtpHeader(
         dataBuffer, payload_type, last, capture_timestamp, capture_time_ms);
     if (header_length <= 0)
-      return false;
+      return -1;
 
     // According to
     // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
@@ -323,7 +324,7 @@
 
   TRACE_EVENT_ASYNC_END1("webrtc", "Video", capture_time_ms, "timestamp",
                          rtp_sender_->Timestamp());
-  return true;
+  return 0;
 }
 
 uint32_t RTPSenderVideo::VideoBitrateSent() const {
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_video.h b/webrtc/modules/rtp_rtcp/source/rtp_sender_video.h
index 842eed8..682b6db 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_video.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_video.h
@@ -42,15 +42,15 @@
       const char payload_name[RTP_PAYLOAD_NAME_SIZE],
       int8_t payload_type);
 
-  bool SendVideo(RtpVideoCodecTypes video_type,
-                 FrameType frame_type,
-                 int8_t payload_type,
-                 uint32_t capture_timestamp,
-                 int64_t capture_time_ms,
-                 const uint8_t* payload_data,
-                 size_t payload_size,
-                 const RTPFragmentationHeader* fragmentation,
-                 const RTPVideoHeader* video_header);
+  int32_t SendVideo(RtpVideoCodecTypes video_type,
+                    FrameType frame_type,
+                    int8_t payload_type,
+                    uint32_t capture_timestamp,
+                    int64_t capture_time_ms,
+                    const uint8_t* payload_data,
+                    size_t payload_size,
+                    const RTPFragmentationHeader* fragmentation,
+                    const RTPVideoHeader* video_header);
 
   int32_t SendRTPIntraRequest();
 
diff --git a/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc b/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc
index 4bbcc32..f9e5001 100644
--- a/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc
+++ b/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc
@@ -171,9 +171,8 @@
 
   // Send an empty RTP packet.
   // Should fail since we have not registered the payload type.
-  EXPECT_FALSE(module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96, 0, -1,
-                                         nullptr, 0, nullptr, nullptr,
-                                         nullptr));
+  EXPECT_EQ(-1, module1->SendOutgoingData(webrtc::kAudioFrameSpeech,
+                                          96, 0, -1, NULL, 0));
 
   CodecInst voice_codec;
   memset(&voice_codec, 0, sizeof(voice_codec));
@@ -198,9 +197,8 @@
       (voice_codec.rate < 0) ? 0 : voice_codec.rate));
 
   const uint8_t test[5] = "test";
-  EXPECT_EQ(true,
-            module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96, 0, -1,
-                                      test, 4, nullptr, nullptr, nullptr));
+  EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
+                                         0, -1, test, 4));
 
   EXPECT_EQ(test_ssrc, rtp_receiver2_->SSRC());
   uint32_t timestamp;
@@ -273,9 +271,9 @@
 
   const uint8_t test[5] = "test";
   // Send a RTP packet.
-  EXPECT_TRUE(module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96, 160, -1,
-                                        test, 4, &fragmentation, nullptr,
-                                        nullptr));
+  EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech,
+                                         96, 160, -1, test, 4,
+                                         &fragmentation));
 
   EXPECT_EQ(0, module1->SetSendREDPayloadType(-1));
   EXPECT_EQ(-1, module1->SendREDPayloadType(&red));
@@ -335,18 +333,16 @@
   // Send RTP packets for 16 tones a 160 ms  100ms
   // pause between = 2560ms + 1600ms = 4160ms
   for (; timeStamp <= 250 * 160; timeStamp += 160) {
-    EXPECT_TRUE(module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
-                                          timeStamp, -1, test, 4, nullptr,
-                                          nullptr, nullptr));
+    EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
+                                           timeStamp, -1, test, 4));
     fake_clock.AdvanceTimeMilliseconds(20);
     module1->Process();
   }
   EXPECT_EQ(0, module1->SendTelephoneEventOutband(32, 9000, 10));
 
   for (; timeStamp <= 740 * 160; timeStamp += 160) {
-    EXPECT_TRUE(module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
-                                          timeStamp, -1, test, 4, nullptr,
-                                          nullptr, nullptr));
+    EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
+                                           timeStamp, -1, test, 4));
     fake_clock.AdvanceTimeMilliseconds(20);
     module1->Process();
   }
diff --git a/webrtc/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc b/webrtc/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc
index e19cf35..f33507e 100644
--- a/webrtc/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc
+++ b/webrtc/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc
@@ -165,9 +165,8 @@
     // the receiving module.
     // send RTP packet with the data "testtest"
     const uint8_t test[9] = "testtest";
-    EXPECT_EQ(true,
-              module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96, 0, -1,
-                                        test, 8, nullptr, nullptr, nullptr));
+    EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
+                                           0, -1, test, 8));
   }
 
   virtual void TearDown() {
diff --git a/webrtc/modules/rtp_rtcp/test/testAPI/test_api_video.cc b/webrtc/modules/rtp_rtcp/test/testAPI/test_api_video.cc
index a701c9e..e784386 100644
--- a/webrtc/modules/rtp_rtcp/test/testAPI/test_api_video.cc
+++ b/webrtc/modules/rtp_rtcp/test/testAPI/test_api_video.cc
@@ -147,9 +147,11 @@
 
 TEST_F(RtpRtcpVideoTest, BasicVideo) {
   uint32_t timestamp = 3000;
-  EXPECT_TRUE(video_module_->SendOutgoingData(
-      kVideoFrameDelta, 123, timestamp, timestamp / 90, video_frame_,
-      payload_data_length_, nullptr, nullptr, nullptr));
+  EXPECT_EQ(0, video_module_->SendOutgoingData(kVideoFrameDelta, 123,
+                                               timestamp,
+                                               timestamp / 90,
+                                               video_frame_,
+                                               payload_data_length_));
 }
 
 TEST_F(RtpRtcpVideoTest, PaddingOnlyFrames) {
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
index 7b0c8e3..e969fb7 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
@@ -410,8 +410,7 @@
 }
 
 // Callbacks
-EncodedImageCallback::Result
-VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::OnEncodedImage(
+int32_t VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
     const EncodedImage& encoded_image,
     const webrtc::CodecSpecificInfo* codec_specific_info,
     const webrtc::RTPFragmentationHeader* fragmentation) {
@@ -420,7 +419,7 @@
   video_processor_->FrameEncoded(codec_specific_info->codecType,
                                  encoded_image,
                                  fragmentation);
-  return Result(Result::OK, 0);
+  return 0;
 }
 int32_t VideoProcessorImpl::VideoProcessorDecodeCompleteCallback::Decoded(
     VideoFrame& image) {
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.h b/webrtc/modules/video_coding/codecs/test/videoprocessor.h
index 21ed55e..af84329 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.h
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.h
@@ -230,7 +230,7 @@
    public:
     explicit VideoProcessorEncodeCompleteCallback(VideoProcessorImpl* vp)
         : video_processor_(vp) {}
-    Result OnEncodedImage(
+    int32_t Encoded(
         const webrtc::EncodedImage& encoded_image,
         const webrtc::CodecSpecificInfo* codec_specific_info,
         const webrtc::RTPFragmentationHeader* fragmentation) override;
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
index eba59d0..7b1e9d9 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
@@ -120,12 +120,12 @@
                               size_t stream_idx)
       : adapter_(adapter), stream_idx_(stream_idx) {}
 
-  EncodedImageCallback::Result OnEncodedImage(
-      const webrtc::EncodedImage& encoded_image,
-      const webrtc::CodecSpecificInfo* codec_specific_info,
-      const webrtc::RTPFragmentationHeader* fragmentation) override {
-    return adapter_->OnEncodedImage(stream_idx_, encoded_image,
-                                    codec_specific_info, fragmentation);
+  int32_t Encoded(
+      const webrtc::EncodedImage& encodedImage,
+      const webrtc::CodecSpecificInfo* codecSpecificInfo = NULL,
+      const webrtc::RTPFragmentationHeader* fragmentation = NULL) override {
+    return adapter_->Encoded(stream_idx_, encodedImage, codecSpecificInfo,
+                             fragmentation);
   }
 
  private:
@@ -404,7 +404,7 @@
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
-EncodedImageCallback::Result SimulcastEncoderAdapter::OnEncodedImage(
+int32_t SimulcastEncoderAdapter::Encoded(
     size_t stream_idx,
     const EncodedImage& encodedImage,
     const CodecSpecificInfo* codecSpecificInfo,
@@ -413,7 +413,7 @@
   CodecSpecificInfoVP8* vp8Info = &(stream_codec_specific.codecSpecific.VP8);
   vp8Info->simulcastIdx = stream_idx;
 
-  return encoded_complete_callback_->OnEncodedImage(
+  return encoded_complete_callback_->Encoded(
       encodedImage, &stream_codec_specific, fragmentation);
 }
 
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
index be6aa59..fca16df 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
@@ -51,11 +51,10 @@
   // Eventual handler for the contained encoders' EncodedImageCallbacks, but
   // called from an internal helper that also knows the correct stream
   // index.
-  EncodedImageCallback::Result OnEncodedImage(
-      size_t stream_idx,
-      const EncodedImage& encoded_image,
-      const CodecSpecificInfo* codec_specific_info,
-      const RTPFragmentationHeader* fragmentation);
+  int32_t Encoded(size_t stream_idx,
+                  const EncodedImage& encodedImage,
+                  const CodecSpecificInfo* codecSpecificInfo = NULL,
+                  const RTPFragmentationHeader* fragmentation = NULL);
 
   void OnDroppedFrame() override;
 
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
index d14d1a4..efddb72 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
@@ -242,16 +242,16 @@
         last_encoded_image_simulcast_index_(-1) {}
   virtual ~TestSimulcastEncoderAdapterFake() {}
 
-  Result OnEncodedImage(const EncodedImage& encoded_image,
-                        const CodecSpecificInfo* codec_specific_info,
-                        const RTPFragmentationHeader* fragmentation) override {
-    last_encoded_image_width_ = encoded_image._encodedWidth;
-    last_encoded_image_height_ = encoded_image._encodedHeight;
-    if (codec_specific_info) {
+  int32_t Encoded(const EncodedImage& encodedImage,
+                  const CodecSpecificInfo* codecSpecificInfo = NULL,
+                  const RTPFragmentationHeader* fragmentation = NULL) override {
+    last_encoded_image_width_ = encodedImage._encodedWidth;
+    last_encoded_image_height_ = encodedImage._encodedHeight;
+    if (codecSpecificInfo) {
       last_encoded_image_simulcast_index_ =
-          codec_specific_info->codecSpecific.VP8.simulcastIdx;
+          codecSpecificInfo->codecSpecific.VP8.simulcastIdx;
     }
-    return Result(Result::OK, encoded_image._timeStamp);
+    return 0;
   }
 
   bool GetLastEncodedImageInfo(int* out_width,
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
index e2bd71e..b277ad2 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
@@ -61,9 +61,9 @@
     delete[] encoded_frame_._buffer;
   }
 
-  virtual Result OnEncodedImage(const EncodedImage& encoded_image,
-                                const CodecSpecificInfo* codec_specific_info,
-                                const RTPFragmentationHeader* fragmentation) {
+  virtual int32_t Encoded(const EncodedImage& encoded_image,
+                          const CodecSpecificInfo* codec_specific_info,
+                          const RTPFragmentationHeader* fragmentation) {
     // Only store the base layer.
     if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
       if (encoded_image._frameType == kVideoFrameKey) {
@@ -89,7 +89,7 @@
         codec_specific_info->codecSpecific.VP8.layerSync;
     temporal_layer_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
         codec_specific_info->codecSpecific.VP8.temporalIdx;
-    return Result(Result::OK, encoded_image._timeStamp);
+    return 0;
   }
   void GetLastEncodedFrameInfo(int* picture_id,
                                int* temporal_layer,
@@ -338,38 +338,34 @@
     if (expected_video_streams >= 1) {
       EXPECT_CALL(
           encoder_callback_,
-          OnEncodedImage(
+          Encoded(
               AllOf(Field(&EncodedImage::_frameType, frame_type),
                     Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
                     Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
               _, _))
           .Times(1)
-          .WillRepeatedly(Return(EncodedImageCallback::Result(
-              EncodedImageCallback::Result::OK, 0)));
+          .WillRepeatedly(Return(0));
     }
     if (expected_video_streams >= 2) {
       EXPECT_CALL(
           encoder_callback_,
-          OnEncodedImage(
+          Encoded(
               AllOf(Field(&EncodedImage::_frameType, frame_type),
                     Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
                     Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
               _, _))
           .Times(1)
-          .WillRepeatedly(Return(EncodedImageCallback::Result(
-              EncodedImageCallback::Result::OK, 0)));
+          .WillRepeatedly(Return(0));
     }
     if (expected_video_streams >= 3) {
       EXPECT_CALL(
           encoder_callback_,
-          OnEncodedImage(
-              AllOf(Field(&EncodedImage::_frameType, frame_type),
-                    Field(&EncodedImage::_encodedWidth, kDefaultWidth),
-                    Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
-              _, _))
+          Encoded(AllOf(Field(&EncodedImage::_frameType, frame_type),
+                        Field(&EncodedImage::_encodedWidth, kDefaultWidth),
+                        Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
+                  _, _))
           .Times(1)
-          .WillRepeatedly(Return(EncodedImageCallback::Result(
-              EncodedImageCallback::Result::OK, 0)));
+          .WillRepeatedly(Return(0));
     }
   }
 
@@ -594,15 +590,13 @@
     encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
     std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                        kVideoFrameDelta);
-    EXPECT_CALL(
-        encoder_callback_,
-        OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
-                             Field(&EncodedImage::_encodedWidth, width),
-                             Field(&EncodedImage::_encodedHeight, height)),
-                       _, _))
+    EXPECT_CALL(encoder_callback_,
+                Encoded(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
+                              Field(&EncodedImage::_encodedWidth, width),
+                              Field(&EncodedImage::_encodedHeight, height)),
+                        _, _))
         .Times(1)
-        .WillRepeatedly(Return(
-            EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
+        .WillRepeatedly(Return(0));
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
     // Switch back.
diff --git a/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index 12dcb7c..756a102 100644
--- a/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -43,9 +43,9 @@
                                     void* decoderSpecificInfo)
       : encoded_frame_(frame), encode_complete_(false) {}
 
-  Result OnEncodedImage(const EncodedImage& encoded_frame_,
-                        const CodecSpecificInfo* codec_specific_info,
-                        const RTPFragmentationHeader* fragmentation) override;
+  virtual int Encoded(const EncodedImage& encoded_frame_,
+                      const CodecSpecificInfo* codecSpecificInfo,
+                      const RTPFragmentationHeader*);
   bool EncodeComplete();
 
  private:
@@ -54,10 +54,9 @@
   bool encode_complete_;
 };
 
-webrtc::EncodedImageCallback::Result
-Vp8UnitTestEncodeCompleteCallback::OnEncodedImage(
+int Vp8UnitTestEncodeCompleteCallback::Encoded(
     const EncodedImage& encoded_frame,
-    const CodecSpecificInfo* codec_specific_info,
+    const CodecSpecificInfo* codecSpecificInfo,
     const RTPFragmentationHeader* fragmentation) {
   if (encoded_frame_->_size < encoded_frame._length) {
     delete[] encoded_frame_->_buffer;
@@ -73,7 +72,7 @@
   encoded_frame_->_frameType = encoded_frame._frameType;
   encoded_frame_->_completeFrame = encoded_frame._completeFrame;
   encode_complete_ = true;
-  return Result(Result::OK, 0);
+  return 0;
 }
 
 bool Vp8UnitTestEncodeCompleteCallback::EncodeComplete() {
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
index d7927eb..2802700 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
@@ -26,9 +26,9 @@
   explicit Vp8SequenceCoderEncodeCallback(FILE* encoded_file)
       : encoded_file_(encoded_file), encoded_bytes_(0) {}
   ~Vp8SequenceCoderEncodeCallback();
-  Result OnEncodedImage(const webrtc::EncodedImage& encoded_image,
-                        const webrtc::CodecSpecificInfo* codec_specific_info,
-                        const webrtc::RTPFragmentationHeader*);
+  int Encoded(const webrtc::EncodedImage& encoded_image,
+              const webrtc::CodecSpecificInfo* codecSpecificInfo,
+              const webrtc::RTPFragmentationHeader*);
   // Returns the encoded image.
   webrtc::EncodedImage encoded_image() { return encoded_image_; }
   size_t encoded_bytes() { return encoded_bytes_; }
@@ -43,9 +43,7 @@
   delete[] encoded_image_._buffer;
   encoded_image_._buffer = NULL;
 }
-
-webrtc::EncodedImageCallback::Result
-Vp8SequenceCoderEncodeCallback::OnEncodedImage(
+int Vp8SequenceCoderEncodeCallback::Encoded(
     const webrtc::EncodedImage& encoded_image,
     const webrtc::CodecSpecificInfo* codecSpecificInfo,
     const webrtc::RTPFragmentationHeader* fragmentation) {
@@ -60,11 +58,11 @@
   if (encoded_file_ != NULL) {
     if (fwrite(encoded_image._buffer, 1, encoded_image._length,
                encoded_file_) != encoded_image._length) {
-      return Result(Result::ERROR_SEND_FAILED, 0);
+      return -1;
     }
   }
   encoded_bytes_ += encoded_image_._length;
-  return Result(Result::OK, 0);
+  return 0;
 }
 
 // TODO(mikhal): Add support for varying the frame size.
diff --git a/webrtc/modules/video_coding/generic_encoder.cc b/webrtc/modules/video_coding/generic_encoder.cc
index 28eb10a..e63da02 100644
--- a/webrtc/modules/video_coding/generic_encoder.cc
+++ b/webrtc/modules/video_coding/generic_encoder.cc
@@ -21,7 +21,6 @@
 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 
 namespace webrtc {
-
 VCMGenericEncoder::VCMGenericEncoder(
     VideoEncoder* encoder,
     VCMEncodedFrameCallback* encoded_frame_callback,
@@ -144,25 +143,23 @@
 
 VCMEncodedFrameCallback::~VCMEncodedFrameCallback() {}
 
-EncodedImageCallback::Result VCMEncodedFrameCallback::OnEncodedImage(
+int32_t VCMEncodedFrameCallback::Encoded(
     const EncodedImage& encoded_image,
     const CodecSpecificInfo* codec_specific,
     const RTPFragmentationHeader* fragmentation_header) {
   TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
                        "timestamp", encoded_image._timeStamp);
-  Result result = post_encode_callback_->OnEncodedImage(
-      encoded_image, codec_specific, fragmentation_header);
-  if (result.error != Result::OK)
-    return result;
+  int ret_val = post_encode_callback_->Encoded(encoded_image, codec_specific,
+                                               fragmentation_header);
+  if (ret_val < 0)
+    return ret_val;
 
   if (media_opt_) {
     media_opt_->UpdateWithEncodedData(encoded_image);
-    if (internal_source_) {
-      // Signal to encoder to drop next frame.
-      result.drop_next_frame = media_opt_->DropFrame();
-    }
+    if (internal_source_)
+      return media_opt_->DropFrame();  // Signal to encoder to drop next frame.
   }
-  return result;
+  return VCM_OK;
 }
 
 }  // namespace webrtc
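
The restored VCMEncodedFrameCallback::Encoded() above folds two signals into a single int32_t: a negative value propagates a send failure, and a positive value (media_opt_->DropFrame() when the encoder has an internal source) asks the encoder to skip the next frame. A minimal sketch of how a caller can interpret that convention follows; the helper name, the out-parameter, and the include paths are assumptions for illustration, not code from this CL.

#include <stdint.h>

#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/video_encoder.h"

// Illustrative only: interpret the int32_t returned by
// EncodedImageCallback::Encoded() (< 0 failure, 0 ok, > 0 drop next frame).
int32_t DeliverEncodedFrame(webrtc::EncodedImageCallback* callback,
                            const webrtc::EncodedImage& image,
                            const webrtc::CodecSpecificInfo* info,
                            const webrtc::RTPFragmentationHeader* fragmentation,
                            bool* drop_next_frame) {
  const int32_t ret = callback->Encoded(image, info, fragmentation);
  if (ret < 0)
    return ret;                  // Propagate the failure to the encoder.
  *drop_next_frame = (ret > 0);  // A positive return requests a frame drop.
  return 0;
}
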
diff --git a/webrtc/modules/video_coding/generic_encoder.h b/webrtc/modules/video_coding/generic_encoder.h
index 9b5d2e6..9f73f36 100644
--- a/webrtc/modules/video_coding/generic_encoder.h
+++ b/webrtc/modules/video_coding/generic_encoder.h
@@ -41,10 +41,9 @@
   virtual ~VCMEncodedFrameCallback();
 
   // Implements EncodedImageCallback.
-  EncodedImageCallback::Result OnEncodedImage(
-      const EncodedImage& encoded_image,
-      const CodecSpecificInfo* codec_specific_info,
-      const RTPFragmentationHeader* fragmentation) override;
+  int32_t Encoded(const EncodedImage& encoded_image,
+                  const CodecSpecificInfo* codec_specific,
+                  const RTPFragmentationHeader* fragmentation_header) override;
   void SetInternalSource(bool internal_source) {
     internal_source_ = internal_source;
   }
diff --git a/webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h b/webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h
index 20f76cc..382eeb3 100644
--- a/webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h
+++ b/webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h
@@ -22,10 +22,10 @@
 
 class MockEncodedImageCallback : public EncodedImageCallback {
  public:
-  MOCK_METHOD3(OnEncodedImage,
-               Result(const EncodedImage& encodedImage,
-                      const CodecSpecificInfo* codecSpecificInfo,
-                      const RTPFragmentationHeader* fragmentation));
+  MOCK_METHOD3(Encoded,
+               int32_t(const EncodedImage& encodedImage,
+                       const CodecSpecificInfo* codecSpecificInfo,
+                       const RTPFragmentationHeader* fragmentation));
 };
 
 class MockVideoEncoder : public VideoEncoder {
diff --git a/webrtc/modules/video_coding/video_coding_impl.cc b/webrtc/modules/video_coding/video_coding_impl.cc
index 2f709b6..077f336 100644
--- a/webrtc/modules/video_coding/video_coding_impl.cc
+++ b/webrtc/modules/video_coding/video_coding_impl.cc
@@ -45,8 +45,7 @@
 class EncodedImageCallbackWrapper : public EncodedImageCallback {
  public:
   EncodedImageCallbackWrapper()
-      : cs_(CriticalSectionWrapper::CreateCriticalSection()),
-        callback_(nullptr) {}
+      : cs_(CriticalSectionWrapper::CreateCriticalSection()), callback_(NULL) {}
 
   virtual ~EncodedImageCallbackWrapper() {}
 
@@ -55,15 +54,14 @@
     callback_ = callback;
   }
 
-  virtual Result OnEncodedImage(const EncodedImage& encoded_image,
-                                const CodecSpecificInfo* codec_specific_info,
-                                const RTPFragmentationHeader* fragmentation) {
+  virtual int32_t Encoded(const EncodedImage& encoded_image,
+                          const CodecSpecificInfo* codec_specific_info,
+                          const RTPFragmentationHeader* fragmentation) {
     CriticalSectionScoped cs(cs_.get());
-    if (callback_) {
-      return callback_->OnEncodedImage(encoded_image, codec_specific_info,
-                                       fragmentation);
-    }
-    return Result(Result::ERROR_SEND_FAILED);
+    if (callback_)
+      return callback_->Encoded(encoded_image, codec_specific_info,
+                                fragmentation);
+    return 0;
   }
 
  private:
diff --git a/webrtc/modules/video_coding/video_sender_unittest.cc b/webrtc/modules/video_coding/video_sender_unittest.cc
index a3766c1..923144e 100644
--- a/webrtc/modules/video_coding/video_sender_unittest.cc
+++ b/webrtc/modules/video_coding/video_sender_unittest.cc
@@ -93,13 +93,13 @@
 
   virtual ~EncodedImageCallbackImpl() {}
 
-  Result OnEncodedImage(const EncodedImage& encoded_image,
-                        const CodecSpecificInfo* codec_specific_info,
-                        const RTPFragmentationHeader* fragmentation) override {
+  int32_t Encoded(const EncodedImage& encoded_image,
+                  const CodecSpecificInfo* codec_specific_info,
+                  const RTPFragmentationHeader* fragmentation) override {
     assert(codec_specific_info);
     frame_data_.push_back(
         FrameData(encoded_image._length, *codec_specific_info));
-    return Result(Result::OK, encoded_image._timeStamp);
+    return 0;
   }
 
   void Reset() {
diff --git a/webrtc/test/fake_encoder.cc b/webrtc/test/fake_encoder.cc
index a3fb768..72df40f 100644
--- a/webrtc/test/fake_encoder.cc
+++ b/webrtc/test/fake_encoder.cc
@@ -148,10 +148,9 @@
   return 0;
 }
 
-EncodedImageCallback::Result FakeH264Encoder::OnEncodedImage(
-    const EncodedImage& encoded_image,
-    const CodecSpecificInfo* codec_specific_info,
-    const RTPFragmentationHeader* fragments) {
+int32_t FakeH264Encoder::Encoded(const EncodedImage& encoded_image,
+                                 const CodecSpecificInfo* codec_specific_info,
+                                 const RTPFragmentationHeader* fragments) {
   const size_t kSpsSize = 8;
   const size_t kPpsSize = 11;
   const int kIdrFrequency = 10;
@@ -191,7 +190,7 @@
       ++fragment_counter;
     }
   }
-  return callback_->OnEncodedImage(encoded_image, NULL, &fragmentation);
+  return callback_->Encoded(encoded_image, NULL, &fragmentation);
 }
 
 DelayedEncoder::DelayedEncoder(Clock* clock, int delay_ms)
diff --git a/webrtc/test/fake_encoder.h b/webrtc/test/fake_encoder.h
index ae869ff..6bff00e 100644
--- a/webrtc/test/fake_encoder.h
+++ b/webrtc/test/fake_encoder.h
@@ -61,9 +61,9 @@
   int32_t RegisterEncodeCompleteCallback(
       EncodedImageCallback* callback) override;
 
-  Result OnEncodedImage(const EncodedImage& encodedImage,
-                        const CodecSpecificInfo* codecSpecificInfo,
-                        const RTPFragmentationHeader* fragments) override;
+  int32_t Encoded(const EncodedImage& encodedImage,
+                  const CodecSpecificInfo* codecSpecificInfo,
+                  const RTPFragmentationHeader* fragments) override;
 
  private:
   EncodedImageCallback* callback_;
diff --git a/webrtc/video/payload_router.cc b/webrtc/video/payload_router.cc
index 5bcf705..798325a 100644
--- a/webrtc/video/payload_router.cc
+++ b/webrtc/video/payload_router.cc
@@ -137,16 +137,15 @@
   }
 }
 
-EncodedImageCallback::Result PayloadRouter::OnEncodedImage(
-    const EncodedImage& encoded_image,
-    const CodecSpecificInfo* codec_specific_info,
-    const RTPFragmentationHeader* fragmentation) {
+int32_t PayloadRouter::Encoded(const EncodedImage& encoded_image,
+                               const CodecSpecificInfo* codec_specific_info,
+                               const RTPFragmentationHeader* fragmentation) {
   rtc::CritScope lock(&crit_);
   RTC_DCHECK(!rtp_modules_.empty());
   if (!active_ || num_sending_modules_ == 0)
-    return Result(Result::ERROR_SEND_FAILED);
+    return -1;
 
-  int stream_index = 0;
+  int stream_idx = 0;
 
   RTPVideoHeader rtp_video_header;
   memset(&rtp_video_header, 0, sizeof(RTPVideoHeader));
@@ -159,19 +158,13 @@
   // The simulcast index might actually be larger than the number of modules
   // in case the encoder was processing a frame during a codec reconfig.
   if (rtp_video_header.simulcastIdx >= num_sending_modules_)
-    return Result(Result::ERROR_SEND_FAILED);
-  stream_index = rtp_video_header.simulcastIdx;
+    return -1;
+  stream_idx = rtp_video_header.simulcastIdx;
 
-  uint32_t frame_id;
-  int send_result = rtp_modules_[stream_index]->SendOutgoingData(
+  return rtp_modules_[stream_idx]->SendOutgoingData(
       encoded_image._frameType, payload_type_, encoded_image._timeStamp,
       encoded_image.capture_time_ms_, encoded_image._buffer,
-      encoded_image._length, fragmentation, &rtp_video_header, &frame_id);
-
-  if (send_result < 0)
-    return Result(Result::ERROR_SEND_FAILED);
-
-  return Result(Result::OK, frame_id);
+      encoded_image._length, fragmentation, &rtp_video_header);
 }
 
 size_t PayloadRouter::MaxPayloadLength() const {
diff --git a/webrtc/video/payload_router.h b/webrtc/video/payload_router.h
index 24aee74..9c66bd0 100644
--- a/webrtc/video/payload_router.h
+++ b/webrtc/video/payload_router.h
@@ -32,8 +32,8 @@
 class PayloadRouter : public EncodedImageCallback {
  public:
   // Rtp modules are assumed to be sorted in simulcast index order.
-  PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules,
-                int payload_type);
+  explicit PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules,
+                         int payload_type);
   ~PayloadRouter();
 
   static size_t DefaultMaxPayloadLength();
@@ -46,10 +46,9 @@
 
   // Implements EncodedImageCallback.
   // Returns 0 if the packet was routed / sent, -1 otherwise.
-  EncodedImageCallback::Result OnEncodedImage(
-      const EncodedImage& encoded_image,
-      const CodecSpecificInfo* codec_specific_info,
-      const RTPFragmentationHeader* fragmentation) override;
+  int32_t Encoded(const EncodedImage& encoded_image,
+                  const CodecSpecificInfo* codec_specific_info,
+                  const RTPFragmentationHeader* fragmentation) override;
 
   // Returns the maximum allowed data payload length, given the configured MTU
   // and RTP headers.
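
As the comment above documents, PayloadRouter::Encoded() returns 0 when the frame was handed to an RTP module and -1 when it could not be routed (the router is inactive, has no sending modules, or the simulcast index is past the configured streams). A hedged caller-side sketch of that check follows; the function name and the logging are illustrative assumptions, not code from this CL.

#include "webrtc/base/logging.h"
#include "webrtc/video/payload_router.h"

// Illustrative only: drop-and-log handling of PayloadRouter's 0 / -1 result.
void ForwardToRouter(webrtc::PayloadRouter* router,
                     const webrtc::EncodedImage& image,
                     const webrtc::CodecSpecificInfo* info,
                     const webrtc::RTPFragmentationHeader* fragmentation) {
  if (router->Encoded(image, info, fragmentation) != 0) {
    // Router inactive, no sending modules, or simulcast index out of range.
    LOG(LS_WARNING) << "PayloadRouter rejected an encoded frame; dropping it.";
  }
}
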
diff --git a/webrtc/video/payload_router_unittest.cc b/webrtc/video/payload_router_unittest.cc
index def76b7..62dba29 100644
--- a/webrtc/video/payload_router_unittest.cc
+++ b/webrtc/video/payload_router_unittest.cc
@@ -44,7 +44,7 @@
   EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
                                     encoded_image._timeStamp,
                                     encoded_image.capture_time_ms_, &payload,
-                                    encoded_image._length, nullptr, _, _))
+                                    encoded_image._length, nullptr, _))
       .Times(0);
   EXPECT_EQ(-1, payload_router.Encoded(encoded_image, nullptr, nullptr));
 
@@ -52,7 +52,7 @@
   EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
                                     encoded_image._timeStamp,
                                     encoded_image.capture_time_ms_, &payload,
-                                    encoded_image._length, nullptr, _, _))
+                                    encoded_image._length, nullptr, _))
       .Times(1);
   EXPECT_EQ(0, payload_router.Encoded(encoded_image, nullptr, nullptr));
 
@@ -60,7 +60,7 @@
   EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
                                     encoded_image._timeStamp,
                                     encoded_image.capture_time_ms_, &payload,
-                                    encoded_image._length, nullptr, _, _))
+                                    encoded_image._length, nullptr, _))
       .Times(0);
   EXPECT_EQ(-1, payload_router.Encoded(encoded_image, nullptr, nullptr));
 
@@ -68,7 +68,7 @@
   EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
                                     encoded_image._timeStamp,
                                     encoded_image.capture_time_ms_, &payload,
-                                    encoded_image._length, nullptr, _, _))
+                                    encoded_image._length, nullptr, _))
       .Times(1);
   EXPECT_EQ(0, payload_router.Encoded(encoded_image, nullptr, nullptr));
 
@@ -77,7 +77,7 @@
   EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
                                     encoded_image._timeStamp,
                                     encoded_image.capture_time_ms_, &payload,
-                                    encoded_image._length, nullptr, _, _))
+                                    encoded_image._length, nullptr, _))
       .Times(0);
   EXPECT_EQ(-1, payload_router.Encoded(encoded_image, nullptr, nullptr));
 }
@@ -111,9 +111,10 @@
   EXPECT_CALL(rtp_1, SendOutgoingData(encoded_image._frameType, payload_type,
                                       encoded_image._timeStamp,
                                       encoded_image.capture_time_ms_, &payload,
-                                      encoded_image._length, nullptr, _, _))
+                                      encoded_image._length, nullptr, _))
       .Times(1);
-  EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _, _)).Times(0);
+  EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _))
+      .Times(0);
   EXPECT_EQ(0, payload_router.Encoded(encoded_image, &codec_info_1, nullptr));
 
   CodecSpecificInfo codec_info_2;
@@ -124,17 +125,17 @@
   EXPECT_CALL(rtp_2, SendOutgoingData(encoded_image._frameType, payload_type,
                                       encoded_image._timeStamp,
                                       encoded_image.capture_time_ms_, &payload,
-                                      encoded_image._length, nullptr, _, _))
+                                      encoded_image._length, nullptr, _))
       .Times(1);
-  EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _, _))
+  EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _))
       .Times(0);
   EXPECT_EQ(0, payload_router.Encoded(encoded_image, &codec_info_2, nullptr));
 
   // Inactive.
   payload_router.set_active(false);
-  EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _, _))
+  EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _))
       .Times(0);
-  EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _, _))
+  EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _))
       .Times(0);
   EXPECT_EQ(-1, payload_router.Encoded(encoded_image, &codec_info_1, nullptr));
   EXPECT_EQ(-1, payload_router.Encoded(encoded_image, &codec_info_2, nullptr));
@@ -143,9 +144,9 @@
   streams.pop_back();  // Remove a stream.
   payload_router.SetSendStreams(streams);
   payload_router.set_active(true);
-  EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _, _))
+  EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _))
       .Times(0);
-  EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _, _))
+  EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _))
       .Times(0);
   codec_info_2.codecSpecific.VP8.simulcastIdx = 1;
   EXPECT_EQ(-1, payload_router.Encoded(encoded_image, &codec_info_2, nullptr));
diff --git a/webrtc/video/video_encoder_unittest.cc b/webrtc/video/video_encoder_unittest.cc
index ac006bb..f2c3ea6 100644
--- a/webrtc/video/video_encoder_unittest.cc
+++ b/webrtc/video/video_encoder_unittest.cc
@@ -85,12 +85,10 @@
 
   class FakeEncodedImageCallback : public EncodedImageCallback {
    public:
-    Result OnEncodedImage(
-        const EncodedImage& encoded_image,
-        const CodecSpecificInfo* codec_specific_info,
-        const RTPFragmentationHeader* fragmentation) override {
-      ++callback_count_;
-      return Result(Result::OK, callback_count_);
+    int32_t Encoded(const EncodedImage& encoded_image,
+                    const CodecSpecificInfo* codec_specific_info,
+                    const RTPFragmentationHeader* fragmentation) override {
+      return ++callback_count_;
     }
     int callback_count_ = 0;
   };
diff --git a/webrtc/video/video_receive_stream.cc b/webrtc/video/video_receive_stream.cc
index 3c2139c..bc51269 100644
--- a/webrtc/video/video_receive_stream.cc
+++ b/webrtc/video/video_receive_stream.cc
@@ -324,7 +324,7 @@
 
 // TODO(asapersson): Consider moving callback from video_encoder.h or
 // creating a different callback.
-EncodedImageCallback::Result VideoReceiveStream::OnEncodedImage(
+int32_t VideoReceiveStream::Encoded(
     const EncodedImage& encoded_image,
     const CodecSpecificInfo* codec_specific_info,
     const RTPFragmentationHeader* fragmentation) {
@@ -348,7 +348,7 @@
     }
   }
 
-  return Result(Result::OK, encoded_image._timeStamp);
+  return 0;
 }
 
 bool VideoReceiveStream::DecodeThreadFunction(void* ptr) {
diff --git a/webrtc/video/video_receive_stream.h b/webrtc/video/video_receive_stream.h
index ba5e4e9..d37aece 100644
--- a/webrtc/video/video_receive_stream.h
+++ b/webrtc/video/video_receive_stream.h
@@ -67,10 +67,9 @@
   void OnFrame(const VideoFrame& video_frame) override;
 
   // Overrides EncodedImageCallback.
-  EncodedImageCallback::Result OnEncodedImage(
-      const EncodedImage& encoded_image,
-      const CodecSpecificInfo* codec_specific_info,
-      const RTPFragmentationHeader* fragmentation) override;
+  int32_t Encoded(const EncodedImage& encoded_image,
+                  const CodecSpecificInfo* codec_specific_info,
+                  const RTPFragmentationHeader* fragmentation) override;
 
   const Config& config() const { return config_; }
 
diff --git a/webrtc/video/video_send_stream.cc b/webrtc/video/video_send_stream.cc
index cb9ff41..940e1cc 100644
--- a/webrtc/video/video_send_stream.cc
+++ b/webrtc/video/video_send_stream.cc
@@ -720,10 +720,9 @@
     config_.overuse_callback->OnLoadUpdate(LoadObserver::kUnderuse);
 }
 
-EncodedImageCallback::Result VideoSendStream::OnEncodedImage(
-    const EncodedImage& encoded_image,
-    const CodecSpecificInfo* codec_specific_info,
-    const RTPFragmentationHeader* fragmentation) {
+int32_t VideoSendStream::Encoded(const EncodedImage& encoded_image,
+                                 const CodecSpecificInfo* codec_specific_info,
+                                 const RTPFragmentationHeader* fragmentation) {
   if (config_.post_encode_callback) {
     config_.post_encode_callback->EncodedFrameCallback(
         EncodedFrame(encoded_image._buffer, encoded_image._length,
@@ -731,7 +730,7 @@
   }
 
   protection_bitrate_calculator_.UpdateWithEncodedData(encoded_image);
-  EncodedImageCallback::Result result = payload_router_.OnEncodedImage(
+  int32_t return_value = payload_router_.Encoded(
       encoded_image, codec_specific_info, fragmentation);
 
   if (kEnableFrameRecording) {
@@ -757,7 +756,7 @@
     }
   }
 
-  return result;
+  return return_value;
 }
 
 void VideoSendStream::ConfigureProtection() {
diff --git a/webrtc/video/video_send_stream.h b/webrtc/video/video_send_stream.h
index c67dc70..d5ea363 100644
--- a/webrtc/video/video_send_stream.h
+++ b/webrtc/video/video_send_stream.h
@@ -108,10 +108,9 @@
   // Implements EncodedImageCallback. The implementation routes encoded frames
   // to the |payload_router_| and |config.pre_encode_callback| if set.
   // Called on an arbitrary encoder callback thread.
-  EncodedImageCallback::Result OnEncodedImage(
-      const EncodedImage& encoded_image,
-      const CodecSpecificInfo* codec_specific_info,
-      const RTPFragmentationHeader* fragmentation) override;
+  int32_t Encoded(const EncodedImage& encoded_image,
+                  const CodecSpecificInfo* codec_specific_info,
+                  const RTPFragmentationHeader* fragmentation) override;
 
   static bool EncoderThreadFunction(void* obj);
   void EncoderProcess();
diff --git a/webrtc/video/vie_encoder.cc b/webrtc/video/vie_encoder.cc
index 956fd77..30322c2 100644
--- a/webrtc/video/vie_encoder.cc
+++ b/webrtc/video/vie_encoder.cc
@@ -193,10 +193,9 @@
   return time_of_last_frame_activity_ms_;
 }
 
-EncodedImageCallback::Result ViEEncoder::OnEncodedImage(
-    const EncodedImage& encoded_image,
-    const CodecSpecificInfo* codec_specific_info,
-    const RTPFragmentationHeader* fragmentation) {
+int32_t ViEEncoder::Encoded(const EncodedImage& encoded_image,
+                            const CodecSpecificInfo* codec_specific_info,
+                            const RTPFragmentationHeader* fragmentation) {
   {
     rtc::CritScope lock(&data_cs_);
     time_of_last_frame_activity_ms_ = rtc::TimeMillis();
@@ -205,11 +204,11 @@
     stats_proxy_->OnSendEncodedImage(encoded_image, codec_specific_info);
   }
 
-  EncodedImageCallback::Result result =
-      sink_->OnEncodedImage(encoded_image, codec_specific_info, fragmentation);
+  int success =
+      sink_->Encoded(encoded_image, codec_specific_info, fragmentation);
 
   overuse_detector_->FrameSent(encoded_image._timeStamp);
-  return result;
+  return success;
 }
 
 void ViEEncoder::SendStatistics(uint32_t bit_rate,
diff --git a/webrtc/video/vie_encoder.h b/webrtc/video/vie_encoder.h
index f3f8340..bf090fe 100644
--- a/webrtc/video/vie_encoder.h
+++ b/webrtc/video/vie_encoder.h
@@ -82,10 +82,9 @@
 
 
   // Implements EncodedImageCallback.
-  EncodedImageCallback::Result OnEncodedImage(
-      const EncodedImage& encoded_image,
-      const CodecSpecificInfo* codec_specific_info,
-      const RTPFragmentationHeader* fragmentation) override;
+  int32_t Encoded(const EncodedImage& encoded_image,
+                  const CodecSpecificInfo* codec_specific_info,
+                  const RTPFragmentationHeader* fragmentation) override;
 
   // Implements VideoSendStatisticsCallback.
   void SendStatistics(uint32_t bit_rate,
diff --git a/webrtc/video_encoder.h b/webrtc/video_encoder.h
index 1a036c2..0100239 100644
--- a/webrtc/video_encoder.h
+++ b/webrtc/video_encoder.h
@@ -30,44 +30,11 @@
  public:
   virtual ~EncodedImageCallback() {}
 
-  struct Result {
-    enum Error {
-      OK,
-
-      // Failed to send the packet.
-      ERROR_SEND_FAILED,
-    };
-
-    Result(Error error) : error(error) {}
-    Result(Error error, uint32_t frame_id) : error(error), frame_id(frame_id) {}
-
-    Error error;
-
-    // Frame ID assigned to the frame. The frame ID should be the same as the ID
-    // seen by the receiver for this frame. RTP timestamp of the frame is used
-    // as frame ID when RTP is used to send video. Must be used only when
-    // error=OK.
-    uint32_t frame_id = 0;
-
-    // Tells the encoder that the next frame should be dropped.
-    bool drop_next_frame = false;
-  };
-
   // Callback function which is called when an image has been encoded.
-  virtual Result OnEncodedImage(
-      const EncodedImage& encoded_image,
-      const CodecSpecificInfo* codec_specific_info,
-      const RTPFragmentationHeader* fragmentation) = 0;
-
-  // DEPRECATED.
-  // TODO(sergeyu): Remove this method.
+  // TODO(perkj): Change this to return void.
   virtual int32_t Encoded(const EncodedImage& encoded_image,
                           const CodecSpecificInfo* codec_specific_info,
-                          const RTPFragmentationHeader* fragmentation) {
-    Result result =
-        OnEncodedImage(encoded_image, codec_specific_info, fragmentation);
-    return (result.error != Result::OK) ? -1 : (result.drop_next_frame ? 1 : 0);
-  }
+                          const RTPFragmentationHeader* fragmentation) = 0;
 };
 
 class VideoEncoder {
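
For orientation, the hunk above restores the pre-CL shape of EncodedImageCallback: a single pure-virtual Encoded() whose int32_t result is -1 on failure, 0 on success, and 1 when the next frame should be dropped (the mapping the deleted shim used). A minimal implementation against the restored interface could look like the sketch below; the class name, the byte counter, and the include paths are assumptions for illustration.

#include <stddef.h>

#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/video_encoder.h"

namespace {

// Illustrative only: a sink that counts encoded bytes and accepts every frame.
class ByteCountingCallback : public webrtc::EncodedImageCallback {
 public:
  int32_t Encoded(const webrtc::EncodedImage& encoded_image,
                  const webrtc::CodecSpecificInfo* codec_specific_info,
                  const webrtc::RTPFragmentationHeader* fragmentation) override {
    total_bytes_ += encoded_image._length;
    return 0;  // 0 = delivered; -1 would report failure, 1 would request a
               // drop of the next frame.
  }
  size_t total_bytes() const { return total_bytes_; }

 private:
  size_t total_bytes_ = 0;
};

}  // namespace
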
diff --git a/webrtc/voice_engine/channel.cc b/webrtc/voice_engine/channel.cc
index 8bc9e2a..f5be7ef 100644
--- a/webrtc/voice_engine/channel.cc
+++ b/webrtc/voice_engine/channel.cc
@@ -360,12 +360,12 @@
   // Push data from ACM to RTP/RTCP-module to deliver audio frame for
   // packetization.
   // This call will trigger Transport::SendPacket() from the RTP/RTCP module.
-  if (!_rtpRtcpModule->SendOutgoingData(
+  if (_rtpRtcpModule->SendOutgoingData(
           (FrameType&)frameType, payloadType, timeStamp,
           // Leaving the time when this frame was
           // received from the capture device as
           // undefined for voice for now.
-          -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) {
+          -1, payloadData, payloadSize, fragmentation) == -1) {
     _engineStatisticsPtr->SetLastError(
         VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
         "Channel::SendData() failed to send data to RTP/RTCP module");