Add EncodedImageCallback::OnEncodedImage().
OnEncodedImage() is going to replace Encoded(), which is now deprecated.
The new OnEncodedImage() returns a Result struct that contains frame_id,
which tells the encoder the RTP timestamp for the frame.
BUG=chromium:621691
R=niklas.enbom@webrtc.org, sprang@webrtc.org, stefan@webrtc.org
Review URL: https://codereview.webrtc.org/2089773002 .
Committed: https://crrev.com/4c7f4cd2ef76821edca6d773d733a924b0bedd25
Committed: https://crrev.com/ad34dbe934d47f88011045671b4aea00dbd5a795
Cr-Original-Original-Commit-Position: refs/heads/master@{#13615}
Cr-Original-Commit-Position: refs/heads/master@{#13617}
Cr-Original-Original-Original-Commit-Position: refs/heads/master@{#13613}
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: 525df3ffd1626efa0dc6be5824bc64297a0e9931
diff --git a/modules/rtp_rtcp/include/rtp_rtcp.h b/modules/rtp_rtcp/include/rtp_rtcp.h
index f0d2342..85d14bd 100644
--- a/modules/rtp_rtcp/include/rtp_rtcp.h
+++ b/modules/rtp_rtcp/include/rtp_rtcp.h
@@ -225,8 +225,21 @@
// |payload_size| - size of payload buffer to send
// |fragmentation| - fragmentation offset data for fragmented frames such
// as layers or RED
- // Returns -1 on failure else 0.
- virtual int32_t SendOutgoingData(
+ // |transport_frame_id_out| - set to RTP timestamp.
+ // Returns true on success.
+
+ virtual bool SendOutgoingData(FrameType frame_type,
+ int8_t payload_type,
+ uint32_t timestamp,
+ int64_t capture_time_ms,
+ const uint8_t* payload_data,
+ size_t payload_size,
+ const RTPFragmentationHeader* fragmentation,
+ const RTPVideoHeader* rtp_video_header,
+ uint32_t* transport_frame_id_out) = 0;
+
+ // Deprecated version of the method above.
+ int32_t SendOutgoingData(
FrameType frame_type,
int8_t payload_type,
uint32_t timestamp,
@@ -234,7 +247,14 @@
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation = nullptr,
- const RTPVideoHeader* rtp_video_header = nullptr) = 0;
+ const RTPVideoHeader* rtp_video_header = nullptr) {
+ return SendOutgoingData(frame_type, payload_type, timestamp,
+ capture_time_ms, payload_data, payload_size,
+ fragmentation, rtp_video_header,
+ /*frame_id_out=*/nullptr)
+ ? 0
+ : -1;
+ }
virtual bool TimeToSendPacket(uint32_t ssrc,
uint16_t sequence_number,
diff --git a/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h b/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
index 8a44159..6834da3 100644
--- a/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
+++ b/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
@@ -98,15 +98,16 @@
MOCK_CONST_METHOD0(GetVideoBitrateObserver, BitrateStatisticsObserver*(void));
MOCK_CONST_METHOD1(EstimatedReceiveBandwidth,
int(uint32_t* available_bandwidth));
- MOCK_METHOD8(SendOutgoingData,
- int32_t(FrameType frame_type,
- int8_t payload_type,
- uint32_t timestamp,
- int64_t capture_time_ms,
- const uint8_t* payload_data,
- size_t payload_size,
- const RTPFragmentationHeader* fragmentation,
- const RTPVideoHeader* rtp_video_header));
+ MOCK_METHOD9(SendOutgoingData,
+ bool(FrameType frame_type,
+ int8_t payload_type,
+ uint32_t timestamp,
+ int64_t capture_time_ms,
+ const uint8_t* payload_data,
+ size_t payload_size,
+ const RTPFragmentationHeader* fragmentation,
+ const RTPVideoHeader* rtp_video_header,
+ uint32_t* frame_id_out));
MOCK_METHOD5(TimeToSendPacket,
bool(uint32_t ssrc,
uint16_t sequence_number,
diff --git a/modules/rtp_rtcp/source/nack_rtx_unittest.cc b/modules/rtp_rtcp/source/nack_rtx_unittest.cc
index a0e0cec..052060c 100644
--- a/modules/rtp_rtcp/source/nack_rtx_unittest.cc
+++ b/modules/rtp_rtcp/source/nack_rtx_unittest.cc
@@ -264,9 +264,9 @@
uint32_t timestamp = 3000;
uint16_t nack_list[kVideoNackListSize];
for (int frame = 0; frame < kNumFrames; ++frame) {
- EXPECT_EQ(0, rtp_rtcp_module_->SendOutgoingData(
- webrtc::kVideoFrameDelta, kPayloadType, timestamp,
- timestamp / 90, payload_data, payload_data_length));
+ EXPECT_TRUE(rtp_rtcp_module_->SendOutgoingData(
+ webrtc::kVideoFrameDelta, kPayloadType, timestamp, timestamp / 90,
+ payload_data, payload_data_length, nullptr, nullptr, nullptr));
// Min required delay until retransmit = 5 + RTT ms (RTT = 0).
fake_clock.AdvanceTimeMilliseconds(5);
int length = BuildNackList(nack_list);
@@ -310,9 +310,9 @@
// Send 30 frames which at the default size is roughly what we need to get
// enough packets.
for (int frame = 0; frame < kNumFrames; ++frame) {
- EXPECT_EQ(0, rtp_rtcp_module_->SendOutgoingData(
- webrtc::kVideoFrameDelta, kPayloadType, timestamp,
- timestamp / 90, payload_data, payload_data_length));
+ EXPECT_TRUE(rtp_rtcp_module_->SendOutgoingData(
+ webrtc::kVideoFrameDelta, kPayloadType, timestamp, timestamp / 90,
+ payload_data, payload_data_length, nullptr, nullptr, nullptr));
// Prepare next frame.
timestamp += 3000;
fake_clock.AdvanceTimeMilliseconds(33);
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
index 190136d..e58ac3c 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
@@ -384,7 +384,7 @@
return rtp_sender_.SendingMedia();
}
-int32_t ModuleRtpRtcpImpl::SendOutgoingData(
+bool ModuleRtpRtcpImpl::SendOutgoingData(
FrameType frame_type,
int8_t payload_type,
uint32_t time_stamp,
@@ -392,7 +392,8 @@
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
- const RTPVideoHeader* rtp_video_header) {
+ const RTPVideoHeader* rtp_video_header,
+ uint32_t* transport_frame_id_out) {
rtcp_sender_.SetLastRtpTime(time_stamp, capture_time_ms);
// Make sure an RTCP report isn't queued behind a key frame.
if (rtcp_sender_.TimeToSendRTCPReport(kVideoFrameKey == frame_type)) {
@@ -400,7 +401,7 @@
}
return rtp_sender_.SendOutgoingData(
frame_type, payload_type, time_stamp, capture_time_ms, payload_data,
- payload_size, fragmentation, rtp_video_header);
+ payload_size, fragmentation, rtp_video_header, transport_frame_id_out);
}
bool ModuleRtpRtcpImpl::TimeToSendPacket(uint32_t ssrc,
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.h b/modules/rtp_rtcp/source/rtp_rtcp_impl.h
index e8fc545..fd44a59 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl.h
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.h
@@ -112,15 +112,15 @@
// Used by the codec module to deliver a video or audio frame for
// packetization.
- int32_t SendOutgoingData(
- FrameType frame_type,
- int8_t payload_type,
- uint32_t time_stamp,
- int64_t capture_time_ms,
- const uint8_t* payload_data,
- size_t payload_size,
- const RTPFragmentationHeader* fragmentation = NULL,
- const RTPVideoHeader* rtp_video_header = NULL) override;
+ bool SendOutgoingData(FrameType frame_type,
+ int8_t payload_type,
+ uint32_t time_stamp,
+ int64_t capture_time_ms,
+ const uint8_t* payload_data,
+ size_t payload_size,
+ const RTPFragmentationHeader* fragmentation,
+ const RTPVideoHeader* rtp_video_header,
+ uint32_t* transport_frame_id_out) override;
bool TimeToSendPacket(uint32_t ssrc,
uint16_t sequence_number,
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
index 9dfcc13..acbe56c 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
@@ -206,14 +206,9 @@
kRtpVideoVp8, {vp8_header}};
const uint8_t payload[100] = {0};
- EXPECT_EQ(0, module->impl_->SendOutgoingData(kVideoFrameKey,
- codec_.plType,
- 0,
- 0,
- payload,
- sizeof(payload),
- NULL,
- &rtp_video_header));
+ EXPECT_EQ(true, module->impl_->SendOutgoingData(
+ kVideoFrameKey, codec_.plType, 0, 0, payload,
+ sizeof(payload), nullptr, &rtp_video_header, nullptr));
}
void IncomingRtcpNack(const RtpRtcpModule* module, uint16_t sequence_number) {
diff --git a/modules/rtp_rtcp/source/rtp_sender.cc b/modules/rtp_rtcp/source/rtp_sender.cc
index 2fe80ae..58dbc3e 100644
--- a/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/modules/rtp_rtcp/source/rtp_sender.cc
@@ -448,14 +448,15 @@
return video_rotation_active_;
}
-int32_t RTPSender::SendOutgoingData(FrameType frame_type,
- int8_t payload_type,
- uint32_t capture_timestamp,
- int64_t capture_time_ms,
- const uint8_t* payload_data,
- size_t payload_size,
- const RTPFragmentationHeader* fragmentation,
- const RTPVideoHeader* rtp_hdr) {
+bool RTPSender::SendOutgoingData(FrameType frame_type,
+ int8_t payload_type,
+ uint32_t capture_timestamp,
+ int64_t capture_time_ms,
+ const uint8_t* payload_data,
+ size_t payload_size,
+ const RTPFragmentationHeader* fragmentation,
+ const RTPVideoHeader* rtp_header,
+ uint32_t* transport_frame_id_out) {
uint32_t ssrc;
uint16_t sequence_number;
{
@@ -463,36 +464,35 @@
rtc::CritScope lock(&send_critsect_);
ssrc = ssrc_;
sequence_number = sequence_number_;
- if (!sending_media_) {
- return 0;
- }
+ if (!sending_media_)
+ return true;
}
RtpVideoCodecTypes video_type = kRtpVideoGeneric;
if (CheckPayloadType(payload_type, &video_type) != 0) {
LOG(LS_ERROR) << "Don't send data with unknown payload type: "
<< static_cast<int>(payload_type) << ".";
- return -1;
+ return false;
}
- int32_t ret_val;
+ bool result;
if (audio_configured_) {
TRACE_EVENT_ASYNC_STEP1("webrtc", "Audio", capture_timestamp,
"Send", "type", FrameTypeToString(frame_type));
assert(frame_type == kAudioFrameSpeech || frame_type == kAudioFrameCN ||
frame_type == kEmptyFrame);
- ret_val = audio_->SendAudio(frame_type, payload_type, capture_timestamp,
- payload_data, payload_size, fragmentation);
+ result = audio_->SendAudio(frame_type, payload_type, capture_timestamp,
+ payload_data, payload_size, fragmentation);
} else {
TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", capture_time_ms,
"Send", "type", FrameTypeToString(frame_type));
assert(frame_type != kAudioFrameSpeech && frame_type != kAudioFrameCN);
if (frame_type == kEmptyFrame)
- return 0;
+ return true;
- if (rtp_hdr) {
- playout_delay_oracle_.UpdateRequest(ssrc, rtp_hdr->playout_delay,
+ if (rtp_header) {
+ playout_delay_oracle_.UpdateRequest(ssrc, rtp_header->playout_delay,
sequence_number);
}
@@ -507,9 +507,16 @@
}
}
- ret_val = video_->SendVideo(
- video_type, frame_type, payload_type, capture_timestamp,
- capture_time_ms, payload_data, payload_size, fragmentation, rtp_hdr);
+ result = video_->SendVideo(video_type, frame_type, payload_type,
+ capture_timestamp, capture_time_ms, payload_data,
+ payload_size, fragmentation, rtp_header);
+ }
+
+ if (transport_frame_id_out) {
+ rtc::CritScope lock(&send_critsect_);
+ // TODO(sergeyu): Move RTP timestamp calculation from BuildRTPheader() to
+ // SendOutgoingData() and pass it to SendVideo()/SendAudio() calls.
+ *transport_frame_id_out = timestamp_;
}
rtc::CritScope cs(&statistics_crit_);
@@ -523,7 +530,7 @@
frame_count_observer_->FrameCountUpdated(frame_counts_, ssrc);
}
- return ret_val;
+ return result;
}
size_t RTPSender::TrySendRedundantPayloads(size_t bytes_to_send,
@@ -945,12 +952,12 @@
}
// TODO(pwestin): send in the RtpHeaderParser to avoid parsing it again.
-int32_t RTPSender::SendToNetwork(uint8_t* buffer,
- size_t payload_length,
- size_t rtp_header_length,
- int64_t capture_time_ms,
- StorageType storage,
- RtpPacketSender::Priority priority) {
+bool RTPSender::SendToNetwork(uint8_t* buffer,
+ size_t payload_length,
+ size_t rtp_header_length,
+ int64_t capture_time_ms,
+ StorageType storage,
+ RtpPacketSender::Priority priority) {
size_t length = payload_length + rtp_header_length;
RtpUtility::RtpHeaderParser rtp_parser(buffer, length);
@@ -972,7 +979,7 @@
// Used for NACK and to spread out the transmission of packets.
if (packet_history_.PutRTPPacket(buffer, length, capture_time_ms, storage) !=
0) {
- return -1;
+ return false;
}
if (paced_sender_) {
@@ -989,7 +996,7 @@
"PacedSend", corrected_time_ms,
"capture_time_ms", corrected_time_ms);
}
- return 0;
+ return true;
}
PacketOptions options;
@@ -1010,14 +1017,14 @@
packet_history_.SetSent(rtp_header.sequenceNumber);
if (!sent)
- return -1;
+ return false;
{
rtc::CritScope lock(&send_critsect_);
media_has_been_sent_ = true;
}
UpdateRtpStats(buffer, length, rtp_header, false, false);
- return 0;
+ return true;
}
void RTPSender::UpdateDelayStatistics(int64_t capture_time_ms, int64_t now_ms) {
diff --git a/modules/rtp_rtcp/source/rtp_sender.h b/modules/rtp_rtcp/source/rtp_sender.h
index a7fab0f..f068ae3 100644
--- a/modules/rtp_rtcp/source/rtp_sender.h
+++ b/modules/rtp_rtcp/source/rtp_sender.h
@@ -76,12 +76,12 @@
virtual size_t MaxDataPayloadLength() const = 0;
virtual uint16_t ActualSendBitrateKbit() const = 0;
- virtual int32_t SendToNetwork(uint8_t* data_buffer,
- size_t payload_length,
- size_t rtp_header_length,
- int64_t capture_time_ms,
- StorageType storage,
- RtpPacketSender::Priority priority) = 0;
+ virtual bool SendToNetwork(uint8_t* data_buffer,
+ size_t payload_length,
+ size_t rtp_header_length,
+ int64_t capture_time_ms,
+ StorageType storage,
+ RtpPacketSender::Priority priority) = 0;
virtual bool UpdateVideoRotation(uint8_t* rtp_packet,
size_t rtp_packet_length,
@@ -154,14 +154,15 @@
void SetMaxPayloadLength(size_t max_payload_length);
- int32_t SendOutgoingData(FrameType frame_type,
- int8_t payload_type,
- uint32_t timestamp,
- int64_t capture_time_ms,
- const uint8_t* payload_data,
- size_t payload_size,
- const RTPFragmentationHeader* fragmentation,
- const RTPVideoHeader* rtp_header);
+ bool SendOutgoingData(FrameType frame_type,
+ int8_t payload_type,
+ uint32_t timestamp,
+ int64_t capture_time_ms,
+ const uint8_t* payload_data,
+ size_t payload_size,
+ const RTPFragmentationHeader* fragmentation,
+ const RTPVideoHeader* rtp_header,
+ uint32_t* transport_frame_id_out);
// RTP header extension
int32_t SetTransmissionTimeOffset(int32_t transmission_time_offset);
@@ -276,12 +277,12 @@
uint32_t Timestamp() const override;
uint32_t SSRC() const override;
- int32_t SendToNetwork(uint8_t* data_buffer,
- size_t payload_length,
- size_t rtp_header_length,
- int64_t capture_time_ms,
- StorageType storage,
- RtpPacketSender::Priority priority) override;
+ bool SendToNetwork(uint8_t* data_buffer,
+ size_t payload_length,
+ size_t rtp_header_length,
+ int64_t capture_time_ms,
+ StorageType storage,
+ RtpPacketSender::Priority priority) override;
// Audio.
diff --git a/modules/rtp_rtcp/source/rtp_sender_audio.cc b/modules/rtp_rtcp/source/rtp_sender_audio.cc
index 4ff61ab..9b1b3bb 100644
--- a/modules/rtp_rtcp/source/rtp_sender_audio.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_audio.cc
@@ -145,7 +145,7 @@
return marker_bit;
}
-int32_t RTPSenderAudio::SendAudio(FrameType frame_type,
+bool RTPSenderAudio::SendAudio(FrameType frame_type,
int8_t payload_type,
uint32_t capture_timestamp,
const uint8_t* payload_data,
@@ -195,7 +195,7 @@
if (packet_size_samples >
(capture_timestamp - dtmf_timestamp_last_sent_)) {
// not time to send yet
- return 0;
+ return true;
}
}
dtmf_timestamp_last_sent_ = capture_timestamp;
@@ -228,24 +228,24 @@
ended, dtmf_payload_type, dtmf_timestamp_,
static_cast<uint16_t>(dtmf_duration_samples), false);
} else {
- if (SendTelephoneEventPacket(ended, dtmf_payload_type, dtmf_timestamp_,
- dtmf_duration_samples,
- !dtmf_event_first_packet_sent_) != 0) {
- return -1;
+ if (!SendTelephoneEventPacket(ended, dtmf_payload_type, dtmf_timestamp_,
+ dtmf_duration_samples,
+ !dtmf_event_first_packet_sent_)) {
+ return false;
}
dtmf_event_first_packet_sent_ = true;
- return 0;
+ return true;
}
}
- return 0;
+ return true;
}
if (payload_size == 0 || payload_data == NULL) {
if (frame_type == kEmptyFrame) {
// we don't send empty audio RTP packets
// no error since we use it to drive DTMF when we use VAD
- return 0;
+ return true;
}
- return -1;
+ return false;
}
uint8_t data_buffer[IP_PACKET_SIZE];
bool marker_bit = MarkerBit(frame_type, payload_type);
@@ -269,11 +269,11 @@
clock_->TimeInMilliseconds());
}
if (rtpHeaderLength <= 0) {
- return -1;
+ return false;
}
if (max_payload_length < (rtpHeaderLength + payload_size)) {
// Too large payload buffer.
- return -1;
+ return false;
}
if (red_payload_type >= 0 && // Have we configured RED?
fragmentation && fragmentation->fragmentationVectorSize > 1 &&
@@ -281,7 +281,7 @@
if (timestampOffset <= 0x3fff) {
if (fragmentation->fragmentationVectorSize != 2) {
// we only support 2 codecs when using RED
- return -1;
+ return false;
}
// only 0x80 if we have multiple blocks
data_buffer[rtpHeaderLength++] =
@@ -290,7 +290,7 @@
// sanity blockLength
if (blockLength > 0x3ff) { // block length 10 bits 1023 bytes
- return -1;
+ return false;
}
uint32_t REDheader = (timestampOffset << 10) + blockLength;
ByteWriter<uint32_t>::WriteBigEndian(data_buffer + rtpHeaderLength,
@@ -349,7 +349,7 @@
TRACE_EVENT_ASYNC_END2("webrtc", "Audio", capture_timestamp, "timestamp",
rtp_sender_->Timestamp(), "seqnum",
rtp_sender_->SequenceNumber());
- int32_t send_result = rtp_sender_->SendToNetwork(
+ bool send_result = rtp_sender_->SendToNetwork(
data_buffer, payload_size, rtpHeaderLength, rtc::TimeMillis(),
kAllowRetransmission, RtpPacketSender::kHighPriority);
if (first_packet_sent_()) {
@@ -403,18 +403,18 @@
return AddDTMF(key, time_ms, level);
}
-int32_t RTPSenderAudio::SendTelephoneEventPacket(bool ended,
- int8_t dtmf_payload_type,
- uint32_t dtmf_timestamp,
- uint16_t duration,
- bool marker_bit) {
+bool RTPSenderAudio::SendTelephoneEventPacket(bool ended,
+ int8_t dtmf_payload_type,
+ uint32_t dtmf_timestamp,
+ uint16_t duration,
+ bool marker_bit) {
uint8_t dtmfbuffer[IP_PACKET_SIZE];
- uint8_t sendCount = 1;
- int32_t retVal = 0;
+ uint8_t send_count = 1;
+ bool result = true;
if (ended) {
// resend last packet in an event 3 times
- sendCount = 3;
+ send_count = 3;
}
do {
// Send DTMF data
@@ -422,7 +422,7 @@
dtmfbuffer, dtmf_payload_type, marker_bit, dtmf_timestamp,
clock_->TimeInMilliseconds());
if (header_length <= 0)
- return -1;
+ return false;
// reset CSRC and X bit
dtmfbuffer[0] &= 0xe0;
@@ -451,12 +451,12 @@
TRACE_EVENT_INSTANT2(
TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"), "Audio::SendTelephoneEvent",
"timestamp", dtmf_timestamp, "seqnum", rtp_sender_->SequenceNumber());
- retVal = rtp_sender_->SendToNetwork(dtmfbuffer, 4, 12, rtc::TimeMillis(),
+ result = rtp_sender_->SendToNetwork(dtmfbuffer, 4, 12, rtc::TimeMillis(),
kAllowRetransmission,
RtpPacketSender::kHighPriority);
- sendCount--;
- } while (sendCount > 0 && retVal == 0);
+ send_count--;
+ } while (send_count > 0 && result);
- return retVal;
+ return result;
}
} // namespace webrtc
diff --git a/modules/rtp_rtcp/source/rtp_sender_audio.h b/modules/rtp_rtcp/source/rtp_sender_audio.h
index cb3ddb2..d540593 100644
--- a/modules/rtp_rtcp/source/rtp_sender_audio.h
+++ b/modules/rtp_rtcp/source/rtp_sender_audio.h
@@ -34,12 +34,12 @@
uint32_t rate,
RtpUtility::Payload** payload);
- int32_t SendAudio(FrameType frame_type,
- int8_t payload_type,
- uint32_t capture_timestamp,
- const uint8_t* payload_data,
- size_t payload_size,
- const RTPFragmentationHeader* fragmentation);
+ bool SendAudio(FrameType frame_type,
+ int8_t payload_type,
+ uint32_t capture_timestamp,
+ const uint8_t* payload_data,
+ size_t payload_size,
+ const RTPFragmentationHeader* fragmentation);
// set audio packet size, used to determine when it's time to send a DTMF
// packet in silence (CNG)
@@ -62,7 +62,7 @@
int32_t RED(int8_t* payload_type) const;
protected:
- int32_t SendTelephoneEventPacket(
+ bool SendTelephoneEventPacket(
bool ended,
int8_t dtmf_payload_type,
uint32_t dtmf_timestamp,
diff --git a/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
index fed767b..ce032ec 100644
--- a/modules/rtp_rtcp/source/rtp_sender_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
@@ -190,9 +190,9 @@
ASSERT_GE(rtp_length, 0);
// Packet should be stored in a send bucket.
- EXPECT_EQ(0, rtp_sender_->SendToNetwork(
- packet_, payload_length, rtp_length, capture_time_ms,
- kAllowRetransmission, RtpPacketSender::kNormalPriority));
+ EXPECT_TRUE(rtp_sender_->SendToNetwork(
+ packet_, payload_length, rtp_length, capture_time_ms,
+ kAllowRetransmission, RtpPacketSender::kNormalPriority));
}
void SendGenericPayload() {
@@ -204,9 +204,9 @@
EXPECT_EQ(0, rtp_sender_->RegisterPayload(payload_name, kPayloadType, 90000,
0, 1500));
- EXPECT_EQ(0, rtp_sender_->SendOutgoingData(
- kVideoFrameKey, kPayloadType, kTimestamp, kCaptureTimeMs,
- kPayload, sizeof(kPayload), nullptr, nullptr));
+ EXPECT_TRUE(rtp_sender_->SendOutgoingData(
+ kVideoFrameKey, kPayloadType, kTimestamp, kCaptureTimeMs, kPayload,
+ sizeof(kPayload), nullptr, nullptr, nullptr));
}
};
@@ -753,9 +753,9 @@
size_t rtp_length = static_cast<size_t>(rtp_length_int);
// Packet should be stored in a send bucket.
- EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
- capture_time_ms, kAllowRetransmission,
- RtpPacketSender::kNormalPriority));
+ EXPECT_TRUE(rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
+ capture_time_ms, kAllowRetransmission,
+ RtpPacketSender::kNormalPriority));
EXPECT_EQ(0, transport_.packets_sent_);
@@ -806,9 +806,9 @@
size_t rtp_length = static_cast<size_t>(rtp_length_int);
// Packet should be stored in a send bucket.
- EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
- capture_time_ms, kAllowRetransmission,
- RtpPacketSender::kNormalPriority));
+ EXPECT_TRUE(rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
+ capture_time_ms, kAllowRetransmission,
+ RtpPacketSender::kNormalPriority));
EXPECT_EQ(0, transport_.packets_sent_);
@@ -888,9 +888,9 @@
size_t rtp_length = static_cast<size_t>(rtp_length_int);
// Packet should be stored in a send bucket.
- EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
- capture_time_ms, kAllowRetransmission,
- RtpPacketSender::kNormalPriority));
+ EXPECT_TRUE(rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
+ capture_time_ms, kAllowRetransmission,
+ RtpPacketSender::kNormalPriority));
int total_packets_sent = 0;
EXPECT_EQ(total_packets_sent, transport_.packets_sent_);
@@ -948,9 +948,9 @@
InsertPacket(RtpPacketSender::kNormalPriority, _, _, _, _, _));
// Packet should be stored in a send bucket.
- EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
- capture_time_ms, kAllowRetransmission,
- RtpPacketSender::kNormalPriority));
+ EXPECT_TRUE(rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
+ capture_time_ms, kAllowRetransmission,
+ RtpPacketSender::kNormalPriority));
rtp_sender_->TimeToSendPacket(seq_num, capture_time_ms, false,
PacketInfo::kNotAProbe);
@@ -1115,9 +1115,9 @@
uint8_t payload[] = {47, 11, 32, 93, 89};
// Send keyframe
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
- 4321, payload, sizeof(payload),
- nullptr, nullptr));
+ ASSERT_TRUE(rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
+ 4321, payload, sizeof(payload),
+ nullptr, nullptr, nullptr));
RtpUtility::RtpHeaderParser rtp_parser(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
@@ -1141,9 +1141,9 @@
payload[1] = 42;
payload[4] = 13;
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(
- kVideoFrameDelta, payload_type, 1234, 4321, payload,
- sizeof(payload), nullptr, nullptr));
+ ASSERT_TRUE(rtp_sender_->SendOutgoingData(
+ kVideoFrameDelta, payload_type, 1234, 4321, payload, sizeof(payload),
+ nullptr, nullptr, nullptr));
RtpUtility::RtpHeaderParser rtp_parser2(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
@@ -1195,18 +1195,18 @@
EXPECT_CALL(mock_paced_sender_, InsertPacket(_, _, _, _, _, _))
.Times(::testing::AtLeast(2));
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
- 4321, payload, sizeof(payload),
- nullptr, nullptr));
+ ASSERT_TRUE(rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
+ 4321, payload, sizeof(payload),
+ nullptr, nullptr, nullptr));
EXPECT_EQ(1U, callback.num_calls_);
EXPECT_EQ(ssrc, callback.ssrc_);
EXPECT_EQ(1, callback.frame_counts_.key_frames);
EXPECT_EQ(0, callback.frame_counts_.delta_frames);
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(
- kVideoFrameDelta, payload_type, 1234, 4321, payload,
- sizeof(payload), nullptr, nullptr));
+ ASSERT_TRUE(rtp_sender_->SendOutgoingData(
+ kVideoFrameDelta, payload_type, 1234, 4321, payload, sizeof(payload),
+ nullptr, nullptr, nullptr));
EXPECT_EQ(2U, callback.num_calls_);
EXPECT_EQ(ssrc, callback.ssrc_);
@@ -1268,9 +1268,9 @@
// Send a few frames.
for (uint32_t i = 0; i < kNumPackets; ++i) {
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(
- kVideoFrameKey, payload_type, 1234, 4321, payload,
- sizeof(payload), nullptr, nullptr));
+ ASSERT_TRUE(rtp_sender_->SendOutgoingData(
+ kVideoFrameKey, payload_type, 1234, 4321, payload, sizeof(payload),
+ nullptr, nullptr, nullptr));
fake_clock_.AdvanceTimeMilliseconds(kPacketInterval);
}
@@ -1349,9 +1349,9 @@
rtp_sender_->RegisterRtpStatisticsCallback(&callback);
// Send a frame.
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
- 4321, payload, sizeof(payload),
- nullptr, nullptr));
+ ASSERT_TRUE(rtp_sender_->SendOutgoingData(
+ kVideoFrameKey, payload_type, 1234, 4321, payload,
+ sizeof(payload), nullptr, nullptr, nullptr));
StreamDataCounters expected;
expected.transmitted.payload_bytes = 6;
expected.transmitted.header_bytes = 12;
@@ -1391,9 +1391,9 @@
fec_params.fec_rate = 1;
fec_params.max_fec_frames = 1;
rtp_sender_->SetFecParameters(&fec_params, &fec_params);
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(
- kVideoFrameDelta, payload_type, 1234, 4321, payload,
- sizeof(payload), nullptr, nullptr));
+ ASSERT_TRUE(rtp_sender_->SendOutgoingData(
+ kVideoFrameDelta, payload_type, 1234, 4321, payload,
+ sizeof(payload), nullptr, nullptr, nullptr));
expected.transmitted.payload_bytes = 40;
expected.transmitted.header_bytes = 60;
expected.transmitted.packets = 5;
@@ -1410,9 +1410,9 @@
0, 1500));
uint8_t payload[] = {47, 11, 32, 93, 89};
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kAudioFrameCN, payload_type, 1234,
- 4321, payload, sizeof(payload),
- nullptr, nullptr));
+ ASSERT_TRUE(rtp_sender_->SendOutgoingData(
+ kAudioFrameCN, payload_type, 1234, 4321, payload,
+ sizeof(payload), nullptr, nullptr, nullptr));
RtpUtility::RtpHeaderParser rtp_parser(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
@@ -1439,9 +1439,9 @@
0, 1500));
uint8_t payload[] = {47, 11, 32, 93, 89};
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kAudioFrameCN, payload_type, 1234,
- 4321, payload, sizeof(payload),
- nullptr, nullptr));
+ ASSERT_TRUE(rtp_sender_->SendOutgoingData(
+ kAudioFrameCN, payload_type, 1234, 4321, payload,
+ sizeof(payload), nullptr, nullptr, nullptr));
RtpUtility::RtpHeaderParser rtp_parser(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
@@ -1490,15 +1490,15 @@
// During start, it takes the starting timestamp as last sent timestamp.
// The duration is calculated as the difference of current and last sent
// timestamp. So for first call it will skip since the duration is zero.
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
- capture_time_ms, 0, nullptr, 0,
- nullptr, nullptr));
+ ASSERT_TRUE(rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
+ capture_time_ms, 0, nullptr, 0,
+ nullptr, nullptr, nullptr));
// DTMF Sample Length is (Frequency/1000) * Duration.
// So in this case, it is (8000/1000) * 500 = 4000.
// Sending it as two packets.
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
- capture_time_ms + 2000, 0, nullptr,
- 0, nullptr, nullptr));
+ ASSERT_TRUE(rtp_sender_->SendOutgoingData(
+ kEmptyFrame, payload_type, capture_time_ms + 2000, 0,
+ nullptr, 0, nullptr, nullptr, nullptr));
std::unique_ptr<webrtc::RtpHeaderParser> rtp_parser(
webrtc::RtpHeaderParser::Create());
ASSERT_TRUE(rtp_parser.get() != nullptr);
@@ -1508,9 +1508,9 @@
// Marker Bit should be set to 1 for first packet.
EXPECT_TRUE(rtp_header.markerBit);
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
- capture_time_ms + 4000, 0, nullptr,
- 0, nullptr, nullptr));
+ ASSERT_TRUE(rtp_sender_->SendOutgoingData(
+ kEmptyFrame, payload_type, capture_time_ms + 4000, 0,
+ nullptr, 0, nullptr, nullptr, nullptr));
ASSERT_TRUE(rtp_parser->Parse(transport_.last_sent_packet_,
transport_.last_sent_packet_len_, &rtp_header));
// Marker Bit should be set to 0 for rest of the packets.
@@ -1529,9 +1529,9 @@
0, 1500));
uint8_t payload[] = {47, 11, 32, 93, 89};
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, kPayloadType, 1234,
- 4321, payload, sizeof(payload),
- nullptr, nullptr));
+ ASSERT_TRUE(rtp_sender_->SendOutgoingData(
+ kVideoFrameKey, kPayloadType, 1234, 4321, payload,
+ sizeof(payload), nullptr, nullptr, nullptr));
// Will send 2 full-size padding packets.
rtp_sender_->TimeToSendPadding(1, PacketInfo::kNotAProbe);
diff --git a/modules/rtp_rtcp/source/rtp_sender_video.cc b/modules/rtp_rtcp/source/rtp_sender_video.cc
index 5364a9b..fd36d76 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -79,18 +79,18 @@
uint32_t capture_timestamp,
int64_t capture_time_ms,
StorageType storage) {
- if (rtp_sender_->SendToNetwork(data_buffer, payload_length, rtp_header_length,
- capture_time_ms, storage,
- RtpPacketSender::kLowPriority) == 0) {
- rtc::CritScope cs(&stats_crit_);
- video_bitrate_.Update(payload_length + rtp_header_length,
- clock_->TimeInMilliseconds());
- TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
- "Video::PacketNormal", "timestamp", capture_timestamp,
- "seqnum", seq_num);
- } else {
+ if (!rtp_sender_->SendToNetwork(data_buffer, payload_length,
+ rtp_header_length, capture_time_ms, storage,
+ RtpPacketSender::kLowPriority)) {
LOG(LS_WARNING) << "Failed to send video packet " << seq_num;
+ return;
}
+ rtc::CritScope cs(&stats_crit_);
+ video_bitrate_.Update(payload_length + rtp_header_length,
+ clock_->TimeInMilliseconds());
+ TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
+ "Video::PacketNormal", "timestamp", capture_timestamp,
+ "seqnum", seq_num);
}
void RTPSenderVideo::SendVideoPacketAsRed(uint8_t* data_buffer,
@@ -129,7 +129,7 @@
if (rtp_sender_->SendToNetwork(
red_packet->data(), red_packet->length() - rtp_header_length,
rtp_header_length, capture_time_ms, media_packet_storage,
- RtpPacketSender::kLowPriority) == 0) {
+ RtpPacketSender::kLowPriority)) {
rtc::CritScope cs(&stats_crit_);
video_bitrate_.Update(red_packet->length(), clock_->TimeInMilliseconds());
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
@@ -142,7 +142,7 @@
if (rtp_sender_->SendToNetwork(
fec_packet->data(), fec_packet->length() - rtp_header_length,
rtp_header_length, capture_time_ms, fec_storage,
- RtpPacketSender::kLowPriority) == 0) {
+ RtpPacketSender::kLowPriority)) {
rtc::CritScope cs(&stats_crit_);
fec_bitrate_.Update(fec_packet->length(), clock_->TimeInMilliseconds());
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
@@ -206,18 +206,17 @@
}
}
-int32_t RTPSenderVideo::SendVideo(RtpVideoCodecTypes video_type,
- FrameType frame_type,
- int8_t payload_type,
- uint32_t capture_timestamp,
- int64_t capture_time_ms,
- const uint8_t* payload_data,
- size_t payload_size,
- const RTPFragmentationHeader* fragmentation,
- const RTPVideoHeader* video_header) {
- if (payload_size == 0) {
- return -1;
- }
+bool RTPSenderVideo::SendVideo(RtpVideoCodecTypes video_type,
+ FrameType frame_type,
+ int8_t payload_type,
+ uint32_t capture_timestamp,
+ int64_t capture_time_ms,
+ const uint8_t* payload_data,
+ size_t payload_size,
+ const RTPFragmentationHeader* fragmentation,
+ const RTPVideoHeader* video_header) {
+ if (payload_size == 0)
+ return false;
std::unique_ptr<RtpPacketizer> packetizer(RtpPacketizer::Create(
video_type, rtp_sender_->MaxDataPayloadLength(),
@@ -262,14 +261,14 @@
if (!packetizer->NextPacket(&dataBuffer[rtp_header_length],
&payload_bytes_in_packet, &last)) {
- return -1;
+ return false;
}
// Write RTP header.
int32_t header_length = rtp_sender_->BuildRtpHeader(
dataBuffer, payload_type, last, capture_timestamp, capture_time_ms);
if (header_length <= 0)
- return -1;
+ return false;
// According to
// http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
@@ -324,7 +323,7 @@
TRACE_EVENT_ASYNC_END1("webrtc", "Video", capture_time_ms, "timestamp",
rtp_sender_->Timestamp());
- return 0;
+ return true;
}
uint32_t RTPSenderVideo::VideoBitrateSent() const {
diff --git a/modules/rtp_rtcp/source/rtp_sender_video.h b/modules/rtp_rtcp/source/rtp_sender_video.h
index 682b6db..842eed8 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video.h
+++ b/modules/rtp_rtcp/source/rtp_sender_video.h
@@ -42,15 +42,15 @@
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
int8_t payload_type);
- int32_t SendVideo(RtpVideoCodecTypes video_type,
- FrameType frame_type,
- int8_t payload_type,
- uint32_t capture_timestamp,
- int64_t capture_time_ms,
- const uint8_t* payload_data,
- size_t payload_size,
- const RTPFragmentationHeader* fragmentation,
- const RTPVideoHeader* video_header);
+ bool SendVideo(RtpVideoCodecTypes video_type,
+ FrameType frame_type,
+ int8_t payload_type,
+ uint32_t capture_timestamp,
+ int64_t capture_time_ms,
+ const uint8_t* payload_data,
+ size_t payload_size,
+ const RTPFragmentationHeader* fragmentation,
+ const RTPVideoHeader* video_header);
int32_t SendRTPIntraRequest();
diff --git a/modules/rtp_rtcp/test/testAPI/test_api_audio.cc b/modules/rtp_rtcp/test/testAPI/test_api_audio.cc
index f9e5001..4bbcc32 100644
--- a/modules/rtp_rtcp/test/testAPI/test_api_audio.cc
+++ b/modules/rtp_rtcp/test/testAPI/test_api_audio.cc
@@ -171,8 +171,9 @@
// Send an empty RTP packet.
// Should fail since we have not registered the payload type.
- EXPECT_EQ(-1, module1->SendOutgoingData(webrtc::kAudioFrameSpeech,
- 96, 0, -1, NULL, 0));
+ EXPECT_FALSE(module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96, 0, -1,
+ nullptr, 0, nullptr, nullptr,
+ nullptr));
CodecInst voice_codec;
memset(&voice_codec, 0, sizeof(voice_codec));
@@ -197,8 +198,9 @@
(voice_codec.rate < 0) ? 0 : voice_codec.rate));
const uint8_t test[5] = "test";
- EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
- 0, -1, test, 4));
+ EXPECT_EQ(true,
+ module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96, 0, -1,
+ test, 4, nullptr, nullptr, nullptr));
EXPECT_EQ(test_ssrc, rtp_receiver2_->SSRC());
uint32_t timestamp;
@@ -271,9 +273,9 @@
const uint8_t test[5] = "test";
// Send a RTP packet.
- EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech,
- 96, 160, -1, test, 4,
- &fragmentation));
+ EXPECT_TRUE(module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96, 160, -1,
+ test, 4, &fragmentation, nullptr,
+ nullptr));
EXPECT_EQ(0, module1->SetSendREDPayloadType(-1));
EXPECT_EQ(-1, module1->SendREDPayloadType(&red));
@@ -333,16 +335,18 @@
// Send RTP packets for 16 tones a 160 ms 100ms
// pause between = 2560ms + 1600ms = 4160ms
for (; timeStamp <= 250 * 160; timeStamp += 160) {
- EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
- timeStamp, -1, test, 4));
+ EXPECT_TRUE(module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
+ timeStamp, -1, test, 4, nullptr,
+ nullptr, nullptr));
fake_clock.AdvanceTimeMilliseconds(20);
module1->Process();
}
EXPECT_EQ(0, module1->SendTelephoneEventOutband(32, 9000, 10));
for (; timeStamp <= 740 * 160; timeStamp += 160) {
- EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
- timeStamp, -1, test, 4));
+ EXPECT_TRUE(module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
+ timeStamp, -1, test, 4, nullptr,
+ nullptr, nullptr));
fake_clock.AdvanceTimeMilliseconds(20);
module1->Process();
}
diff --git a/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc b/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc
index f33507e..e19cf35 100644
--- a/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc
+++ b/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc
@@ -165,8 +165,9 @@
// the receiving module.
// send RTP packet with the data "testtest"
const uint8_t test[9] = "testtest";
- EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
- 0, -1, test, 8));
+ EXPECT_EQ(true,
+ module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96, 0, -1,
+ test, 8, nullptr, nullptr, nullptr));
}
virtual void TearDown() {
diff --git a/modules/rtp_rtcp/test/testAPI/test_api_video.cc b/modules/rtp_rtcp/test/testAPI/test_api_video.cc
index e784386..a701c9e 100644
--- a/modules/rtp_rtcp/test/testAPI/test_api_video.cc
+++ b/modules/rtp_rtcp/test/testAPI/test_api_video.cc
@@ -147,11 +147,9 @@
TEST_F(RtpRtcpVideoTest, BasicVideo) {
uint32_t timestamp = 3000;
- EXPECT_EQ(0, video_module_->SendOutgoingData(kVideoFrameDelta, 123,
- timestamp,
- timestamp / 90,
- video_frame_,
- payload_data_length_));
+ EXPECT_TRUE(video_module_->SendOutgoingData(
+ kVideoFrameDelta, 123, timestamp, timestamp / 90, video_frame_,
+ payload_data_length_, nullptr, nullptr, nullptr));
}
TEST_F(RtpRtcpVideoTest, PaddingOnlyFrames) {
diff --git a/modules/video_coding/codecs/test/videoprocessor.cc b/modules/video_coding/codecs/test/videoprocessor.cc
index e969fb7..7b0c8e3 100644
--- a/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/modules/video_coding/codecs/test/videoprocessor.cc
@@ -410,7 +410,8 @@
}
// Callbacks
-int32_t VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
+EncodedImageCallback::Result
+VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::OnEncodedImage(
const EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info,
const webrtc::RTPFragmentationHeader* fragmentation) {
@@ -419,7 +420,7 @@
video_processor_->FrameEncoded(codec_specific_info->codecType,
encoded_image,
fragmentation);
- return 0;
+ return Result(Result::OK, 0);
}
int32_t VideoProcessorImpl::VideoProcessorDecodeCompleteCallback::Decoded(
VideoFrame& image) {
diff --git a/modules/video_coding/codecs/test/videoprocessor.h b/modules/video_coding/codecs/test/videoprocessor.h
index af84329..21ed55e 100644
--- a/modules/video_coding/codecs/test/videoprocessor.h
+++ b/modules/video_coding/codecs/test/videoprocessor.h
@@ -230,7 +230,7 @@
public:
explicit VideoProcessorEncodeCompleteCallback(VideoProcessorImpl* vp)
: video_processor_(vp) {}
- int32_t Encoded(
+ Result OnEncodedImage(
const webrtc::EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info,
const webrtc::RTPFragmentationHeader* fragmentation) override;
diff --git a/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc b/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
index 7b1e9d9..eba59d0 100644
--- a/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
+++ b/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
@@ -120,12 +120,12 @@
size_t stream_idx)
: adapter_(adapter), stream_idx_(stream_idx) {}
- int32_t Encoded(
- const webrtc::EncodedImage& encodedImage,
- const webrtc::CodecSpecificInfo* codecSpecificInfo = NULL,
- const webrtc::RTPFragmentationHeader* fragmentation = NULL) override {
- return adapter_->Encoded(stream_idx_, encodedImage, codecSpecificInfo,
- fragmentation);
+ EncodedImageCallback::Result OnEncodedImage(
+ const webrtc::EncodedImage& encoded_image,
+ const webrtc::CodecSpecificInfo* codec_specific_info,
+ const webrtc::RTPFragmentationHeader* fragmentation) override {
+ return adapter_->OnEncodedImage(stream_idx_, encoded_image,
+ codec_specific_info, fragmentation);
}
private:
@@ -404,7 +404,7 @@
return WEBRTC_VIDEO_CODEC_OK;
}
-int32_t SimulcastEncoderAdapter::Encoded(
+EncodedImageCallback::Result SimulcastEncoderAdapter::OnEncodedImage(
size_t stream_idx,
const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo,
@@ -413,7 +413,7 @@
CodecSpecificInfoVP8* vp8Info = &(stream_codec_specific.codecSpecific.VP8);
vp8Info->simulcastIdx = stream_idx;
- return encoded_complete_callback_->Encoded(
+ return encoded_complete_callback_->OnEncodedImage(
encodedImage, &stream_codec_specific, fragmentation);
}
diff --git a/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h b/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
index fca16df..be6aa59 100644
--- a/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
+++ b/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
@@ -51,10 +51,11 @@
// Eventual handler for the contained encoders' EncodedImageCallbacks, but
// called from an internal helper that also knows the correct stream
// index.
- int32_t Encoded(size_t stream_idx,
- const EncodedImage& encodedImage,
- const CodecSpecificInfo* codecSpecificInfo = NULL,
- const RTPFragmentationHeader* fragmentation = NULL);
+ EncodedImageCallback::Result OnEncodedImage(
+ size_t stream_idx,
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation);
void OnDroppedFrame() override;
diff --git a/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc b/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
index efddb72..d14d1a4 100644
--- a/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
+++ b/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
@@ -242,16 +242,16 @@
last_encoded_image_simulcast_index_(-1) {}
virtual ~TestSimulcastEncoderAdapterFake() {}
- int32_t Encoded(const EncodedImage& encodedImage,
- const CodecSpecificInfo* codecSpecificInfo = NULL,
- const RTPFragmentationHeader* fragmentation = NULL) override {
- last_encoded_image_width_ = encodedImage._encodedWidth;
- last_encoded_image_height_ = encodedImage._encodedHeight;
- if (codecSpecificInfo) {
+ Result OnEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) override {
+ last_encoded_image_width_ = encoded_image._encodedWidth;
+ last_encoded_image_height_ = encoded_image._encodedHeight;
+ if (codec_specific_info) {
last_encoded_image_simulcast_index_ =
- codecSpecificInfo->codecSpecific.VP8.simulcastIdx;
+ codec_specific_info->codecSpecific.VP8.simulcastIdx;
}
- return 0;
+ return Result(Result::OK, encoded_image._timeStamp);
}
bool GetLastEncodedImageInfo(int* out_width,
diff --git a/modules/video_coding/codecs/vp8/simulcast_unittest.h b/modules/video_coding/codecs/vp8/simulcast_unittest.h
index b277ad2..e2bd71e 100644
--- a/modules/video_coding/codecs/vp8/simulcast_unittest.h
+++ b/modules/video_coding/codecs/vp8/simulcast_unittest.h
@@ -61,9 +61,9 @@
delete[] encoded_frame_._buffer;
}
- virtual int32_t Encoded(const EncodedImage& encoded_image,
- const CodecSpecificInfo* codec_specific_info,
- const RTPFragmentationHeader* fragmentation) {
+ virtual Result OnEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) {
// Only store the base layer.
if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
if (encoded_image._frameType == kVideoFrameKey) {
@@ -89,7 +89,7 @@
codec_specific_info->codecSpecific.VP8.layerSync;
temporal_layer_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
codec_specific_info->codecSpecific.VP8.temporalIdx;
- return 0;
+ return Result(Result::OK, encoded_image._timeStamp);
}
void GetLastEncodedFrameInfo(int* picture_id,
int* temporal_layer,
@@ -338,34 +338,38 @@
if (expected_video_streams >= 1) {
EXPECT_CALL(
encoder_callback_,
- Encoded(
+ OnEncodedImage(
AllOf(Field(&EncodedImage::_frameType, frame_type),
Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
_, _))
.Times(1)
- .WillRepeatedly(Return(0));
+ .WillRepeatedly(Return(EncodedImageCallback::Result(
+ EncodedImageCallback::Result::OK, 0)));
}
if (expected_video_streams >= 2) {
EXPECT_CALL(
encoder_callback_,
- Encoded(
+ OnEncodedImage(
AllOf(Field(&EncodedImage::_frameType, frame_type),
Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
_, _))
.Times(1)
- .WillRepeatedly(Return(0));
+ .WillRepeatedly(Return(EncodedImageCallback::Result(
+ EncodedImageCallback::Result::OK, 0)));
}
if (expected_video_streams >= 3) {
EXPECT_CALL(
encoder_callback_,
- Encoded(AllOf(Field(&EncodedImage::_frameType, frame_type),
- Field(&EncodedImage::_encodedWidth, kDefaultWidth),
- Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
- _, _))
+ OnEncodedImage(
+ AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
+ _, _))
.Times(1)
- .WillRepeatedly(Return(0));
+ .WillRepeatedly(Return(EncodedImageCallback::Result(
+ EncodedImageCallback::Result::OK, 0)));
}
}
@@ -590,13 +594,15 @@
encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
- EXPECT_CALL(encoder_callback_,
- Encoded(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
- Field(&EncodedImage::_encodedWidth, width),
- Field(&EncodedImage::_encodedHeight, height)),
- _, _))
+ EXPECT_CALL(
+ encoder_callback_,
+ OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
+ Field(&EncodedImage::_encodedWidth, width),
+ Field(&EncodedImage::_encodedHeight, height)),
+ _, _))
.Times(1)
- .WillRepeatedly(Return(0));
+ .WillRepeatedly(Return(
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
// Switch back.
diff --git a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index 756a102..12dcb7c 100644
--- a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -43,9 +43,9 @@
void* decoderSpecificInfo)
: encoded_frame_(frame), encode_complete_(false) {}
- virtual int Encoded(const EncodedImage& encoded_frame_,
- const CodecSpecificInfo* codecSpecificInfo,
- const RTPFragmentationHeader*);
+ Result OnEncodedImage(const EncodedImage& encoded_frame_,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) override;
bool EncodeComplete();
private:
@@ -54,9 +54,10 @@
bool encode_complete_;
};
-int Vp8UnitTestEncodeCompleteCallback::Encoded(
+webrtc::EncodedImageCallback::Result
+Vp8UnitTestEncodeCompleteCallback::OnEncodedImage(
const EncodedImage& encoded_frame,
- const CodecSpecificInfo* codecSpecificInfo,
+ const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
if (encoded_frame_->_size < encoded_frame._length) {
delete[] encoded_frame_->_buffer;
@@ -72,7 +73,7 @@
encoded_frame_->_frameType = encoded_frame._frameType;
encoded_frame_->_completeFrame = encoded_frame._completeFrame;
encode_complete_ = true;
- return 0;
+ return Result(Result::OK, 0);
}
bool Vp8UnitTestEncodeCompleteCallback::EncodeComplete() {
diff --git a/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc b/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
index 2802700..d7927eb 100644
--- a/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
+++ b/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
@@ -26,9 +26,9 @@
explicit Vp8SequenceCoderEncodeCallback(FILE* encoded_file)
: encoded_file_(encoded_file), encoded_bytes_(0) {}
~Vp8SequenceCoderEncodeCallback();
- int Encoded(const webrtc::EncodedImage& encoded_image,
- const webrtc::CodecSpecificInfo* codecSpecificInfo,
- const webrtc::RTPFragmentationHeader*);
+ Result OnEncodedImage(const webrtc::EncodedImage& encoded_image,
+ const webrtc::CodecSpecificInfo* codec_specific_info,
+ const webrtc::RTPFragmentationHeader*);
// Returns the encoded image.
webrtc::EncodedImage encoded_image() { return encoded_image_; }
size_t encoded_bytes() { return encoded_bytes_; }
@@ -43,7 +43,9 @@
delete[] encoded_image_._buffer;
encoded_image_._buffer = NULL;
}
-int Vp8SequenceCoderEncodeCallback::Encoded(
+
+webrtc::EncodedImageCallback::Result
+Vp8SequenceCoderEncodeCallback::OnEncodedImage(
const webrtc::EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codecSpecificInfo,
const webrtc::RTPFragmentationHeader* fragmentation) {
@@ -58,11 +60,11 @@
if (encoded_file_ != NULL) {
if (fwrite(encoded_image._buffer, 1, encoded_image._length,
encoded_file_) != encoded_image._length) {
- return -1;
+ return Result(Result::ERROR_SEND_FAILED, 0);
}
}
encoded_bytes_ += encoded_image_._length;
- return 0;
+ return Result(Result::OK, 0);
}
// TODO(mikhal): Add support for varying the frame size.
diff --git a/modules/video_coding/generic_encoder.cc b/modules/video_coding/generic_encoder.cc
index e63da02..28eb10a 100644
--- a/modules/video_coding/generic_encoder.cc
+++ b/modules/video_coding/generic_encoder.cc
@@ -21,6 +21,7 @@
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
+
VCMGenericEncoder::VCMGenericEncoder(
VideoEncoder* encoder,
VCMEncodedFrameCallback* encoded_frame_callback,
@@ -143,23 +144,25 @@
VCMEncodedFrameCallback::~VCMEncodedFrameCallback() {}
-int32_t VCMEncodedFrameCallback::Encoded(
+EncodedImageCallback::Result VCMEncodedFrameCallback::OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific,
const RTPFragmentationHeader* fragmentation_header) {
TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
"timestamp", encoded_image._timeStamp);
- int ret_val = post_encode_callback_->Encoded(encoded_image, codec_specific,
- fragmentation_header);
- if (ret_val < 0)
- return ret_val;
+ Result result = post_encode_callback_->OnEncodedImage(
+ encoded_image, codec_specific, fragmentation_header);
+ if (result.error != Result::OK)
+ return result;
if (media_opt_) {
media_opt_->UpdateWithEncodedData(encoded_image);
- if (internal_source_)
- return media_opt_->DropFrame(); // Signal to encoder to drop next frame.
+ if (internal_source_) {
+ // Signal to encoder to drop next frame.
+ result.drop_next_frame = media_opt_->DropFrame();
+ }
}
- return VCM_OK;
+ return result;
}
} // namespace webrtc
diff --git a/modules/video_coding/generic_encoder.h b/modules/video_coding/generic_encoder.h
index 9f73f36..9b5d2e6 100644
--- a/modules/video_coding/generic_encoder.h
+++ b/modules/video_coding/generic_encoder.h
@@ -41,9 +41,10 @@
virtual ~VCMEncodedFrameCallback();
// Implements EncodedImageCallback.
- int32_t Encoded(const EncodedImage& encoded_image,
- const CodecSpecificInfo* codec_specific,
- const RTPFragmentationHeader* fragmentation_header) override;
+ EncodedImageCallback::Result OnEncodedImage(
+ const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) override;
void SetInternalSource(bool internal_source) {
internal_source_ = internal_source;
}
diff --git a/modules/video_coding/include/mock/mock_video_codec_interface.h b/modules/video_coding/include/mock/mock_video_codec_interface.h
index 382eeb3..20f76cc 100644
--- a/modules/video_coding/include/mock/mock_video_codec_interface.h
+++ b/modules/video_coding/include/mock/mock_video_codec_interface.h
@@ -22,10 +22,10 @@
class MockEncodedImageCallback : public EncodedImageCallback {
public:
- MOCK_METHOD3(Encoded,
- int32_t(const EncodedImage& encodedImage,
- const CodecSpecificInfo* codecSpecificInfo,
- const RTPFragmentationHeader* fragmentation));
+ MOCK_METHOD3(OnEncodedImage,
+ Result(const EncodedImage& encodedImage,
+ const CodecSpecificInfo* codecSpecificInfo,
+ const RTPFragmentationHeader* fragmentation));
};
class MockVideoEncoder : public VideoEncoder {
diff --git a/modules/video_coding/video_coding_impl.cc b/modules/video_coding/video_coding_impl.cc
index 077f336..2f709b6 100644
--- a/modules/video_coding/video_coding_impl.cc
+++ b/modules/video_coding/video_coding_impl.cc
@@ -45,7 +45,8 @@
class EncodedImageCallbackWrapper : public EncodedImageCallback {
public:
EncodedImageCallbackWrapper()
- : cs_(CriticalSectionWrapper::CreateCriticalSection()), callback_(NULL) {}
+ : cs_(CriticalSectionWrapper::CreateCriticalSection()),
+ callback_(nullptr) {}
virtual ~EncodedImageCallbackWrapper() {}
@@ -54,14 +55,15 @@
callback_ = callback;
}
- virtual int32_t Encoded(const EncodedImage& encoded_image,
- const CodecSpecificInfo* codec_specific_info,
- const RTPFragmentationHeader* fragmentation) {
+ virtual Result OnEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) {
CriticalSectionScoped cs(cs_.get());
- if (callback_)
- return callback_->Encoded(encoded_image, codec_specific_info,
- fragmentation);
- return 0;
+ if (callback_) {
+ return callback_->OnEncodedImage(encoded_image, codec_specific_info,
+ fragmentation);
+ }
+ return Result(Result::ERROR_SEND_FAILED);
}
private:
diff --git a/modules/video_coding/video_sender_unittest.cc b/modules/video_coding/video_sender_unittest.cc
index 923144e..a3766c1 100644
--- a/modules/video_coding/video_sender_unittest.cc
+++ b/modules/video_coding/video_sender_unittest.cc
@@ -93,13 +93,13 @@
virtual ~EncodedImageCallbackImpl() {}
- int32_t Encoded(const EncodedImage& encoded_image,
- const CodecSpecificInfo* codec_specific_info,
- const RTPFragmentationHeader* fragmentation) override {
+ Result OnEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) override {
assert(codec_specific_info);
frame_data_.push_back(
FrameData(encoded_image._length, *codec_specific_info));
- return 0;
+ return Result(Result::OK, encoded_image._timeStamp);
}
void Reset() {