Audio egress implementation for the initial VoIP API in api/voip.

For simplicity and flexibility of the audio-only API, it was deemed
better to trim off all logic that is unrelated to audio.
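
A minimal sketch of how the new class is driven, mirroring the setup in
the new unit test (rtp_rtcp, clock, task_queue_factory and
encoder_factory are created by the caller, e.g. as in
audio/voip/test/audio_egress_unittest.cc):

  AudioEgress egress(rtp_rtcp.get(), &clock, task_queue_factory.get());
  constexpr int kPcmuPayload = 0;
  const SdpAudioFormat kPcmuFormat = {"pcmu", 8000, 1};
  egress.SetEncoder(kPcmuPayload, kPcmuFormat,
                    encoder_factory->MakeAudioEncoder(
                        kPcmuPayload, kPcmuFormat, absl::nullopt));
  egress.StartSend();
  // Feed 10 ms frames; encoding and RTP packetization run on the task queue.
  egress.SendAudioData(std::move(audio_frame));
  egress.StopSend();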

Bug: webrtc:11251
Change-Id: I40e3eba2714c171f7c98b158303a7b3f744ceb78
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/169462
Reviewed-by: Per Åhgren <peah@webrtc.org>
Reviewed-by: Patrik Höglund <phoglund@webrtc.org>
Reviewed-by: Sebastian Jansson <srte@webrtc.org>
Commit-Queue: Patrik Höglund <phoglund@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#30922}
diff --git a/BUILD.gn b/BUILD.gn
index 9b643e1..d56f549 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -40,6 +40,7 @@
         ":rtc_unittests",
         ":slow_tests",
         ":video_engine_tests",
+        ":voip_unittests",
         ":webrtc_nonparallel_tests",
         ":webrtc_perf_tests",
         "common_audio:common_audio_unittests",
@@ -673,6 +674,14 @@
       shard_timeout = 900
     }
   }
+
+  rtc_test("voip_unittests") {
+    testonly = true
+    deps = [
+      "audio/voip/test:audio_egress_unittests",
+      "test:test_main",
+    ]
+  }
 }
 
 # ---- Poisons ----
diff --git a/audio/voip/BUILD.gn b/audio/voip/BUILD.gn
new file mode 100644
index 0000000..9d52121
--- /dev/null
+++ b/audio/voip/BUILD.gn
@@ -0,0 +1,30 @@
+# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+rtc_library("audio_egress") {
+  sources = [
+    "audio_egress.cc",
+    "audio_egress.h",
+  ]
+  deps = [
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../api/task_queue",
+    "../../audio",
+    "../../audio/utility:audio_frame_operations",
+    "../../call:audio_sender_interface",
+    "../../modules/audio_coding",
+    "../../modules/rtp_rtcp",
+    "../../modules/rtp_rtcp:rtp_rtcp_format",
+    "../../rtc_base:logging",
+    "../../rtc_base:rtc_task_queue",
+    "../../rtc_base:thread_checker",
+    "../../rtc_base:timeutils",
+  ]
+}
diff --git a/audio/voip/audio_egress.cc b/audio/voip/audio_egress.cc
new file mode 100644
index 0000000..c145201
--- /dev/null
+++ b/audio/voip/audio_egress.cc
@@ -0,0 +1,186 @@
+//
+//  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+//
+//  Use of this source code is governed by a BSD-style license
+//  that can be found in the LICENSE file in the root of the source
+//  tree. An additional intellectual property rights grant can be found
+//  in the file PATENTS.  All contributing project authors may
+//  be found in the AUTHORS file in the root of the source tree.
+//
+
+#include "audio/voip/audio_egress.h"
+
+#include <utility>
+#include <vector>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+AudioEgress::AudioEgress(RtpRtcp* rtp_rtcp,
+                         Clock* clock,
+                         TaskQueueFactory* task_queue_factory)
+    : rtp_rtcp_(rtp_rtcp),
+      rtp_sender_audio_(clock, rtp_rtcp_->RtpSender()),
+      audio_coding_(AudioCodingModule::Create(AudioCodingModule::Config())),
+      encoder_queue_(task_queue_factory->CreateTaskQueue(
+          "AudioEncoder",
+          TaskQueueFactory::Priority::NORMAL)) {
+  audio_coding_->RegisterTransportCallback(this);
+}
+
+AudioEgress::~AudioEgress() {
+  audio_coding_->RegisterTransportCallback(nullptr);
+}
+
+bool AudioEgress::IsSending() const {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  return rtp_rtcp_->SendingMedia();
+}
+
+void AudioEgress::SetEncoder(int payload_type,
+                             const SdpAudioFormat& encoder_format,
+                             std::unique_ptr<AudioEncoder> encoder) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  RTC_DCHECK_GE(payload_type, 0);
+  RTC_DCHECK_LE(payload_type, 127);
+
+  encoder_format_ = encoder_format;
+
+  // The RTP/RTCP module needs to know the RTP timestamp rate (i.e. clockrate)
+  // as well as some other things, so we collect this info and send it along.
+  rtp_rtcp_->RegisterSendPayloadFrequency(payload_type,
+                                          encoder->RtpTimestampRateHz());
+  rtp_sender_audio_.RegisterAudioPayload("audio", payload_type,
+                                         encoder->RtpTimestampRateHz(),
+                                         encoder->NumChannels(), 0);
+
+  audio_coding_->SetEncoder(std::move(encoder));
+}
+
+absl::optional<SdpAudioFormat> AudioEgress::GetEncoderFormat() const {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  return encoder_format_;
+}
+
+void AudioEgress::StartSend() {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+
+  rtp_rtcp_->SetSendingMediaStatus(true);
+}
+
+void AudioEgress::StopSend() {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+
+  rtp_rtcp_->SetSendingMediaStatus(false);
+}
+
+void AudioEgress::SendAudioData(std::unique_ptr<AudioFrame> audio_frame) {
+  RTC_DCHECK_GT(audio_frame->samples_per_channel_, 0);
+  RTC_DCHECK_LE(audio_frame->num_channels_, 8);
+
+  encoder_queue_.PostTask(
+      [this, audio_frame = std::move(audio_frame)]() mutable {
+        RTC_DCHECK_RUN_ON(&encoder_queue_);
+        if (!rtp_rtcp_->SendingMedia()) {
+          return;
+        }
+
+        AudioFrameOperations::Mute(audio_frame.get(),
+                                   encoder_context_.previously_muted_,
+                                   encoder_context_.mute_);
+        encoder_context_.previously_muted_ = encoder_context_.mute_;
+
+        audio_frame->timestamp_ = encoder_context_.frame_rtp_timestamp_;
+
+        // This call will trigger AudioPacketizationCallback::SendData if
+        // encoding is done and payload is ready for packetization and
+        // transmission. Otherwise, it will return without invoking the
+        // callback.
+        if (audio_coding_->Add10MsData(*audio_frame) < 0) {
+          RTC_DLOG(LS_ERROR) << "ACM::Add10MsData() failed.";
+          return;
+        }
+
+        encoder_context_.frame_rtp_timestamp_ +=
+            rtc::dchecked_cast<uint32_t>(audio_frame->samples_per_channel_);
+      });
+}
+
+int32_t AudioEgress::SendData(AudioFrameType frame_type,
+                              uint8_t payload_type,
+                              uint32_t timestamp,
+                              const uint8_t* payload_data,
+                              size_t payload_size) {
+  RTC_DCHECK_RUN_ON(&encoder_queue_);
+
+  rtc::ArrayView<const uint8_t> payload(payload_data, payload_size);
+
+  // Currently we don't get a capture time from downstream modules (ADM,
+  // AudioTransportImpl).
+  // TODO(natim@webrtc.org): Integrate once it's ready.
+  constexpr uint32_t kUndefinedCaptureTime = -1;
+
+  // Push the data from the ACM to the RTP/RTCP module to deliver the audio
+  // frame for packetization.
+  if (!rtp_rtcp_->OnSendingRtpFrame(timestamp, kUndefinedCaptureTime,
+                                    payload_type,
+                                    /*force_sender_report=*/false)) {
+    return -1;
+  }
+
+  const uint32_t rtp_timestamp = timestamp + rtp_rtcp_->StartTimestamp();
+
+  // This call will trigger Transport::SendRtp() from the RTP/RTCP module.
+  if (!rtp_sender_audio_.SendAudio(frame_type, payload_type, rtp_timestamp,
+                                   payload.data(), payload.size())) {
+    RTC_DLOG(LS_ERROR)
+        << "AudioEgress::SendData() failed to send data to RTP/RTCP module";
+    return -1;
+  }
+
+  return 0;
+}
+
+void AudioEgress::RegisterTelephoneEventType(int rtp_payload_type,
+                                             int sample_rate_hz) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  RTC_DCHECK_GE(rtp_payload_type, 0);
+  RTC_DCHECK_LE(rtp_payload_type, 127);
+
+  rtp_rtcp_->RegisterSendPayloadFrequency(rtp_payload_type, sample_rate_hz);
+  rtp_sender_audio_.RegisterAudioPayload("telephone-event", rtp_payload_type,
+                                         sample_rate_hz, 0, 0);
+}
+
+bool AudioEgress::SendTelephoneEvent(int dtmf_event, int duration_ms) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  RTC_DCHECK_GE(dtmf_event, 0);
+  RTC_DCHECK_LE(dtmf_event, 255);
+  RTC_DCHECK_GE(duration_ms, 0);
+  RTC_DCHECK_LE(duration_ms, 65535);
+
+  if (!IsSending()) {
+    return false;
+  }
+
+  constexpr int kTelephoneEventAttenuationdB = 10;
+
+  if (rtp_sender_audio_.SendTelephoneEvent(dtmf_event, duration_ms,
+                                           kTelephoneEventAttenuationdB) != 0) {
+    RTC_DLOG(LS_ERROR) << "SendTelephoneEvent() failed to send event";
+    return false;
+  }
+  return true;
+}
+
+void AudioEgress::SetMute(bool mute) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+
+  encoder_queue_.PostTask([this, mute] {
+    RTC_DCHECK_RUN_ON(&encoder_queue_);
+    encoder_context_.mute_ = mute;
+  });
+}
+
+}  // namespace webrtc
diff --git a/audio/voip/audio_egress.h b/audio/voip/audio_egress.h
new file mode 100644
index 0000000..fcdafa6
--- /dev/null
+++ b/audio/voip/audio_egress.h
@@ -0,0 +1,137 @@
+//
+//  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+//
+//  Use of this source code is governed by a BSD-style license
+//  that can be found in the LICENSE file in the root of the source
+//  tree. An additional intellectual property rights grant can be found
+//  in the file PATENTS.  All contributing project authors may
+//  be found in the AUTHORS file in the root of the source tree.
+//
+
+#ifndef AUDIO_VOIP_AUDIO_EGRESS_H_
+#define AUDIO_VOIP_AUDIO_EGRESS_H_
+
+#include <memory>
+#include <string>
+
+#include "api/audio_codecs/audio_format.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "audio/utility/audio_frame_operations.h"
+#include "call/audio_sender.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/rtp_rtcp/include/report_block_data.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "modules/rtp_rtcp/source/rtp_sender_audio.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread_checker.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+// AudioEgress receives input samples from the AudioDeviceModule via
+// AudioTransportImpl through the AudioSender interface. Once a sample has been
+// encoded by the selected encoder, the encoded payload is handed back through
+// the AudioPacketizationCallback interface and packetized by the RTP stack
+// into an RTP packet that is ready to send to the remote endpoint.
+//
+// In debug builds, this class enforces the expected threading model, namely
+// that callers access it from a single worker thread, via a SequenceChecker.
+// To minimize the time spent holding the OS audio input thread, a TaskQueue is
+// used to encode and send RTP asynchronously.
+//
+// Note that this class is originally based on ChannelSend in
+// audio/channel_send.cc, with logic unrelated to audio trimmed off to keep the
+// footprint small.
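+//
+// Illustrative usage (a sketch only; see
+// audio/voip/test/audio_egress_unittest.cc for a complete, working example):
+//
+//   AudioEgress egress(rtp_rtcp, clock, task_queue_factory);
+//   egress.SetEncoder(payload_type, format, std::move(encoder));
+//   egress.StartSend();
+//   egress.SendAudioData(std::move(audio_frame));  // Repeated per 10 ms frame.
+//   egress.SetMute(true);                          // Optional: send silence.
+//   egress.RegisterTelephoneEventType(dtmf_payload_type, sample_rate_hz);
+//   egress.SendTelephoneEvent(dtmf_event, duration_ms);  // Optional: DTMF.
+//   egress.StopSend();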
+class AudioEgress : public AudioSender, public AudioPacketizationCallback {
+ public:
+  AudioEgress(RtpRtcp* rtp_rtcp,
+              Clock* clock,
+              TaskQueueFactory* task_queue_factory);
+  ~AudioEgress() override;
+
+  // Set the encoder format and payload type for AudioCodingModule.
+  // It's possible to change the encoder type while sending is active.
+  // |payload_type| must be the type that has been negotiated with the peer
+  // through offer/answer.
+  void SetEncoder(int payload_type,
+                  const SdpAudioFormat& encoder_format,
+                  std::unique_ptr<AudioEncoder> encoder);
+
+  // Start or stop the sending operation of AudioEgress. This starts/stops
+  // the RTP stack and also causes the encoder queue to start/stop
+  // processing input audio samples.
+  void StartSend();
+  void StopSend();
+
+  // Query the state of the RTP stack. This returns true after StartSend()
+  // has been called and false after StopSend() has been called.
+  bool IsSending() const;
+
+  // Enable or disable the mute state.
+  void SetMute(bool mute);
+
+  // Retrieve the current encoder format info. This returns the format set by
+  // SetEncoder(); if no encoder has been set, it returns nullopt.
+  absl::optional<SdpAudioFormat> GetEncoderFormat() const;
+
+  // Register the payload type and sample rate for DTMF (RFC 4733) payload.
+  void RegisterTelephoneEventType(int rtp_payload_type, int sample_rate_hz);
+
+  // Send a DTMF named event as specified by
+  // https://tools.ietf.org/html/rfc4733#section-3.2
+  // |duration_ms| specifies the duration of the DTMF packets that will be
+  // emitted in place of real RTP packets.
+  // This returns true when the requested DTMF event is successfully scheduled,
+  // otherwise false when the DTMF queue has reached its maximum of 20 events.
+  bool SendTelephoneEvent(int dtmf_event, int duration_ms);
+
+  // Implementation of AudioSender interface.
+  void SendAudioData(std::unique_ptr<AudioFrame> audio_frame) override;
+
+  // Implementation of AudioPacketizationCallback interface.
+  int32_t SendData(AudioFrameType frame_type,
+                   uint8_t payload_type,
+                   uint32_t timestamp,
+                   const uint8_t* payload_data,
+                   size_t payload_size) override;
+
+ private:
+  // Ensures single worker thread access.
+  SequenceChecker worker_thread_checker_;
+
+  // Current encoder format selected by caller.
+  absl::optional<SdpAudioFormat> encoder_format_
+      RTC_GUARDED_BY(worker_thread_checker_);
+
+  // Synchronization is handled internally by RtpRtcp.
+  RtpRtcp* const rtp_rtcp_;
+
+  // Synchronization is handled internally by RTPSenderAudio.
+  RTPSenderAudio rtp_sender_audio_;
+
+  // Synchronization is handled internally by AudioCodingModule.
+  const std::unique_ptr<AudioCodingModule> audio_coding_;
+
+  // Struct that holds all variables used by encoder task queue.
+  struct EncoderContext {
+    // Offset used to set the RTP timestamp (in sample-rate units) on each
+    // newly received audio frame from AudioTransport.
+    uint32_t frame_rtp_timestamp_ = 0;
+
+    // Flags to track the mute state requested by the caller. |previously_muted_|
+    // tracks the previous state and is passed to AudioFrameOperations::Mute
+    // to implement a fading effect when (un)mute is invoked.
+    bool mute_ = false;
+    bool previously_muted_ = false;
+  };
+
+  EncoderContext encoder_context_ RTC_GUARDED_BY(encoder_queue_);
+
+  // Defined last to ensure that there are no running tasks when the other
+  // members are destroyed.
+  rtc::TaskQueue encoder_queue_;
+};
+
+}  // namespace webrtc
+
+#endif  // AUDIO_VOIP_AUDIO_EGRESS_H_
diff --git a/audio/voip/test/BUILD.gn b/audio/voip/test/BUILD.gn
new file mode 100644
index 0000000..e0aedf6
--- /dev/null
+++ b/audio/voip/test/BUILD.gn
@@ -0,0 +1,29 @@
+# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+if (rtc_include_tests) {
+  rtc_library("audio_egress_unittests") {
+    testonly = true
+    sources = [ "audio_egress_unittest.cc" ]
+    deps = [
+      "..:audio_egress",
+      "../../../api:transport_api",
+      "../../../api/audio_codecs:builtin_audio_decoder_factory",
+      "../../../api/audio_codecs:builtin_audio_encoder_factory",
+      "../../../api/task_queue:default_task_queue_factory",
+      "../../../modules/audio_mixer:audio_mixer_test_utils",
+      "../../../modules/rtp_rtcp:rtp_rtcp_format",
+      "../../../rtc_base:logging",
+      "../../../rtc_base:rtc_event",
+      "../../../test:mock_transport",
+      "../../../test:test_support",
+    ]
+  }
+}
diff --git a/audio/voip/test/audio_egress_unittest.cc b/audio/voip/test/audio_egress_unittest.cc
new file mode 100644
index 0000000..23c4e45
--- /dev/null
+++ b/audio/voip/test/audio_egress_unittest.cc
@@ -0,0 +1,288 @@
+//
+//  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+//
+//  Use of this source code is governed by a BSD-style license
+//  that can be found in the LICENSE file in the root of the source
+//  tree. An additional intellectual property rights grant can be found
+//  in the file PATENTS.  All contributing project authors may
+//  be found in the AUTHORS file in the root of the source tree.
+//
+
+#include "audio/voip/audio_egress.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "api/call/transport.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "modules/audio_mixer/sine_wave_generator.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "rtc_base/event.h"
+#include "rtc_base/logging.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_transport.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::Unused;
+
+std::unique_ptr<RtpRtcp> CreateRtpStack(Clock* clock,
+                                        Transport* transport,
+                                        uint32_t remote_ssrc) {
+  RtpRtcp::Configuration rtp_config;
+  rtp_config.clock = clock;
+  rtp_config.audio = true;
+  rtp_config.rtcp_report_interval_ms = 5000;
+  rtp_config.outgoing_transport = transport;
+  rtp_config.local_media_ssrc = remote_ssrc;
+  auto rtp_rtcp = RtpRtcp::Create(rtp_config);
+  rtp_rtcp->SetSendingMediaStatus(false);
+  rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound);
+  return rtp_rtcp;
+}
+
+// AudioEgressTest configures audio egress using the RTP stack, a fake clock,
+// and a task queue factory.  An encoder factory is needed to create a codec
+// and configure the RTP stack in the audio egress.
+class AudioEgressTest : public ::testing::Test {
+ public:
+  static constexpr int16_t kAudioLevel = 3004;  // Used for sine wave level.
+  static constexpr uint16_t kSeqNum = 12345;
+  static constexpr uint64_t kStartTime = 123456789;
+  static constexpr uint32_t kRemoteSsrc = 0xDEADBEEF;
+  const SdpAudioFormat kPcmuFormat = {"pcmu", 8000, 1};
+
+  AudioEgressTest()
+      : fake_clock_(kStartTime), wave_generator_(1000.0, kAudioLevel) {
+    rtp_rtcp_ = CreateRtpStack(&fake_clock_, &transport_, kRemoteSsrc);
+    task_queue_factory_ = CreateDefaultTaskQueueFactory();
+    encoder_factory_ = CreateBuiltinAudioEncoderFactory();
+  }
+
+  // Prepare the audio egress under test by using the PCMU codec with a
+  // specific sequence number and setting its status to sending.
+  void SetUp() override {
+    egress_ = std::make_unique<AudioEgress>(rtp_rtcp_.get(), &fake_clock_,
+                                            task_queue_factory_.get());
+    constexpr int kPcmuPayload = 0;
+    egress_->SetEncoder(kPcmuPayload, kPcmuFormat,
+                        encoder_factory_->MakeAudioEncoder(
+                            kPcmuPayload, kPcmuFormat, absl::nullopt));
+    egress_->StartSend();
+    rtp_rtcp_->SetSequenceNumber(kSeqNum);
+    rtp_rtcp_->SetSendingStatus(true);
+  }
+
+  // Make sure we shut down the RTP stack and reset the egress after each test.
+  void TearDown() override {
+    rtp_rtcp_->SetSendingStatus(false);
+    egress_.reset();
+  }
+
+  // Create an audio frame prepared for PCMU encoding. The timestamp is
+  // increased per the RTP specification by the number of samples the frame
+  // contains. The wave generator writes a sine wave whose expected level is
+  // set by kAudioLevel.
+  std::unique_ptr<AudioFrame> GetAudioFrame(int order) {
+    auto frame = std::make_unique<AudioFrame>();
+    frame->sample_rate_hz_ = kPcmuFormat.clockrate_hz;
+    frame->samples_per_channel_ = kPcmuFormat.clockrate_hz / 100;  // 10 ms.
+    frame->num_channels_ = kPcmuFormat.num_channels;
+    frame->timestamp_ = frame->samples_per_channel_ * order;
+    wave_generator_.GenerateNextFrame(frame.get());
+    return frame;
+  }
+
+  // SimulatedClock doesn't directly affect this test case as the
+  // AudioFrame's timestamp is driven by GetAudioFrame.
+  SimulatedClock fake_clock_;
+  NiceMock<MockTransport> transport_;
+  SineWaveGenerator wave_generator_;
+  std::unique_ptr<AudioEgress> egress_;
+  std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+  std::unique_ptr<RtpRtcp> rtp_rtcp_;
+  rtc::scoped_refptr<AudioEncoderFactory> encoder_factory_;
+};
+
+TEST_F(AudioEgressTest, SendingStatusAfterStartAndStop) {
+  EXPECT_TRUE(egress_->IsSending());
+  egress_->StopSend();
+  EXPECT_FALSE(egress_->IsSending());
+}
+
+TEST_F(AudioEgressTest, ProcessAudioWithMute) {
+  constexpr int kExpected = 10;
+  rtc::Event event;
+  int rtp_count = 0;
+  RtpPacketReceived rtp;
+  auto rtp_sent = [&](const uint8_t* packet, size_t length, Unused) {
+    rtp.Parse(packet, length);
+    if (++rtp_count == kExpected) {
+      event.Set();
+    }
+    return true;
+  };
+
+  EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(rtp_sent));
+
+  egress_->SetMute(true);
+
+  // Two 10 ms audio frames will result in an RTP packet with ptime 20.
+  for (size_t i = 0; i < kExpected * 2; i++) {
+    egress_->SendAudioData(GetAudioFrame(i));
+    fake_clock_.AdvanceTimeMilliseconds(10);
+  }
+
+  event.Wait(/*ms=*/1000);
+  EXPECT_EQ(rtp_count, kExpected);
+
+  // We expect every byte of the PCMU payload to be 255 for silenced audio.
+  RTPHeader header;
+  rtp.GetHeader(&header);
+  size_t packet_length = rtp.size();
+  size_t payload_length = packet_length - header.headerLength;
+  size_t payload_data_length = payload_length - header.paddingLength;
+  const uint8_t* payload = rtp.data() + header.headerLength;
+  for (size_t i = 0; i < payload_data_length; ++i) {
+    EXPECT_EQ(*payload++, 255);
+  }
+}
+
+TEST_F(AudioEgressTest, ProcessAudioWithSineWave) {
+  constexpr int kExpected = 10;
+  rtc::Event event;
+  int rtp_count = 0;
+  RtpPacketReceived rtp;
+  auto rtp_sent = [&](const uint8_t* packet, size_t length, Unused) {
+    rtp.Parse(packet, length);
+    if (++rtp_count == kExpected) {
+      event.Set();
+    }
+    return true;
+  };
+
+  EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(rtp_sent));
+
+  // Two 10 ms audio frames will result in an RTP packet with ptime 20.
+  for (size_t i = 0; i < kExpected * 2; i++) {
+    egress_->SendAudioData(GetAudioFrame(i));
+    fake_clock_.AdvanceTimeMilliseconds(10);
+  }
+
+  event.Wait(/*ms=*/1000);
+  EXPECT_EQ(rtp_count, kExpected);
+
+  // We expect the PCMU payload bytes not to be 255 when carrying a sine wave.
+  RTPHeader header;
+  rtp.GetHeader(&header);
+  size_t packet_length = rtp.size();
+  size_t payload_length = packet_length - header.headerLength;
+  size_t payload_data_length = payload_length - header.paddingLength;
+  const uint8_t* payload = rtp.data() + header.headerLength;
+  for (size_t i = 0; i < payload_data_length; ++i) {
+    EXPECT_NE(*payload++, 255);
+  }
+}
+
+TEST_F(AudioEgressTest, SkipAudioEncodingAfterStopSend) {
+  constexpr int kExpected = 10;
+  rtc::Event event;
+  int rtp_count = 0;
+  auto rtp_sent = [&](const uint8_t* packet, size_t length, Unused) {
+    if (++rtp_count == kExpected) {
+      event.Set();
+    }
+    return true;
+  };
+
+  EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(rtp_sent));
+
+  // Two 10 ms audio frames will result in an RTP packet with ptime 20.
+  for (size_t i = 0; i < kExpected * 2; i++) {
+    egress_->SendAudioData(GetAudioFrame(i));
+    fake_clock_.AdvanceTimeMilliseconds(10);
+  }
+
+  event.Wait(/*ms=*/1000);
+  EXPECT_EQ(rtp_count, kExpected);
+
+  // Now stop send and yet feed more data.
+  egress_->StopSend();
+
+  // It should be safe to exit the test case while encoder_queue_ has
+  // outstanding data to process. We are making sure that this doesn't
+  // result in crashes or sanitizer errors due to remaining data.
+  for (size_t i = 0; i < kExpected * 2; i++) {
+    egress_->SendAudioData(GetAudioFrame(i));
+    fake_clock_.AdvanceTimeMilliseconds(10);
+  }
+}
+
+TEST_F(AudioEgressTest, ChangeEncoderFromPcmuToOpus) {
+  absl::optional<SdpAudioFormat> pcmu = egress_->GetEncoderFormat();
+  EXPECT_TRUE(pcmu);
+  EXPECT_EQ(pcmu->clockrate_hz, kPcmuFormat.clockrate_hz);
+  EXPECT_EQ(pcmu->num_channels, kPcmuFormat.num_channels);
+
+  constexpr int kOpusPayload = 120;
+  const SdpAudioFormat kOpusFormat = {"opus", 48000, 2};
+
+  egress_->SetEncoder(kOpusPayload, kOpusFormat,
+                      encoder_factory_->MakeAudioEncoder(
+                          kOpusPayload, kOpusFormat, absl::nullopt));
+
+  absl::optional<SdpAudioFormat> opus = egress_->GetEncoderFormat();
+  EXPECT_TRUE(opus);
+  EXPECT_EQ(opus->clockrate_hz, kOpusFormat.clockrate_hz);
+  EXPECT_EQ(opus->num_channels, kOpusFormat.num_channels);
+}
+
+TEST_F(AudioEgressTest, SendDTMF) {
+  constexpr int kExpected = 7;
+  constexpr int kPayloadType = 100;
+  constexpr int kDurationMs = 100;
+  constexpr int kSampleRate = 8000;
+  constexpr int kEvent = 3;
+
+  egress_->RegisterTelephoneEventType(kPayloadType, kSampleRate);
+  // A 100 ms duration will produce a total of 7 DTMF packets:
+  // 1 @ 20 ms, 2 @ 40 ms, 3 @ 60 ms, 4 @ 80 ms,
+  // 5, 6, 7 @ 100 ms (the last interval sends 3 DTMF packets).
+  egress_->SendTelephoneEvent(kEvent, kDurationMs);
+
+  rtc::Event event;
+  int dtmf_count = 0;
+  auto is_dtmf = [&](RtpPacketReceived& rtp) {
+    return (rtp.PayloadType() == kPayloadType &&
+            rtp.SequenceNumber() == kSeqNum + dtmf_count &&
+            rtp.padding_size() == 0 && rtp.Marker() == (dtmf_count == 0) &&
+            rtp.Ssrc() == kRemoteSsrc);
+  };
+
+  // It's possible that we may have actual audio RTP packets along with
+  // DTMF packets.  We are only interested in the exact number of DTMF
+  // packets the RTP stack is emitting.
+  auto rtp_sent = [&](const uint8_t* packet, size_t length, Unused) {
+    RtpPacketReceived rtp;
+    rtp.Parse(packet, length);
+    if (is_dtmf(rtp) && ++dtmf_count == kExpected) {
+      event.Set();
+    }
+    return true;
+  };
+
+  EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(rtp_sent));
+
+  // Two 10 ms audio frames will result in an RTP packet with ptime 20.
+  for (size_t i = 0; i < kExpected * 2; i++) {
+    egress_->SendAudioData(GetAudioFrame(i));
+    fake_clock_.AdvanceTimeMilliseconds(10);
+  }
+
+  event.Wait(/*ms=*/1000);
+  EXPECT_EQ(dtmf_count, kExpected);
+}
+
+}  // namespace
+}  // namespace webrtc