Parameterize PeerConnection integration tests for Unified Plan
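
Previously every test in peerconnection_integrationtest.cc ran against a
single SDP semantics. This change renames the old fixture to
PeerConnectionIntegrationBaseTest and adds a parameterized
PeerConnectionIntegrationTest fixture (plus Plan B and Unified Plan
specific fixtures) so that most tests run under both SdpSemantics::kPlanB
and SdpSemantics::kUnifiedPlan. The ExpectNewFramesReceivedWithWait()
helper is replaced with a MediaExpectations object passed to
ExpectNewFrames(), and a SetRemoteOfferHandler() hook is added so Unified
Plan tests can stop transceivers to reject m= sections before the answer
is created.

Tests written against the parameterized fixture are expected to be
instantiated for both semantics, along the lines of:

  INSTANTIATE_TEST_CASE_P(PeerConnectionIntegrationTest,
                          PeerConnectionIntegrationTest,
                          Values(SdpSemantics::kPlanB,
                                 SdpSemantics::kUnifiedPlan));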

Bug: webrtc:8765
Change-Id: I572966c57fd8d8f9293fc05a8be579dd982102f7
Reviewed-on: https://webrtc-review.googlesource.com/52800
Commit-Queue: Seth Hampson <shampson@webrtc.org>
Reviewed-by: Steve Anton <steveanton@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22111}
diff --git a/pc/peerconnection_integrationtest.cc b/pc/peerconnection_integrationtest.cc
index 788b432..d5037a8 100644
--- a/pc/peerconnection_integrationtest.cc
+++ b/pc/peerconnection_integrationtest.cc
@@ -42,6 +42,7 @@
 #include "pc/mediasession.h"
 #include "pc/peerconnection.h"
 #include "pc/peerconnectionfactory.h"
+#include "pc/rtpmediautils.h"
 #include "pc/sessiondescription.h"
 #include "pc/test/fakeaudiocapturemodule.h"
 #include "pc/test/fakeperiodicvideocapturer.h"
@@ -61,6 +62,7 @@
 using cricket::FakeWebRtcVideoEncoderFactory;
 using cricket::MediaContentDescription;
 using rtc::SocketAddress;
+using ::testing::Combine;
 using ::testing::ElementsAre;
 using ::testing::Values;
 using webrtc::DataBuffer;
@@ -86,6 +88,10 @@
 using webrtc::RTCErrorType;
 using webrtc::RtpSenderInterface;
 using webrtc::RtpReceiverInterface;
+using webrtc::RtpTransceiverDirection;
+using webrtc::RtpTransceiverInit;
+using webrtc::RtpTransceiverInterface;
 using webrtc::SdpSemantics;
 using webrtc::SdpType;
 using webrtc::SessionDescriptionInterface;
@@ -179,7 +185,7 @@
 // encoders/decoders, though they aren't used by default since they don't
 // advertise support of any codecs.
 // TODO(steveanton): See how this could become a subclass of
-// PeerConnectionWrapper defined in peerconnectionwrapper.h .
+// PeerConnectionWrapper defined in peerconnectionwrapper.h.
 class PeerConnectionWrapper : public webrtc::PeerConnectionObserver,
                               public SignalingMessageReceiver {
  public:
@@ -204,54 +210,6 @@
     return client;
   }
 
-  static PeerConnectionWrapper* CreateWithConfig(
-      const std::string& debug_name,
-      const PeerConnectionInterface::RTCConfiguration& config,
-      rtc::Thread* network_thread,
-      rtc::Thread* worker_thread) {
-    std::unique_ptr<FakeRTCCertificateGenerator> cert_generator(
-        new FakeRTCCertificateGenerator());
-    PeerConnectionWrapper* client(new PeerConnectionWrapper(debug_name));
-    if (!client->Init(nullptr, nullptr, &config, std::move(cert_generator),
-                      network_thread, worker_thread)) {
-      delete client;
-      return nullptr;
-    }
-    return client;
-  }
-
-  static PeerConnectionWrapper* CreateWithOptions(
-      const std::string& debug_name,
-      const PeerConnectionFactory::Options& options,
-      rtc::Thread* network_thread,
-      rtc::Thread* worker_thread) {
-    std::unique_ptr<FakeRTCCertificateGenerator> cert_generator(
-        new FakeRTCCertificateGenerator());
-    PeerConnectionWrapper* client(new PeerConnectionWrapper(debug_name));
-    if (!client->Init(nullptr, &options, nullptr, std::move(cert_generator),
-                      network_thread, worker_thread)) {
-      delete client;
-      return nullptr;
-    }
-    return client;
-  }
-
-  static PeerConnectionWrapper* CreateWithConstraints(
-      const std::string& debug_name,
-      const MediaConstraintsInterface* constraints,
-      rtc::Thread* network_thread,
-      rtc::Thread* worker_thread) {
-    std::unique_ptr<FakeRTCCertificateGenerator> cert_generator(
-        new FakeRTCCertificateGenerator());
-    PeerConnectionWrapper* client(new PeerConnectionWrapper(debug_name));
-    if (!client->Init(constraints, nullptr, nullptr, std::move(cert_generator),
-                      network_thread, worker_thread)) {
-      delete client;
-      return nullptr;
-    }
-    return client;
-  }
-
   webrtc::PeerConnectionFactoryInterface* pc_factory() const {
     return peer_connection_factory_.get();
   }
@@ -291,6 +249,13 @@
     generated_sdp_munger_ = std::move(munger);
   }
 
+  // Set a callback to be invoked when a remote offer is received via the fake
+  // signaling channel. This provides an opportunity to change the
+  // PeerConnection state before an answer is created and sent to the caller.
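+  //
+  // For example, a Unified Plan callee can reject the remote video section
+  // by stopping its video transceiver when the offer arrives (as the tests
+  // below do):
+  //
+  //   callee()->SetRemoteOfferHandler([this] {
+  //     callee()->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO)->Stop();
+  //   });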
+  void SetRemoteOfferHandler(std::function<void()> handler) {
+    remote_offer_handler_ = std::move(handler);
+  }
+
   // Every ICE connection state in order that has been seen by the observer.
   std::vector<PeerConnectionInterface::IceConnectionState>
   ice_connection_state_history() const {
@@ -365,6 +330,16 @@
     return receivers;
   }
 
+  rtc::scoped_refptr<RtpTransceiverInterface> GetFirstTransceiverOfType(
+      cricket::MediaType media_type) {
+    for (auto transceiver : pc()->GetTransceivers()) {
+      if (transceiver->receiver()->media_type() == media_type) {
+        return transceiver;
+      }
+    }
+    return nullptr;
+  }
+
   bool SignalingStateStable() {
     return pc()->signaling_state() == webrtc::PeerConnectionInterface::kStable;
   }
@@ -616,6 +591,9 @@
     if (options) {
       peer_connection_factory_->SetOptions(*options);
     }
+    if (config) {
+      sdp_semantics_ = config->sdp_semantics;
+    }
     peer_connection_ =
         CreatePeerConnection(std::move(port_allocator), constraints, config,
                              std::move(cert_generator));
@@ -697,6 +675,9 @@
     // Setting a remote description may have changed the number of receivers,
     // so reset the receiver observers.
     ResetRtpReceiverObservers();
+    if (remote_offer_handler_) {
+      remote_offer_handler_();
+    }
     auto answer = CreateAnswer();
     ASSERT_NE(nullptr, answer);
     EXPECT_TRUE(SetLocalDescriptionAndSendSdpMessage(std::move(answer)));
@@ -758,6 +739,9 @@
     std::string sdp;
     EXPECT_TRUE(desc->ToString(&sdp));
     pc()->SetLocalDescription(observer, desc.release());
+    if (sdp_semantics_ == SdpSemantics::kUnifiedPlan) {
+      RemoveUnusedVideoRenderers();
+    }
     // As mentioned above, we need to send the message immediately after
     // SetLocalDescription.
     SendSdpMessage(type, sdp);
@@ -770,10 +754,43 @@
         new rtc::RefCountedObject<MockSetSessionDescriptionObserver>());
     RTC_LOG(LS_INFO) << debug_name_ << ": SetRemoteDescription";
     pc()->SetRemoteDescription(observer, desc.release());
+    if (sdp_semantics_ == SdpSemantics::kUnifiedPlan) {
+      RemoveUnusedVideoRenderers();
+    }
     EXPECT_TRUE_WAIT(observer->called(), kDefaultTimeout);
     return observer->result();
   }
 
+  // This is a workaround to remove unused fake_video_renderers from
+  // transceivers that have either stopped or are no longer receiving.
+  void RemoveUnusedVideoRenderers() {
+    auto transceivers = pc()->GetTransceivers();
+    for (auto& transceiver : transceivers) {
+      if (transceiver->receiver()->media_type() != cricket::MEDIA_TYPE_VIDEO) {
+        continue;
+      }
+      // Remove fake video renderers from any stopped transceivers.
+      if (transceiver->stopped()) {
+        auto it =
+            fake_video_renderers_.find(transceiver->receiver()->track()->id());
+        if (it != fake_video_renderers_.end()) {
+          fake_video_renderers_.erase(it);
+        }
+      }
+      // Remove fake video renderers from any transceivers that are no longer
+      // receiving.
+      if ((transceiver->current_direction() &&
+           !webrtc::RtpTransceiverDirectionHasRecv(
+               *transceiver->current_direction()))) {
+        auto it =
+            fake_video_renderers_.find(transceiver->receiver()->track()->id());
+        if (it != fake_video_renderers_.end()) {
+          fake_video_renderers_.erase(it);
+        }
+      }
+    }
+  }
+
   // Simulate sending a blob of SDP with delay |signaling_delay_ms_| (0 by
   // default).
   void SendSdpMessage(SdpType type, const std::string& msg) {
@@ -925,9 +942,11 @@
   // |local_video_renderer_| attached to the first created local video track.
   std::unique_ptr<webrtc::FakeVideoTrackRenderer> local_video_renderer_;
 
+  SdpSemantics sdp_semantics_ = SdpSemantics::kDefault;
   PeerConnectionInterface::RTCOfferAnswerOptions offer_answer_options_;
   std::function<void(cricket::SessionDescription*)> received_sdp_munger_;
   std::function<void(cricket::SessionDescription*)> generated_sdp_munger_;
+  std::function<void()> remote_offer_handler_;
 
   rtc::scoped_refptr<DataChannelInterface> data_channel_;
   std::unique_ptr<MockDataChannelObserver> data_observer_;
@@ -941,7 +960,7 @@
 
   rtc::AsyncInvoker invoker_;
 
-  friend class PeerConnectionIntegrationTest;
+  friend class PeerConnectionIntegrationBaseTest;
 };
 
 class MockRtcEventLogOutput : public webrtc::RtcEventLogOutput {
@@ -951,14 +970,120 @@
   MOCK_METHOD1(Write, bool(const std::string&));
 };
 
+// This helper object specifies how many audio and video frames are expected
+// to be received by the caller and callee, and provides helper functions to
+// set these expectations. The object starts with no expectations.
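+//
+// Example (mirroring its use in the tests below):
+//
+//   MediaExpectations media_expectations;
+//   media_expectations.ExpectBidirectionalAudioAndVideo();
+//   ASSERT_TRUE(ExpectNewFrames(media_expectations));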
+class MediaExpectations {
+ public:
+  enum ExpectFrames {
+    kExpectSomeFrames,
+    kExpectNoFrames,
+    kNoExpectation,
+  };
+
+  void ExpectBidirectionalAudioAndVideo() {
+    ExpectBidirectionalAudio();
+    ExpectBidirectionalVideo();
+  }
+
+  void ExpectBidirectionalAudio() {
+    CallerExpectsSomeAudio();
+    CalleeExpectsSomeAudio();
+  }
+
+  void ExpectNoAudio() {
+    CallerExpectsNoAudio();
+    CalleeExpectsNoAudio();
+  }
+
+  void ExpectBidirectionalVideo() {
+    CallerExpectsSomeVideo();
+    CalleeExpectsSomeVideo();
+  }
+
+  void ExpectNoVideo() {
+    CallerExpectsNoVideo();
+    CalleeExpectsNoVideo();
+  }
+
+  void CallerExpectsSomeAudioAndVideo() {
+    CallerExpectsSomeAudio();
+    CallerExpectsSomeVideo();
+  }
+
+  void CalleeExpectsSomeAudioAndVideo() {
+    CalleeExpectsSomeAudio();
+    CalleeExpectsSomeVideo();
+  }
+
+  // Caller's audio functions.
+  void CallerExpectsSomeAudio(
+      int expected_audio_frames = kDefaultExpectedAudioFrameCount) {
+    caller_audio_expectation_ = kExpectSomeFrames;
+    caller_audio_frames_expected_ = expected_audio_frames;
+  }
+
+  void CallerExpectsNoAudio() {
+    caller_audio_expectation_ = kExpectNoFrames;
+    caller_audio_frames_expected_ = 0;
+  }
+
+  // Caller's video functions.
+  void CallerExpectsSomeVideo(
+      int expected_video_frames = kDefaultExpectedVideoFrameCount) {
+    caller_video_expectation_ = kExpectSomeFrames;
+    caller_video_frames_expected_ = expected_video_frames;
+  }
+
+  void CallerExpectsNoVideo() {
+    caller_video_expectation_ = kExpectNoFrames;
+    caller_video_frames_expected_ = 0;
+  }
+
+  // Callee's audio functions.
+  void CalleeExpectsSomeAudio(
+      int expected_audio_frames = kDefaultExpectedAudioFrameCount) {
+    callee_audio_expectation_ = kExpectSomeFrames;
+    callee_audio_frames_expected_ = expected_audio_frames;
+  }
+
+  void CalleeExpectsNoAudio() {
+    callee_audio_expectation_ = kExpectNoFrames;
+    callee_audio_frames_expected_ = 0;
+  }
+
+  // Callee's video functions.
+  void CalleeExpectsSomeVideo(
+      int expected_video_frames = kDefaultExpectedVideoFrameCount) {
+    callee_video_expectation_ = kExpectSomeFrames;
+    callee_video_frames_expected_ = expected_video_frames;
+  }
+
+  void CalleeExpectsNoVideo() {
+    callee_video_expectation_ = kExpectNoFrames;
+    callee_video_frames_expected_ = 0;
+  }
+
+  ExpectFrames caller_audio_expectation_ = kNoExpectation;
+  ExpectFrames caller_video_expectation_ = kNoExpectation;
+  ExpectFrames callee_audio_expectation_ = kNoExpectation;
+  ExpectFrames callee_video_expectation_ = kNoExpectation;
+  int caller_audio_frames_expected_ = 0;
+  int caller_video_frames_expected_ = 0;
+  int callee_audio_frames_expected_ = 0;
+  int callee_video_frames_expected_ = 0;
+};
+
 // Tests two PeerConnections connecting to each other end-to-end, using a
 // virtual network, fake A/V capture and fake encoder/decoders. The
 // PeerConnections share the threads/socket servers, but use separate versions
 // of everything else (including "PeerConnectionFactory"s).
-class PeerConnectionIntegrationTest : public testing::Test {
+class PeerConnectionIntegrationBaseTest : public testing::Test {
  public:
-  PeerConnectionIntegrationTest()
-      : ss_(new rtc::VirtualSocketServer()),
+  explicit PeerConnectionIntegrationBaseTest(SdpSemantics sdp_semantics)
+      : sdp_semantics_(sdp_semantics),
+        ss_(new rtc::VirtualSocketServer()),
         fss_(new rtc::FirewallSocketServer(ss_.get())),
         network_thread_(new rtc::Thread(fss_.get())),
         worker_thread_(rtc::Thread::Create()) {
@@ -966,7 +1091,7 @@
     RTC_CHECK(worker_thread_->Start());
   }
 
-  ~PeerConnectionIntegrationTest() {
+  ~PeerConnectionIntegrationBaseTest() {
     if (caller_) {
       caller_->set_signaling_message_receiver(nullptr);
     }
@@ -993,6 +1118,32 @@
                 webrtc::PeerConnectionInterface::kIceConnectionCompleted);
   }
 
+  std::unique_ptr<PeerConnectionWrapper> CreatePeerConnectionWrapper(
+      const std::string& debug_name,
+      const MediaConstraintsInterface* constraints,
+      const PeerConnectionFactory::Options* options,
+      const RTCConfiguration* config,
+      std::unique_ptr<rtc::RTCCertificateGeneratorInterface> cert_generator) {
+    RTCConfiguration modified_config;
+    if (config) {
+      modified_config = *config;
+    }
+    if (modified_config.sdp_semantics == SdpSemantics::kDefault) {
+      modified_config.sdp_semantics = sdp_semantics_;
+    }
+    if (!cert_generator) {
+      cert_generator = rtc::MakeUnique<FakeRTCCertificateGenerator>();
+    }
+    std::unique_ptr<PeerConnectionWrapper> client(
+        new PeerConnectionWrapper(debug_name));
+    if (!client->Init(constraints, options, &modified_config,
+                      std::move(cert_generator), network_thread_.get(),
+                      worker_thread_.get())) {
+      return nullptr;
+    }
+    return client;
+  }
+
   bool CreatePeerConnectionWrappers() {
     return CreatePeerConnectionWrappersWithConfig(
         PeerConnectionInterface::RTCConfiguration(),
@@ -1002,44 +1153,41 @@
   bool CreatePeerConnectionWrappersWithConstraints(
       MediaConstraintsInterface* caller_constraints,
       MediaConstraintsInterface* callee_constraints) {
-    caller_.reset(PeerConnectionWrapper::CreateWithConstraints(
-        "Caller", caller_constraints, network_thread_.get(),
-        worker_thread_.get()));
-    callee_.reset(PeerConnectionWrapper::CreateWithConstraints(
-        "Callee", callee_constraints, network_thread_.get(),
-        worker_thread_.get()));
+    caller_ = CreatePeerConnectionWrapper("Caller", caller_constraints, nullptr,
+                                          nullptr, nullptr);
+    callee_ = CreatePeerConnectionWrapper("Callee", callee_constraints, nullptr,
+                                          nullptr, nullptr);
     return caller_ && callee_;
   }
 
   bool CreatePeerConnectionWrappersWithConfig(
       const PeerConnectionInterface::RTCConfiguration& caller_config,
       const PeerConnectionInterface::RTCConfiguration& callee_config) {
-    caller_.reset(PeerConnectionWrapper::CreateWithConfig(
-        "Caller", caller_config, network_thread_.get(), worker_thread_.get()));
-    callee_.reset(PeerConnectionWrapper::CreateWithConfig(
-        "Callee", callee_config, network_thread_.get(), worker_thread_.get()));
+    caller_ = CreatePeerConnectionWrapper("Caller", nullptr, nullptr,
+                                          &caller_config, nullptr);
+    callee_ = CreatePeerConnectionWrapper("Callee", nullptr, nullptr,
+                                          &callee_config, nullptr);
     return caller_ && callee_;
   }
 
   bool CreatePeerConnectionWrappersWithOptions(
       const PeerConnectionFactory::Options& caller_options,
       const PeerConnectionFactory::Options& callee_options) {
-    caller_.reset(PeerConnectionWrapper::CreateWithOptions(
-        "Caller", caller_options, network_thread_.get(), worker_thread_.get()));
-    callee_.reset(PeerConnectionWrapper::CreateWithOptions(
-        "Callee", callee_options, network_thread_.get(), worker_thread_.get()));
+    caller_ = CreatePeerConnectionWrapper("Caller", nullptr, &caller_options,
+                                          nullptr, nullptr);
+    callee_ = CreatePeerConnectionWrapper("Callee", nullptr, &callee_options,
+                                          nullptr, nullptr);
     return caller_ && callee_;
   }
 
-  PeerConnectionWrapper* CreatePeerConnectionWrapperWithAlternateKey() {
+  std::unique_ptr<PeerConnectionWrapper>
+  CreatePeerConnectionWrapperWithAlternateKey() {
     std::unique_ptr<FakeRTCCertificateGenerator> cert_generator(
         new FakeRTCCertificateGenerator());
     cert_generator->use_alternate_key();
 
-    // Make sure the new client is using a different certificate.
-    return PeerConnectionWrapper::CreateWithDtlsIdentityStore(
-        "New Peer", std::move(cert_generator), network_thread_.get(),
-        worker_thread_.get());
+    return CreatePeerConnectionWrapper("New Peer", nullptr, nullptr, nullptr,
+                                       std::move(cert_generator));
   }
 
   // Once called, SDP blobs and ICE candidates will be automatically signaled
@@ -1110,46 +1258,109 @@
 
   rtc::FirewallSocketServer* firewall() const { return fss_.get(); }
 
-  // Expects the provided number of new frames to be received within |wait_ms|.
-  // "New frames" meaning that it waits for the current frame counts to
-  // *increase* by the provided values. For video, uses
-  // RecievedVideoFramesForEachTrack for the case of multiple video tracks
-  // being received.
-  void ExpectNewFramesReceivedWithWait(
-      int expected_caller_received_audio_frames,
-      int expected_caller_received_video_frames,
-      int expected_callee_received_audio_frames,
-      int expected_callee_received_video_frames,
-      int wait_ms) {
-    // Add current frame counts to the provided values, in order to wait for
-    // the frame count to increase.
-    expected_caller_received_audio_frames += caller()->audio_frames_received();
-    expected_caller_received_video_frames +=
+  // Expects the provided number of new frames to be received within
+  // kMaxWaitForFramesMs. The frame expectations are specified in
+  // |media_expectations|. Returns false if any expectation is not met.
+  bool ExpectNewFrames(const MediaExpectations& media_expectations) {
+    // First initialize the expected frame counts based upon the current
+    // frame count.
+    int total_caller_audio_frames_expected = caller()->audio_frames_received();
+    if (media_expectations.caller_audio_expectation_ ==
+        MediaExpectations::kExpectSomeFrames) {
+      total_caller_audio_frames_expected +=
+          media_expectations.caller_audio_frames_expected_;
+    }
+    int total_caller_video_frames_expected =
         caller()->min_video_frames_received_per_track();
-    expected_callee_received_audio_frames += callee()->audio_frames_received();
-    expected_callee_received_video_frames +=
+    if (media_expectations.caller_video_expectation_ ==
+        MediaExpectations::kExpectSomeFrames) {
+      total_caller_video_frames_expected +=
+          media_expectations.caller_video_frames_expected_;
+    }
+    int total_callee_audio_frames_expected = callee()->audio_frames_received();
+    if (media_expectations.callee_audio_expectation_ ==
+        MediaExpectations::kExpectSomeFrames) {
+      total_callee_audio_frames_expected +=
+          media_expectations.callee_audio_frames_expected_;
+    }
+    int total_callee_video_frames_expected =
         callee()->min_video_frames_received_per_track();
+    if (media_expectations.callee_video_expectation_ ==
+        MediaExpectations::kExpectSomeFrames) {
+      total_callee_video_frames_expected +=
+          media_expectations.callee_video_frames_expected_;
+    }
 
+    // Wait for the expected frames.
     EXPECT_TRUE_WAIT(caller()->audio_frames_received() >=
-                             expected_caller_received_audio_frames &&
+                             total_caller_audio_frames_expected &&
                          caller()->min_video_frames_received_per_track() >=
-                             expected_caller_received_video_frames &&
+                             total_caller_video_frames_expected &&
                          callee()->audio_frames_received() >=
-                             expected_callee_received_audio_frames &&
+                             total_callee_audio_frames_expected &&
                          callee()->min_video_frames_received_per_track() >=
-                             expected_callee_received_video_frames,
-                     wait_ms);
+                             total_callee_video_frames_expected,
+                     kMaxWaitForFramesMs);
+    bool expectations_correct =
+        caller()->audio_frames_received() >=
+            total_caller_audio_frames_expected &&
+        caller()->min_video_frames_received_per_track() >=
+            total_caller_video_frames_expected &&
+        callee()->audio_frames_received() >=
+            total_callee_audio_frames_expected &&
+        callee()->min_video_frames_received_per_track() >=
+            total_callee_video_frames_expected;
 
-    // After the combined wait, do an "expect" for each individual count, to
-    // print out a more detailed message upon failure.
+    // After the combined wait, print out a more detailed message upon
+    // failure.
     EXPECT_GE(caller()->audio_frames_received(),
-              expected_caller_received_audio_frames);
+              total_caller_audio_frames_expected);
     EXPECT_GE(caller()->min_video_frames_received_per_track(),
-              expected_caller_received_video_frames);
+              total_caller_video_frames_expected);
     EXPECT_GE(callee()->audio_frames_received(),
-              expected_callee_received_audio_frames);
+              total_callee_audio_frames_expected);
     EXPECT_GE(callee()->min_video_frames_received_per_track(),
-              expected_callee_received_video_frames);
+              total_callee_video_frames_expected);
+
+    // We want to make sure nothing unexpected was received.
+    if (media_expectations.caller_audio_expectation_ ==
+        MediaExpectations::kExpectNoFrames) {
+      EXPECT_EQ(caller()->audio_frames_received(),
+                total_caller_audio_frames_expected);
+      if (caller()->audio_frames_received() !=
+          total_caller_audio_frames_expected) {
+        expectations_correct = false;
+      }
+    }
+    if (media_expectations.caller_video_expectation_ ==
+        MediaExpectations::kExpectNoFrames) {
+      EXPECT_EQ(caller()->min_video_frames_received_per_track(),
+                total_caller_video_frames_expected);
+      if (caller()->min_video_frames_received_per_track() !=
+          total_caller_video_frames_expected) {
+        expectations_correct = false;
+      }
+    }
+    if (media_expectations.callee_audio_expectation_ ==
+        MediaExpectations::kExpectNoFrames) {
+      EXPECT_EQ(callee()->audio_frames_received(),
+                total_callee_audio_frames_expected);
+      if (callee()->audio_frames_received() !=
+          total_callee_audio_frames_expected) {
+        expectations_correct = false;
+      }
+    }
+    if (media_expectations.callee_video_expectation_ ==
+        MediaExpectations::kExpectNoFrames) {
+      EXPECT_EQ(callee()->min_video_frames_received_per_track(),
+                total_callee_video_frames_expected);
+      if (callee()->min_video_frames_received_per_track() !=
+          total_callee_video_frames_expected) {
+        expectations_correct = false;
+      }
+    }
+    return expectations_correct;
   }
 
   void TestGcmNegotiationUsesCipherSuite(bool local_gcm_enabled,
@@ -1177,6 +1388,9 @@
     caller()->pc()->RegisterUMAObserver(nullptr);
   }
 
+ protected:
+  const SdpSemantics sdp_semantics_;
+
  private:
   // |ss_| is used by |network_thread_| so it must be destroyed later.
   std::unique_ptr<rtc::VirtualSocketServer> ss_;
@@ -1190,10 +1404,32 @@
   std::unique_ptr<PeerConnectionWrapper> callee_;
 };
 
+class PeerConnectionIntegrationTest
+    : public PeerConnectionIntegrationBaseTest,
+      public ::testing::WithParamInterface<SdpSemantics> {
+ protected:
+  PeerConnectionIntegrationTest()
+      : PeerConnectionIntegrationBaseTest(GetParam()) {}
+};
+
+class PeerConnectionIntegrationTestPlanB
+    : public PeerConnectionIntegrationBaseTest {
+ protected:
+  PeerConnectionIntegrationTestPlanB()
+      : PeerConnectionIntegrationBaseTest(SdpSemantics::kPlanB) {}
+};
+
+class PeerConnectionIntegrationTestUnifiedPlan
+    : public PeerConnectionIntegrationBaseTest {
+ protected:
+  PeerConnectionIntegrationTestUnifiedPlan()
+      : PeerConnectionIntegrationBaseTest(SdpSemantics::kUnifiedPlan) {}
+};
+
 // Test the OnFirstPacketReceived callback from audio/video RtpReceivers.  This
 // includes testing that the callback is invoked if an observer is connected
 // after the first packet has already been received.
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        RtpReceiverObserverOnFirstPacketReceived) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
@@ -1284,7 +1520,7 @@
 
 // Verifies the DtmfSenderObserver callbacks for a DtmfSender (one in each
 // direction).
-TEST_F(PeerConnectionIntegrationTest, DtmfSenderObserver) {
+TEST_P(PeerConnectionIntegrationTest, DtmfSenderObserver) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   // Only need audio for DTMF.
@@ -1300,7 +1536,7 @@
 
 // Basic end-to-end test, verifying media can be encoded/transmitted/decoded
 // between two connections, using DTLS-SRTP.
-TEST_F(PeerConnectionIntegrationTest, EndToEndCallWithDtls) {
+TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithDtls) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   rtc::scoped_refptr<webrtc::FakeMetricsObserver> caller_observer =
@@ -1313,10 +1549,9 @@
   callee()->AddAudioVideoTracks();
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
   EXPECT_LE(
       1, caller_observer->GetEnumCounter(webrtc::kEnumCounterKeyProtocol,
                                          webrtc::kEnumCounterKeyProtocolDtls));
@@ -1326,7 +1561,7 @@
 }
 
 // Uses SDES instead of DTLS for key agreement.
-TEST_F(PeerConnectionIntegrationTest, EndToEndCallWithSdes) {
+TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithSdes) {
   PeerConnectionInterface::RTCConfiguration sdes_config;
   sdes_config.enable_dtls_srtp.emplace(false);
   ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(sdes_config, sdes_config));
@@ -1341,10 +1576,9 @@
   callee()->AddAudioVideoTracks();
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
   EXPECT_LE(
       1, caller_observer->GetEnumCounter(webrtc::kEnumCounterKeyProtocol,
                                          webrtc::kEnumCounterKeyProtocolSdes));
@@ -1355,8 +1589,12 @@
 
 // Tests that the GetRemoteAudioSSLCertificate method returns the remote DTLS
 // certificate once the DTLS handshake has finished.
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        GetRemoteAudioSSLCertificateReturnsExchangedCertificate) {
+  // TODO(bugs.webrtc.org/8764): Enable for Unified Plan once stats work.
+  if (sdp_semantics_ == SdpSemantics::kUnifiedPlan) {
+    return;
+  }
   auto GetRemoteAudioSSLCertificate = [](PeerConnectionWrapper* wrapper) {
     auto pci = reinterpret_cast<PeerConnectionProxy*>(wrapper->pc());
     auto pc = reinterpret_cast<PeerConnection*>(pci->internal());
@@ -1425,7 +1663,7 @@
 
 // This test sets up a call between two parties (using DTLS) and tests that we
 // can get a video aspect ratio of 16:9.
-TEST_F(PeerConnectionIntegrationTest, SendAndReceive16To9AspectRatio) {
+TEST_P(PeerConnectionIntegrationTest, SendAndReceive16To9AspectRatio) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
 
@@ -1454,7 +1692,7 @@
 
 // This test sets up a call between two parties with a source resolution of
 // 1280x720 and verifies that a 16:9 aspect ratio is received.
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        Send1280By720ResolutionAndReceive16To9AspectRatio) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
@@ -1485,60 +1723,86 @@
 
 // This test sets up a one-way call, with media only from caller to
 // callee.
-TEST_F(PeerConnectionIntegrationTest, OneWayMediaCall) {
+TEST_P(PeerConnectionIntegrationTest, OneWayMediaCall) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   caller()->AddAudioVideoTracks();
   caller()->CreateAndSetAndSignalOffer();
-  int caller_received_frames = 0;
-  ExpectNewFramesReceivedWithWait(
-      caller_received_frames, caller_received_frames,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.CalleeExpectsSomeAudioAndVideo();
+  media_expectations.CallerExpectsNoAudio();
+  media_expectations.CallerExpectsNoVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // This test sets up an audio call initially, with the callee rejecting video
 // initially. Then later the callee decides to upgrade to audio/video, and
 // initiates a new offer/answer exchange.
-TEST_F(PeerConnectionIntegrationTest, AudioToVideoUpgrade) {
+TEST_P(PeerConnectionIntegrationTest, AudioToVideoUpgrade) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   // Initially, offer an audio/video stream from the caller, but refuse to
   // send/receive video on the callee side.
   caller()->AddAudioVideoTracks();
   callee()->AddAudioTrack();
-  PeerConnectionInterface::RTCOfferAnswerOptions options;
-  options.offer_to_receive_video = 0;
-  callee()->SetOfferAnswerOptions(options);
+  if (sdp_semantics_ == SdpSemantics::kPlanB) {
+    PeerConnectionInterface::RTCOfferAnswerOptions options;
+    options.offer_to_receive_video = 0;
+    callee()->SetOfferAnswerOptions(options);
+  } else {
+    callee()->SetRemoteOfferHandler([this] {
+      callee()->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO)->Stop();
+    });
+  }
   // Do offer/answer and make sure audio is still received end-to-end.
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(kDefaultExpectedAudioFrameCount, 0,
-                                  kDefaultExpectedAudioFrameCount, 0,
-                                  kMaxWaitForFramesMs);
+  {
+    MediaExpectations media_expectations;
+    media_expectations.ExpectBidirectionalAudio();
+    media_expectations.ExpectNoVideo();
+    ASSERT_TRUE(ExpectNewFrames(media_expectations));
+  }
   // Sanity check that the callee's description has a rejected video section.
   ASSERT_NE(nullptr, callee()->pc()->local_description());
   const ContentInfo* callee_video_content =
       GetFirstVideoContent(callee()->pc()->local_description()->description());
   ASSERT_NE(nullptr, callee_video_content);
   EXPECT_TRUE(callee_video_content->rejected);
+
   // Now negotiate with video and ensure negotiation succeeds, with video
   // frames and additional audio frames being received.
   callee()->AddVideoTrack();
-  options.offer_to_receive_video = 1;
-  callee()->SetOfferAnswerOptions(options);
+  if (sdp_semantics_ == SdpSemantics::kPlanB) {
+    PeerConnectionInterface::RTCOfferAnswerOptions options;
+    options.offer_to_receive_video = 1;
+    callee()->SetOfferAnswerOptions(options);
+  } else {
+    callee()->SetRemoteOfferHandler(nullptr);
+    caller()->SetRemoteOfferHandler([this] {
+      // The caller creates a new transceiver to receive video on when
+      // receiving the offer, but by default it is send only.
+      auto transceivers = caller()->pc()->GetTransceivers();
+      ASSERT_EQ(3u, transceivers.size());
+      ASSERT_EQ(cricket::MEDIA_TYPE_VIDEO,
+                transceivers[2]->receiver()->media_type());
+      transceivers[2]->sender()->SetTrack(caller()->CreateLocalVideoTrack());
+      transceivers[2]->SetDirection(RtpTransceiverDirection::kSendRecv);
+    });
+  }
   callee()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  // Expect additional audio frames to be received after the upgrade.
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  {
+    // Expect additional audio frames to be received after the upgrade.
+    MediaExpectations media_expectations;
+    media_expectations.ExpectBidirectionalAudioAndVideo();
+    ASSERT_TRUE(ExpectNewFrames(media_expectations));
+  }
 }
 
 // Simpler than the above test; just add an audio track to an established
 // video-only connection.
-TEST_F(PeerConnectionIntegrationTest, AddAudioToVideoOnlyCall) {
+TEST_P(PeerConnectionIntegrationTest, AddAudioToVideoOnlyCall) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   // Do initial offer/answer with just a video track.
@@ -1552,15 +1816,14 @@
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
   // Ensure both audio and video frames are received end-to-end.
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // This test sets up a call that's transferred to a new caller with a different
 // DTLS fingerprint.
-TEST_F(PeerConnectionIntegrationTest, CallTransferredForCallee) {
+TEST_P(PeerConnectionIntegrationTest, CallTransferredForCallee) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   caller()->AddAudioVideoTracks();
@@ -1572,7 +1835,7 @@
   // receiving client. These SRTP packets will be dropped.
   std::unique_ptr<PeerConnectionWrapper> original_peer(
       SetCallerPcWrapperAndReturnCurrent(
-          CreatePeerConnectionWrapperWithAlternateKey()));
+          CreatePeerConnectionWrapperWithAlternateKey().release()));
   // TODO(deadbeef): Why do we call Close here? That goes against the comment
   // directly above.
   original_peer->pc()->Close();
@@ -1582,15 +1845,14 @@
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
   // Wait for some additional frames to be transmitted end-to-end.
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // This test sets up a call that's transferred to a new callee with a different
 // DTLS fingerprint.
-TEST_F(PeerConnectionIntegrationTest, CallTransferredForCaller) {
+TEST_P(PeerConnectionIntegrationTest, CallTransferredForCaller) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   caller()->AddAudioVideoTracks();
@@ -1602,7 +1864,7 @@
   // receiving client. These SRTP packets will be dropped.
   std::unique_ptr<PeerConnectionWrapper> original_peer(
       SetCalleePcWrapperAndReturnCurrent(
-          CreatePeerConnectionWrapperWithAlternateKey()));
+          CreatePeerConnectionWrapperWithAlternateKey().release()));
   // TODO(deadbeef): Why do we call Close here? That goes against the comment
   // directly above.
   original_peer->pc()->Close();
@@ -1613,16 +1875,15 @@
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
   // Wait for some additional frames to be transmitted end-to-end.
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // This test sets up a non-bundled call and negotiates bundling at the same
 // time as starting an ICE restart. When bundling is in effect in the restart,
 // the DTLS-SRTP context should be successfully reset.
-TEST_F(PeerConnectionIntegrationTest, BundlingEnabledWhileIceRestartOccurs) {
+TEST_P(PeerConnectionIntegrationTest, BundlingEnabledWhileIceRestartOccurs) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
 
@@ -1634,11 +1895,11 @@
   });
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
-
+  {
+    MediaExpectations media_expectations;
+    media_expectations.ExpectBidirectionalAudioAndVideo();
+    ASSERT_TRUE(ExpectNewFrames(media_expectations));
+  }
   // Now stop removing the BUNDLE group, and trigger an ICE restart.
   callee()->SetReceivedSdpMunger(nullptr);
   caller()->SetOfferAnswerOptions(IceRestartOfferAnswerOptions());
@@ -1646,17 +1907,18 @@
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
 
   // Expect additional frames to be received after the ICE restart.
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  {
+    MediaExpectations media_expectations;
+    media_expectations.ExpectBidirectionalAudioAndVideo();
+    ASSERT_TRUE(ExpectNewFrames(media_expectations));
+  }
 }
 
 // Test CVO (Coordination of Video Orientation). If a video source is rotated
 // and both peers support the CVO RTP header extension, the actual video frames
 // don't need to be encoded in different resolutions, since the rotation is
 // communicated through the RTP header extension.
-TEST_F(PeerConnectionIntegrationTest, RotatedVideoWithCVOExtension) {
+TEST_P(PeerConnectionIntegrationTest, RotatedVideoWithCVOExtension) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   // Add rotated video tracks.
@@ -1686,7 +1948,7 @@
 
 // Test that when the CVO extension isn't supported, video is rotated the
 // old-fashioned way, by encoding rotated frames.
-TEST_F(PeerConnectionIntegrationTest, RotatedVideoWithoutCVOExtension) {
+TEST_P(PeerConnectionIntegrationTest, RotatedVideoWithoutCVOExtension) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   // Add rotated video tracks.
@@ -1721,66 +1983,88 @@
   EXPECT_EQ(webrtc::kVideoRotation_0, callee()->rendered_rotation());
 }
 
-// TODO(deadbeef): The tests below rely on RTCOfferAnswerOptions to reject an
-// m= section. When we implement Unified Plan SDP, the right way to do this
-// would be by stopping an RtpTransceiver.
-
 // Test that if the answerer rejects the audio m= section, no audio is sent or
 // received, but video still can be.
-TEST_F(PeerConnectionIntegrationTest, AnswererRejectsAudioSection) {
+TEST_P(PeerConnectionIntegrationTest, AnswererRejectsAudioSection) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   caller()->AddAudioVideoTracks();
-  // Only add video track for callee, and set offer_to_receive_audio to 0, so
-  // it will reject the audio m= section completely.
-  PeerConnectionInterface::RTCOfferAnswerOptions options;
-  options.offer_to_receive_audio = 0;
-  callee()->SetOfferAnswerOptions(options);
+  if (sdp_semantics_ == SdpSemantics::kPlanB) {
+    // Only add video track for callee, and set offer_to_receive_audio to 0, so
+    // it will reject the audio m= section completely.
+    PeerConnectionInterface::RTCOfferAnswerOptions options;
+    options.offer_to_receive_audio = 0;
+    callee()->SetOfferAnswerOptions(options);
+  } else {
+    // Stopping the audio RtpTransceiver will cause the media section to be
+    // rejected in the answer.
+    callee()->SetRemoteOfferHandler([this] {
+      callee()->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_AUDIO)->Stop();
+    });
+  }
   callee()->AddTrack(callee()->CreateLocalVideoTrack());
   // Do offer/answer and wait for successful end-to-end video frames.
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(0, kDefaultExpectedVideoFrameCount, 0,
-                                  kDefaultExpectedVideoFrameCount,
-                                  kMaxWaitForFramesMs);
-  // Shouldn't have received audio frames at any point.
-  EXPECT_EQ(0, caller()->audio_frames_received());
-  EXPECT_EQ(0, callee()->audio_frames_received());
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalVideo();
+  media_expectations.ExpectNoAudio();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
+
   // Sanity check that the callee's description has a rejected audio section.
   ASSERT_NE(nullptr, callee()->pc()->local_description());
   const ContentInfo* callee_audio_content =
       GetFirstAudioContent(callee()->pc()->local_description()->description());
   ASSERT_NE(nullptr, callee_audio_content);
   EXPECT_TRUE(callee_audio_content->rejected);
+  if (sdp_semantics_ == SdpSemantics::kUnifiedPlan) {
+    // The caller's transceiver should have stopped after receiving the answer.
+    EXPECT_TRUE(caller()
+                    ->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_AUDIO)
+                    ->stopped());
+  }
 }
 
 // Test that if the answerer rejects the video m= section, no video is sent or
 // received, but audio still can be.
-TEST_F(PeerConnectionIntegrationTest, AnswererRejectsVideoSection) {
+TEST_P(PeerConnectionIntegrationTest, AnswererRejectsVideoSection) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   caller()->AddAudioVideoTracks();
-  // Only add audio track for callee, and set offer_to_receive_video to 0, so
-  // it will reject the video m= section completely.
-  PeerConnectionInterface::RTCOfferAnswerOptions options;
-  options.offer_to_receive_video = 0;
-  callee()->SetOfferAnswerOptions(options);
+  if (sdp_semantics_ == SdpSemantics::kPlanB) {
+    // Only add audio track for callee, and set offer_to_receive_video to 0, so
+    // it will reject the video m= section completely.
+    PeerConnectionInterface::RTCOfferAnswerOptions options;
+    options.offer_to_receive_video = 0;
+    callee()->SetOfferAnswerOptions(options);
+  } else {
+    // Stopping the video RtpTransceiver will cause the media section to be
+    // rejected in the answer.
+    callee()->SetRemoteOfferHandler([this] {
+      callee()->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO)->Stop();
+    });
+  }
   callee()->AddTrack(callee()->CreateLocalAudioTrack());
   // Do offer/answer and wait for successful end-to-end audio frames.
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(kDefaultExpectedAudioFrameCount, 0,
-                                  kDefaultExpectedAudioFrameCount, 0,
-                                  kMaxWaitForFramesMs);
-  // Shouldn't have received video frames at any point.
-  EXPECT_EQ(0, caller()->total_video_frames_received());
-  EXPECT_EQ(0, callee()->total_video_frames_received());
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudio();
+  media_expectations.ExpectNoVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
+
   // Sanity check that the callee's description has a rejected video section.
   ASSERT_NE(nullptr, callee()->pc()->local_description());
   const ContentInfo* callee_video_content =
       GetFirstVideoContent(callee()->pc()->local_description()->description());
   ASSERT_NE(nullptr, callee_video_content);
   EXPECT_TRUE(callee_video_content->rejected);
+  if (sdp_semantics_ == SdpSemantics::kUnifiedPlan) {
+    // The caller's transceiver should have stopped after receiving the answer.
+    EXPECT_TRUE(caller()
+                    ->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO)
+                    ->stopped());
+  }
 }
 
 // Test that if the answerer rejects both audio and video m= sections, nothing
@@ -1788,19 +2072,29 @@
 // TODO(deadbeef): Test that a data channel still works. Currently this doesn't
 // test anything but the fact that negotiation succeeds, which doesn't mean
 // much.
-TEST_F(PeerConnectionIntegrationTest, AnswererRejectsAudioAndVideoSections) {
+TEST_P(PeerConnectionIntegrationTest, AnswererRejectsAudioAndVideoSections) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   caller()->AddAudioVideoTracks();
-  // Don't give the callee any tracks, and set offer_to_receive_X to 0, so it
-  // will reject both audio and video m= sections.
-  PeerConnectionInterface::RTCOfferAnswerOptions options;
-  options.offer_to_receive_audio = 0;
-  options.offer_to_receive_video = 0;
-  callee()->SetOfferAnswerOptions(options);
+  if (sdp_semantics_ == SdpSemantics::kPlanB) {
+    // Don't give the callee any tracks, and set offer_to_receive_X to 0, so it
+    // will reject both audio and video m= sections.
+    PeerConnectionInterface::RTCOfferAnswerOptions options;
+    options.offer_to_receive_audio = 0;
+    options.offer_to_receive_video = 0;
+    callee()->SetOfferAnswerOptions(options);
+  } else {
+    callee()->SetRemoteOfferHandler([this] {
+      // Stopping all transceivers will cause all media sections to be rejected.
+      for (auto transceiver : callee()->pc()->GetTransceivers()) {
+        transceiver->Stop();
+      }
+    });
+  }
   // Do offer/answer and wait for stable signaling state.
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
+
   // Sanity check that the callee's description has rejected m= sections.
   ASSERT_NE(nullptr, callee()->pc()->local_description());
   const ContentInfo* callee_audio_content =
@@ -1817,28 +2111,31 @@
 // call runs for a while, the caller sends an updated offer with video being
 // rejected. Once the re-negotiation is done, the video flow should stop and
 // the audio flow should continue.
-TEST_F(PeerConnectionIntegrationTest, VideoRejectedInSubsequentOffer) {
+TEST_P(PeerConnectionIntegrationTest, VideoRejectedInSubsequentOffer) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   caller()->AddAudioVideoTracks();
   callee()->AddAudioVideoTracks();
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
-
+  {
+    MediaExpectations media_expectations;
+    media_expectations.ExpectBidirectionalAudioAndVideo();
+    ASSERT_TRUE(ExpectNewFrames(media_expectations));
+  }
   // Renegotiate, rejecting the video m= section.
-  // TODO(deadbeef): When an RtpTransceiver API is available, use that to
-  // reject the video m= section.
-  caller()->SetGeneratedSdpMunger([](cricket::SessionDescription* description) {
-    for (cricket::ContentInfo& content : description->contents()) {
-      if (cricket::IsVideoContent(&content)) {
-        content.rejected = true;
-      }
-    }
-  });
+  if (sdp_semantics_ == SdpSemantics::kPlanB) {
+    caller()->SetGeneratedSdpMunger(
+        [](cricket::SessionDescription* description) {
+          for (cricket::ContentInfo& content : description->contents()) {
+            if (cricket::IsVideoContent(&content)) {
+              content.rejected = true;
+            }
+          }
+        });
+  } else {
+    caller()->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO)->Stop();
+  }
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kMaxWaitForActivationMs);
 
@@ -1848,19 +2145,13 @@
       GetFirstVideoContent(caller()->pc()->local_description()->description());
   ASSERT_NE(nullptr, caller_video_content);
   EXPECT_TRUE(caller_video_content->rejected);
-
-  int caller_video_received = caller()->total_video_frames_received();
-  int callee_video_received = callee()->total_video_frames_received();
-
   // Wait for some additional audio frames to be received.
-  ExpectNewFramesReceivedWithWait(kDefaultExpectedAudioFrameCount, 0,
-                                  kDefaultExpectedAudioFrameCount, 0,
-                                  kMaxWaitForFramesMs);
-
-  // During this time, we shouldn't have received any additional video frames
-  // for the rejected video tracks.
-  EXPECT_EQ(caller_video_received, caller()->total_video_frames_received());
-  EXPECT_EQ(callee_video_received, callee()->total_video_frames_received());
+  {
+    MediaExpectations media_expectations;
+    media_expectations.ExpectBidirectionalAudio();
+    media_expectations.ExpectNoVideo();
+    ASSERT_TRUE(ExpectNewFrames(media_expectations));
+  }
 }
 
 // Basic end-to-end test, but without SSRC/MSID signaling. This functionality
@@ -1868,7 +2159,7 @@
 // TODO(deadbeef): When we support the MID extension and demuxing on MID, also
 // add a test for an end-to-end test without MID signaling either (basically,
 // the minimum acceptable SDP).
-TEST_F(PeerConnectionIntegrationTest, EndToEndCallWithoutSsrcOrMsidSignaling) {
+TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithoutSsrcOrMsidSignaling) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   // Add audio and video, testing that packets can be demuxed on payload type.
@@ -1878,15 +2169,14 @@
   callee()->SetReceivedSdpMunger(RemoveSsrcsAndMsids);
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // Test that if two video tracks are sent (from caller to callee, in this test),
 // they're transmitted correctly end-to-end.
-TEST_F(PeerConnectionIntegrationTest, EndToEndCallWithTwoVideoTracks) {
+TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithTwoVideoTracks) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   // Add one audio/video stream, and one video-only stream.
@@ -1895,9 +2185,10 @@
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
   ASSERT_EQ(3u, callee()->pc()->GetReceivers().size());
-  int expected_callee_received_frames = kDefaultExpectedVideoFrameCount;
-  ExpectNewFramesReceivedWithWait(0, 0, 0, expected_callee_received_frames,
-                                  kMaxWaitForFramesMs);
+
+  MediaExpectations media_expectations;
+  media_expectations.CalleeExpectsSomeAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 static void MakeSpecCompliantMaxBundleOffer(cricket::SessionDescription* desc) {
@@ -1929,7 +2220,7 @@
 // TODO(deadbeef): Update this test to also omit "a=rtcp-mux", once that works.
 // TODO(deadbeef): Won't need this test once we start generating actual
 // standards-compliant SDP.
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        EndToEndCallWithSpecCompliantMaxBundleOffer) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
@@ -1941,16 +2232,15 @@
   callee()->SetReceivedSdpMunger(MakeSpecCompliantMaxBundleOffer);
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // Test that we can receive the audio output level from a remote audio track.
 // TODO(deadbeef): Use a fake audio source and verify that the output level is
 // exactly what the source on the other side was configured with.
-TEST_F(PeerConnectionIntegrationTest, GetAudioOutputLevelStatsWithOldStatsApi) {
+TEST_P(PeerConnectionIntegrationTest, GetAudioOutputLevelStatsWithOldStatsApi) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   // Just add an audio track.
@@ -1967,7 +2257,7 @@
 // Test that an audio input level is reported.
 // TODO(deadbeef): Use a fake audio source and verify that the input level is
 // exactly what the source was configured with.
-TEST_F(PeerConnectionIntegrationTest, GetAudioInputLevelStatsWithOldStatsApi) {
+TEST_P(PeerConnectionIntegrationTest, GetAudioInputLevelStatsWithOldStatsApi) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   // Just add an audio track.
@@ -1982,18 +2272,17 @@
 }
 
 // Test that we can get incoming byte counts from both audio and video tracks.
-TEST_F(PeerConnectionIntegrationTest, GetBytesReceivedStatsWithOldStatsApi) {
+TEST_P(PeerConnectionIntegrationTest, GetBytesReceivedStatsWithOldStatsApi) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   caller()->AddAudioVideoTracks();
   // Do offer/answer, wait for the callee to receive some frames.
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  int expected_caller_received_frames = 0;
-  ExpectNewFramesReceivedWithWait(
-      expected_caller_received_frames, expected_caller_received_frames,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+
+  MediaExpectations media_expectations;
+  media_expectations.CalleeExpectsSomeAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 
   // Get a handle to the remote tracks created, so they can be used as GetStats
   // filters.
@@ -2006,7 +2295,7 @@
 }
 
 // Test that we can get outgoing byte counts from both audio and video tracks.
-TEST_F(PeerConnectionIntegrationTest, GetBytesSentStatsWithOldStatsApi) {
+TEST_P(PeerConnectionIntegrationTest, GetBytesSentStatsWithOldStatsApi) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   caller()->AddAudioVideoTracks();
@@ -2017,11 +2306,9 @@
   // Do offer/answer, wait for the callee to receive some frames.
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  int expected_caller_received_frames = 0;
-  ExpectNewFramesReceivedWithWait(
-      expected_caller_received_frames, expected_caller_received_frames,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.CalleeExpectsSomeAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 
   // The callee received frames, so we definitely should have nonzero "sent
   // bytes" stats at this point.
@@ -2030,7 +2317,7 @@
 }
 
 // Test that we can get capture start ntp time.
-TEST_F(PeerConnectionIntegrationTest, GetCaptureStartNtpTimeWithOldStatsApi) {
+TEST_P(PeerConnectionIntegrationTest, GetCaptureStartNtpTimeWithOldStatsApi) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   caller()->AddAudioTrack();
@@ -2058,7 +2345,7 @@
 // Test that we can get stats (using the new stats implementation) for
 // unsignaled streams, i.e., when SSRCs/MSIDs aren't signaled explicitly in
 // SDP.
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        GetStatsForUnsignaledStreamWithNewStatsApi) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
@@ -2067,8 +2354,9 @@
   callee()->SetReceivedSdpMunger(RemoveSsrcsAndMsids);
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  // Wait for one audio frame to be received by the callee.
-  ExpectNewFramesReceivedWithWait(0, 0, 1, 0, kMaxWaitForFramesMs);
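+  // Wait for one audio frame to be received by the callee.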
+  MediaExpectations media_expectations;
+  media_expectations.CalleeExpectsSomeAudio(1);
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 
   // We received a frame, so we should have nonzero "bytes received" stats for
   // the unsignaled stream, if stats are working for it.
@@ -2085,7 +2373,7 @@
 
 // Test that we can successfully get the media related stats (audio level
 // etc.) for the unsignaled stream.
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        GetMediaStatsForUnsignaledStreamWithNewStatsApi) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
@@ -2094,8 +2382,10 @@
   callee()->SetReceivedSdpMunger(RemoveSsrcsAndMsids);
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  // Wait for one audio frame to be received by the callee.
-  ExpectNewFramesReceivedWithWait(0, 0, 1, 1, kMaxWaitForFramesMs);
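+  // Wait for one audio and one video frame to be received by the callee.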
+  MediaExpectations media_expectations;
+  media_expectations.CalleeExpectsSomeAudio(1);
+  media_expectations.CalleeExpectsSomeVideo(1);
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 
   rtc::scoped_refptr<const webrtc::RTCStatsReport> report =
       callee()->NewGetStats();
@@ -2134,7 +2424,7 @@
 // received, and a 50% chance that they'll stop updating (while
 // "concealed_samples" continues increasing, due to silence being generated for
 // the inactive stream).
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        TrackStatsUpdatedCorrectlyWhenUnsignaledSsrcChanges) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
@@ -2145,8 +2435,11 @@
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
   // Wait for 50 audio frames (500ms of audio) to be received by the callee.
-  ExpectNewFramesReceivedWithWait(0, 0, 25, 0, kMaxWaitForFramesMs);
-
+  {
+    MediaExpectations media_expectations;
+    media_expectations.CalleeExpectsSomeAudio(50);
+    ASSERT_TRUE(ExpectNewFrames(media_expectations));
+  }
   // Some audio frames were received, so we should have nonzero "samples
   // received" for the track.
   rtc::scoped_refptr<const webrtc::RTCStatsReport> report =
@@ -2164,7 +2457,11 @@
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
   // Wait for 25 more audio frames (250ms of audio) to be received, from the new
   // SSRC.
-  ExpectNewFramesReceivedWithWait(0, 0, 25, 0, kMaxWaitForFramesMs);
+  {
+    MediaExpectations media_expectations;
+    media_expectations.CalleeExpectsSomeAudio(25);
+    ASSERT_TRUE(ExpectNewFrames(media_expectations));
+  }
 
   report = callee()->NewGetStats();
   ASSERT_NE(nullptr, report);
@@ -2198,7 +2495,7 @@
 }
 
 // Test that DTLS 1.0 is used if both sides only support DTLS 1.0.
-TEST_F(PeerConnectionIntegrationTest, EndToEndCallWithDtls10) {
+TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithDtls10) {
   PeerConnectionFactory::Options dtls_10_options;
   dtls_10_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_10;
   ASSERT_TRUE(CreatePeerConnectionWrappersWithOptions(dtls_10_options,
@@ -2210,14 +2507,13 @@
   callee()->AddAudioVideoTracks();
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // Test getting cipher stats and UMA metrics when DTLS 1.0 is negotiated.
-TEST_F(PeerConnectionIntegrationTest, Dtls10CipherStatsAndUmaMetrics) {
+TEST_P(PeerConnectionIntegrationTest, Dtls10CipherStatsAndUmaMetrics) {
   PeerConnectionFactory::Options dtls_10_options;
   dtls_10_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_10;
   ASSERT_TRUE(CreatePeerConnectionWrappersWithOptions(dtls_10_options,
@@ -2242,7 +2538,7 @@
 }
 
 // Test getting cipher stats and UMA metrics when DTLS 1.2 is negotiated.
-TEST_F(PeerConnectionIntegrationTest, Dtls12CipherStatsAndUmaMetrics) {
+TEST_P(PeerConnectionIntegrationTest, Dtls12CipherStatsAndUmaMetrics) {
   PeerConnectionFactory::Options dtls_12_options;
   dtls_12_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_12;
   ASSERT_TRUE(CreatePeerConnectionWrappersWithOptions(dtls_12_options,
@@ -2268,7 +2564,7 @@
 
 // Test that DTLS 1.0 can be used if the caller supports DTLS 1.2 and the
 // callee only supports 1.0.
-TEST_F(PeerConnectionIntegrationTest, CallerDtls12ToCalleeDtls10) {
+TEST_P(PeerConnectionIntegrationTest, CallerDtls12ToCalleeDtls10) {
   PeerConnectionFactory::Options caller_options;
   caller_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_12;
   PeerConnectionFactory::Options callee_options;
@@ -2282,15 +2578,14 @@
   callee()->AddAudioVideoTracks();
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // Test that DTLS 1.0 can be used if the caller only supports DTLS 1.0 and the
 // callee supports 1.2.
-TEST_F(PeerConnectionIntegrationTest, CallerDtls10ToCalleeDtls12) {
+TEST_P(PeerConnectionIntegrationTest, CallerDtls10ToCalleeDtls12) {
   PeerConnectionFactory::Options caller_options;
   caller_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_10;
   PeerConnectionFactory::Options callee_options;
@@ -2304,14 +2599,13 @@
   callee()->AddAudioVideoTracks();
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // Test that a non-GCM cipher is used if both sides only support non-GCM.
-TEST_F(PeerConnectionIntegrationTest, NonGcmCipherUsedWhenGcmNotSupported) {
+TEST_P(PeerConnectionIntegrationTest, NonGcmCipherUsedWhenGcmNotSupported) {
   bool local_gcm_enabled = false;
   bool remote_gcm_enabled = false;
   int expected_cipher_suite = kDefaultSrtpCryptoSuite;
@@ -2320,7 +2614,7 @@
 }
 
 // Test that a GCM cipher is used if both ends support it.
-TEST_F(PeerConnectionIntegrationTest, GcmCipherUsedWhenGcmSupported) {
+TEST_P(PeerConnectionIntegrationTest, GcmCipherUsedWhenGcmSupported) {
   bool local_gcm_enabled = true;
   bool remote_gcm_enabled = true;
   int expected_cipher_suite = kDefaultSrtpCryptoSuiteGcm;
@@ -2329,7 +2623,7 @@
 }
 
 // Test that GCM isn't used if only the offerer supports it.
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        NonGcmCipherUsedWhenOnlyCallerSupportsGcm) {
   bool local_gcm_enabled = true;
   bool remote_gcm_enabled = false;
@@ -2339,7 +2633,7 @@
 }
 
 // Test that GCM isn't used if only the answerer supports it.
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        NonGcmCipherUsedWhenOnlyCalleeSupportsGcm) {
   bool local_gcm_enabled = false;
   bool remote_gcm_enabled = true;
@@ -2352,7 +2646,7 @@
 // enabled. Note that the above tests, such as GcmCipherUsedWhenGcmSupported,
 // only verify that a GCM cipher is negotiated, and not necessarily that SRTP
 // works with it.
-TEST_F(PeerConnectionIntegrationTest, EndToEndCallWithGcmCipher) {
+TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithGcmCipher) {
   PeerConnectionFactory::Options gcm_options;
   gcm_options.crypto_options.enable_gcm_crypto_suites = true;
   ASSERT_TRUE(
@@ -2364,15 +2658,14 @@
   callee()->AddAudioVideoTracks();
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // This test sets up a call between two parties with audio, video and an RTP
 // data channel.
-TEST_F(PeerConnectionIntegrationTest, EndToEndCallWithRtpDataChannel) {
+TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithRtpDataChannel) {
   FakeConstraints setup_constraints;
   setup_constraints.SetAllowRtpDataChannels();
   ASSERT_TRUE(CreatePeerConnectionWrappersWithConstraints(&setup_constraints,
@@ -2386,10 +2679,9 @@
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
   // Ensure the existence of the RTP data channel didn't impede audio/video.
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
   ASSERT_NE(nullptr, caller()->data_channel());
   ASSERT_NE(nullptr, callee()->data_channel());
   EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout);
@@ -2407,7 +2699,7 @@
 
 // Ensure that an RTP data channel is signaled as closed for the caller when
 // the callee rejects it in a subsequent offer.
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        RtpDataChannelSignaledClosedInCalleeOffer) {
   // Same procedure as above test.
   FakeConstraints setup_constraints;
@@ -2440,7 +2732,7 @@
 // transport has detected that a channel is writable and thus data can be
 // received before the data channel state changes to open. That is hard to test
 // but the same buffering is expected to be used in that case.
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        DataBufferedUntilRtpDataChannelObserverRegistered) {
   // Use fake clock and simulated network delay so that we predictably can wait
   // until an SCTP message has been delivered without "sleep()"ing.
@@ -2485,7 +2777,7 @@
 
 // This test sets up a call between two parties with audio and video, but only
 // the caller client supports RTP data channels.
-TEST_F(PeerConnectionIntegrationTest, RtpDataChannelsRejectedByCallee) {
+TEST_P(PeerConnectionIntegrationTest, RtpDataChannelsRejectedByCallee) {
   FakeConstraints setup_constraints_1;
   setup_constraints_1.SetAllowRtpDataChannels();
   // Must disable DTLS to make negotiation succeed.
@@ -2511,7 +2803,7 @@
 
 // This test sets up a call between two parties with audio and video. When
 // audio and video are set up and flowing, an RTP data channel is negotiated.
-TEST_F(PeerConnectionIntegrationTest, AddRtpDataChannelInSubsequentOffer) {
+TEST_P(PeerConnectionIntegrationTest, AddRtpDataChannelInSubsequentOffer) {
   FakeConstraints setup_constraints;
   setup_constraints.SetAllowRtpDataChannels();
   ASSERT_TRUE(CreatePeerConnectionWrappersWithConstraints(&setup_constraints,
@@ -2544,7 +2836,7 @@
 
 // This test sets up a call between two parties with audio, video and an SCTP
 // data channel.
-TEST_F(PeerConnectionIntegrationTest, EndToEndCallWithSctpDataChannel) {
+TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithSctpDataChannel) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   // Expect that data channel created on caller side will show up for callee as
@@ -2555,10 +2847,9 @@
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
   // Ensure the existence of the SCTP data channel didn't impede audio/video.
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
   // Caller data channel should already exist (it created one). Callee data
   // channel may not exist yet, since negotiation happens in-band, not in SDP.
   ASSERT_NE(nullptr, caller()->data_channel());
@@ -2578,7 +2869,7 @@
 
 // Ensure that when the callee closes an SCTP data channel, the closing
 // procedure results in the data channel being closed for the caller as well.
-TEST_F(PeerConnectionIntegrationTest, CalleeClosesSctpDataChannel) {
+TEST_P(PeerConnectionIntegrationTest, CalleeClosesSctpDataChannel) {
   // Same procedure as above test.
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
@@ -2599,7 +2890,7 @@
   EXPECT_TRUE_WAIT(!callee()->data_observer()->IsOpen(), kDefaultTimeout);
 }
 
-TEST_F(PeerConnectionIntegrationTest, SctpDataChannelConfigSentToOtherSide) {
+TEST_P(PeerConnectionIntegrationTest, SctpDataChannelConfigSentToOtherSide) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   webrtc::DataChannelInit init;
@@ -2621,7 +2912,7 @@
 // Test usrsctp's ability to process unordered data stream, where data actually
 // arrives out of order using simulated delays. Previously there have been some
 // bugs in this area.
-TEST_F(PeerConnectionIntegrationTest, StressTestUnorderedSctpDataChannel) {
+TEST_P(PeerConnectionIntegrationTest, StressTestUnorderedSctpDataChannel) {
   // Introduce random network delays.
   // Otherwise it's not a true "unordered" test.
   virtual_socket_server()->set_delay_mean(20);
@@ -2677,7 +2968,7 @@
 
 // This test sets up a call between two parties with audio and video. When
 // audio and video are set up and flowing, an SCTP data channel is negotiated.
-TEST_F(PeerConnectionIntegrationTest, AddSctpDataChannelInSubsequentOffer) {
+TEST_P(PeerConnectionIntegrationTest, AddSctpDataChannelInSubsequentOffer) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   // Do initial offer/answer with audio/video.
@@ -2709,7 +3000,7 @@
 // to audio/video, ensuring frames are received end-to-end. Effectively the
 // inverse of the test above.
 // This was broken in M57; see https://crbug.com/711243
-TEST_F(PeerConnectionIntegrationTest, SctpDataChannelToAudioVideoUpgrade) {
+TEST_P(PeerConnectionIntegrationTest, SctpDataChannelToAudioVideoUpgrade) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   // Do initial offer/answer with just data channel.
@@ -2727,10 +3018,9 @@
   callee()->AddAudioVideoTracks();
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 static void MakeSpecCompliantSctpOffer(cricket::SessionDescription* desc) {
@@ -2744,7 +3034,7 @@
 // Test that the data channel works when a spec-compliant SCTP m= section is
 // offered (using "a=sctp-port" instead of "a=sctpmap", and using
 // "UDP/DTLS/SCTP" as the protocol).
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        DataChannelWorksWhenSpecCompliantSctpOfferReceived) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
@@ -2770,7 +3060,7 @@
 
 // Test that the ICE connection and gathering states eventually reach
 // "complete".
-TEST_F(PeerConnectionIntegrationTest, IceStatesReachCompletion) {
+TEST_P(PeerConnectionIntegrationTest, IceStatesReachCompletion) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   // Do normal offer/answer.
@@ -2796,11 +3086,13 @@
 // Test that firewalling the ICE connection causes the clients to identify the
 // disconnected state and then removing the firewall causes them to reconnect.
 class PeerConnectionIntegrationIceStatesTest
-    : public PeerConnectionIntegrationTest,
-      public ::testing::WithParamInterface<std::tuple<std::string, uint32_t>> {
+    : public PeerConnectionIntegrationBaseTest,
+      public ::testing::WithParamInterface<
+          std::tuple<SdpSemantics, std::tuple<std::string, uint32_t>>> {
  protected:
-  PeerConnectionIntegrationIceStatesTest() {
-    port_allocator_flags_ = std::get<1>(GetParam());
+  PeerConnectionIntegrationIceStatesTest()
+      : PeerConnectionIntegrationBaseTest(std::get<0>(GetParam())) {
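+    // GetParam() is a (SdpSemantics, (description, port allocator flags))
+    // tuple; the inner pair comes from the Values() list in the
+    // INSTANTIATE_TEST_CASE_P below.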
+    port_allocator_flags_ = std::get<1>(std::get<1>(GetParam()));
   }
 
   void StartStunServer(const SocketAddress& server_address) {
@@ -2981,18 +3273,19 @@
 constexpr uint32_t kFlagsIPv4Stun =
     cricket::PORTALLOCATOR_DISABLE_TCP | cricket::PORTALLOCATOR_DISABLE_RELAY;
 
-INSTANTIATE_TEST_CASE_P(PeerConnectionIntegrationTest,
-                        PeerConnectionIntegrationIceStatesTest,
-                        Values(std::make_pair("IPv4 no STUN", kFlagsIPv4NoStun),
-                               std::make_pair("IPv6 no STUN", kFlagsIPv6NoStun),
-                               std::make_pair("IPv4 with STUN",
-                                              kFlagsIPv4Stun)));
+INSTANTIATE_TEST_CASE_P(
+    PeerConnectionIntegrationTest,
+    PeerConnectionIntegrationIceStatesTest,
+    Combine(Values(SdpSemantics::kPlanB, SdpSemantics::kUnifiedPlan),
+            Values(std::make_pair("IPv4 no STUN", kFlagsIPv4NoStun),
+                   std::make_pair("IPv6 no STUN", kFlagsIPv6NoStun),
+                   std::make_pair("IPv4 with STUN", kFlagsIPv4Stun))));
 
 // This test sets up a call between two parties with audio and video.
 // During the call, the caller restarts ICE and the test verifies that
 // new ICE candidates are generated and audio and video still can flow, and the
 // ICE state reaches completed again.
-TEST_F(PeerConnectionIntegrationTest, MediaContinuesFlowingAfterIceRestart) {
+TEST_P(PeerConnectionIntegrationTest, MediaContinuesFlowingAfterIceRestart) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   // Do normal offer/answer and wait for ICE to complete.
@@ -3061,15 +3354,14 @@
   ASSERT_NE(callee_ufrag_pre_restart, callee_ufrag_post_restart);
 
   // Ensure that additional frames are received after the ICE restart.
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // Verify that audio/video can be received end-to-end when ICE renomination is
 // enabled.
-TEST_F(PeerConnectionIntegrationTest, EndToEndCallWithIceRenomination) {
+TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithIceRenomination) {
   PeerConnectionInterface::RTCConfiguration config;
   config.enable_ice_renomination = true;
   ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(config, config));
@@ -3096,16 +3388,15 @@
         std::find(info.description.transport_options.begin(),
                   info.description.transport_options.end(), "renomination"));
   }
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // With a max bundle policy and RTCP muxing, adding a new media description to
 // the connection should not affect ICE at all because the new media will use
 // the existing connection.
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        AddMediaToConnectedBundleDoesNotRestartIce) {
   PeerConnectionInterface::RTCConfiguration config;
   config.bundle_policy = PeerConnectionInterface::kBundlePolicyMaxBundle;
@@ -3132,7 +3423,7 @@
 // This test sets up a call between two parties with audio and video. It then
 // renegotiates setting the video m-line to "port 0", then later renegotiates
 // again, enabling video.
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        VideoFlowsAfterMediaSectionIsRejectedAndRecycled) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
@@ -3145,9 +3436,15 @@
 
   // Negotiate again, disabling the video "m=" section (the callee will set the
   // port to 0 due to offer_to_receive_video = 0).
-  PeerConnectionInterface::RTCOfferAnswerOptions options;
-  options.offer_to_receive_video = 0;
-  callee()->SetOfferAnswerOptions(options);
+  if (sdp_semantics_ == SdpSemantics::kPlanB) {
+    PeerConnectionInterface::RTCOfferAnswerOptions options;
+    options.offer_to_receive_video = 0;
+    callee()->SetOfferAnswerOptions(options);
+  } else {
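+    // In Unified Plan, stopping the callee's video transceiver causes the
+    // answer to reject the video "m=" section (port 0), mirroring the
+    // Plan B offer_to_receive_video = 0 path above.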
+    callee()->SetRemoteOfferHandler([this] {
+      callee()->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO)->Stop();
+    });
+  }
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
   // Sanity check that video "m=" section was actually rejected.
@@ -3158,24 +3455,35 @@
 
   // Enable video and do negotiation again, making sure video is received
   // end-to-end, also adding media stream to callee.
-  options.offer_to_receive_video = 1;
-  callee()->SetOfferAnswerOptions(options);
-  callee()->AddAudioVideoTracks();
+  if (sdp_semantics_ == SdpSemantics::kPlanB) {
+    PeerConnectionInterface::RTCOfferAnswerOptions options;
+    options.offer_to_receive_video = 1;
+    callee()->SetOfferAnswerOptions(options);
+  } else {
+    // The caller's transceiver is stopped, so we need to add another track.
+    auto caller_transceiver =
+        caller()->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO);
+    EXPECT_TRUE(caller_transceiver->stopped());
+    caller()->AddVideoTrack();
+  }
+  callee()->AddVideoTrack();
+  callee()->SetRemoteOfferHandler(nullptr);
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
+
   // Verify the caller receives frames from the newly added stream, and the
   // callee receives additional frames from the re-enabled video m= section.
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.CalleeExpectsSomeAudio();
+  media_expectations.ExpectBidirectionalVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // This test sets up a Jsep call between two parties with external
 // VideoDecoderFactory.
 // TODO(holmer): Disabled due to sometimes crashing on buildbots.
 // See issue webrtc/2378.
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        DISABLED_EndToEndCallWithVideoDecoderFactory) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   EnableVideoDecoderFactory();
@@ -3184,18 +3492,15 @@
   callee()->AddAudioVideoTracks();
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // This tests that if we negotiate after calling CreateSender but before we
 // have a track, then set a track later, frames from the newly-set track are
 // received end-to-end.
-// TODO(deadbeef): Change this test to use AddTransceiver, once that's
-// implemented.
-TEST_F(PeerConnectionIntegrationTest,
+TEST_F(PeerConnectionIntegrationTestPlanB,
        MediaFlowsAfterEarlyWarmupWithCreateSender) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
@@ -3219,17 +3524,55 @@
   EXPECT_TRUE(caller_video_sender->SetTrack(caller()->CreateLocalVideoTrack()));
   EXPECT_TRUE(callee_audio_sender->SetTrack(callee()->CreateLocalAudioTrack()));
   EXPECT_TRUE(callee_video_sender->SetTrack(callee()->CreateLocalVideoTrack()));
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
+}
+
+// This tests that if we negotiate after calling AddTransceiver but before we
+// have a track, then set a track later, frames from the newly-set tracks are
+// received end-to-end.
+TEST_F(PeerConnectionIntegrationTestUnifiedPlan,
+       MediaFlowsAfterEarlyWarmupWithAddTransceiver) {
+  ASSERT_TRUE(CreatePeerConnectionWrappers());
+  ConnectFakeSignaling();
+  auto audio_result = caller()->pc()->AddTransceiver(cricket::MEDIA_TYPE_AUDIO);
+  ASSERT_EQ(RTCErrorType::NONE, audio_result.error().type());
+  auto caller_audio_sender = audio_result.MoveValue()->sender();
+  auto video_result = caller()->pc()->AddTransceiver(cricket::MEDIA_TYPE_VIDEO);
+  ASSERT_EQ(RTCErrorType::NONE, video_result.error().type());
+  auto caller_video_sender = video_result.MoveValue()->sender();
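+  // Transceivers created on the callee by applying the remote offer start out
+  // recvonly; flip them to sendrecv so the callee's answer also negotiates
+  // sending once its tracks are attached below.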
+  callee()->SetRemoteOfferHandler([this] {
+    ASSERT_EQ(2u, callee()->pc()->GetTransceivers().size());
+    callee()->pc()->GetTransceivers()[0]->SetDirection(
+        RtpTransceiverDirection::kSendRecv);
+    callee()->pc()->GetTransceivers()[1]->SetDirection(
+        RtpTransceiverDirection::kSendRecv);
+  });
+  caller()->CreateAndSetAndSignalOffer();
+  ASSERT_TRUE_WAIT(SignalingStateStable(), kMaxWaitForActivationMs);
+  // Wait for ICE to complete, without any tracks being set.
+  EXPECT_EQ_WAIT(webrtc::PeerConnectionInterface::kIceConnectionCompleted,
+                 caller()->ice_connection_state(), kMaxWaitForFramesMs);
+  EXPECT_EQ_WAIT(webrtc::PeerConnectionInterface::kIceConnectionConnected,
+                 callee()->ice_connection_state(), kMaxWaitForFramesMs);
+  // Now set the tracks, and expect frames to immediately start flowing.
+  auto callee_audio_sender = callee()->pc()->GetSenders()[0];
+  auto callee_video_sender = callee()->pc()->GetSenders()[1];
+  ASSERT_TRUE(caller_audio_sender->SetTrack(caller()->CreateLocalAudioTrack()));
+  ASSERT_TRUE(caller_video_sender->SetTrack(caller()->CreateLocalVideoTrack()));
+  ASSERT_TRUE(callee_audio_sender->SetTrack(callee()->CreateLocalAudioTrack()));
+  ASSERT_TRUE(callee_video_sender->SetTrack(callee()->CreateLocalVideoTrack()));
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // This test verifies that a remote video track can be added via AddStream,
 // and sent end-to-end. For this particular test, it's simply echoed back
 // from the caller to the callee, rather than being forwarded to a third
 // PeerConnection.
-TEST_F(PeerConnectionIntegrationTest, CanSendRemoteVideoTrack) {
+TEST_F(PeerConnectionIntegrationTestPlanB, CanSendRemoteVideoTrack) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   // Just send a video track from the caller.
@@ -3244,9 +3587,9 @@
   callee()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kMaxWaitForActivationMs);
 
-  int expected_caller_received_video_frames = kDefaultExpectedVideoFrameCount;
-  ExpectNewFramesReceivedWithWait(0, expected_caller_received_video_frames, 0,
-                                  0, kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // Test that we achieve the expected end-to-end connection time, using a
@@ -3262,7 +3605,7 @@
 // 2. 9 media hops: Rest of the DTLS handshake. 3 hops in each direction when
 //                  using TURN<->TURN pair, and DTLS exchange is 4 packets,
 //                  the first of which should have arrived before the answer.
-TEST_F(PeerConnectionIntegrationTest, EndToEndConnectionTimeWithTurnTurnPair) {
+TEST_P(PeerConnectionIntegrationTest, EndToEndConnectionTimeWithTurnTurnPair) {
   rtc::ScopedFakeClock fake_clock;
   // Some things use a time of "0" as a special value, so we need to start out
   // the fake clock at a nonzero time.
@@ -3345,8 +3688,7 @@
 // Verify that a TurnCustomizer passed in through RTCConfiguration
 // is actually used by the underlying TURN candidate pair.
 // Note that turnport_unittest.cc contains more detailed, lower-level tests.
-TEST_F(PeerConnectionIntegrationTest,           \
-       TurnCustomizerUsedForTurnConnections) {
+TEST_P(PeerConnectionIntegrationTest, TurnCustomizerUsedForTurnConnections) {
   static const rtc::SocketAddress turn_server_1_internal_address{"88.88.88.0",
                                                                  3478};
   static const rtc::SocketAddress turn_server_1_external_address{"88.88.88.1",
@@ -3415,7 +3757,7 @@
 // In the past, this has regressed and caused crashes/black video, due to the
 // fact that code at some layers was doing case-insensitive comparisons and
 // code at other layers was not.
-TEST_F(PeerConnectionIntegrationTest, CodecNamesAreCaseInsensitive) {
+TEST_P(PeerConnectionIntegrationTest, CodecNamesAreCaseInsensitive) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   caller()->AddAudioVideoTracks();
@@ -3455,20 +3797,21 @@
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
 
   // Verify frames are still received end-to-end.
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
-TEST_F(PeerConnectionIntegrationTest, GetSources) {
+TEST_P(PeerConnectionIntegrationTest, GetSources) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
   caller()->AddAudioTrack();
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
   // Wait for one audio frame to be received by the callee.
-  ExpectNewFramesReceivedWithWait(0, 0, 1, 0, kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.CalleeExpectsSomeAudio(1);
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
   ASSERT_GT(callee()->pc()->GetReceivers().size(), 0u);
   auto receiver = callee()->pc()->GetReceivers()[0];
   ASSERT_EQ(receiver->media_type(), cricket::MEDIA_TYPE_AUDIO);
@@ -3482,7 +3825,11 @@
 // Test that if a track is removed and added again with a different stream ID,
 // the new stream ID is successfully communicated in SDP and media continues to
 // flow end-to-end.
-TEST_F(PeerConnectionIntegrationTest, RemoveAndAddTrackWithNewStreamId) {
+// TODO(bugs.webrtc.org/8734): This test does not work for Unified Plan because
+// a transceiver that has already been used for sending is not reused. After a
+// new transceiver is created, the offer contains two senders with the same
+// track id, and creating the offer fails.
+TEST_F(PeerConnectionIntegrationTestPlanB, RemoveAndAddTrackWithNewStreamId) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
 
@@ -3498,20 +3845,25 @@
       caller()->pc()->AddTrack(track, {stream_1.get()});
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  // Wait for one audio frame to be received by the callee.
-  ExpectNewFramesReceivedWithWait(0, 0, 1, 0, kMaxWaitForFramesMs);
-
+  {
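+    // Wait for one audio frame to be received by the callee.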
+    MediaExpectations media_expectations;
+    media_expectations.CalleeExpectsSomeAudio(1);
+    ASSERT_TRUE(ExpectNewFrames(media_expectations));
+  }
   // Remove the sender, and create a new one with the new stream.
   caller()->pc()->RemoveTrack(sender);
   sender = caller()->pc()->AddTrack(track, {stream_2.get()});
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
   // Wait for additional audio frames to be received by the callee.
-  ExpectNewFramesReceivedWithWait(0, 0, kDefaultExpectedAudioFrameCount, 0,
-                                  kMaxWaitForFramesMs);
+  {
+    MediaExpectations media_expectations;
+    media_expectations.CalleeExpectsSomeAudio();
+    ASSERT_TRUE(ExpectNewFrames(media_expectations));
+  }
 }
 
-TEST_F(PeerConnectionIntegrationTest, RtcEventLogOutputWriteCalled) {
+TEST_P(PeerConnectionIntegrationTest, RtcEventLogOutputWriteCalled) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
 
@@ -3530,7 +3882,7 @@
 // Test that if candidates are only signaled by applying full session
 // descriptions (instead of using AddIceCandidate), the peers can connect to
 // each other and exchange media.
-TEST_F(PeerConnectionIntegrationTest, MediaFlowsWhenCandidatesSetOnlyInSdp) {
+TEST_P(PeerConnectionIntegrationTest, MediaFlowsWhenCandidatesSetOnlyInSdp) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   // Each side will signal the session descriptions but not candidates.
   ConnectFakeSignalingForSdpOnly();
@@ -3553,17 +3905,16 @@
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
 
   // Ensure that media flows in both directions.
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // Test that SetAudioPlayout can be used to disable audio playout from the
 // start, then later enable it. This may be useful, for example, if the caller
 // needs to play a local ringtone until some event occurs, after which it
 // switches to playing the received audio.
-TEST_F(PeerConnectionIntegrationTest, DisableAndEnableAudioPlayout) {
+TEST_P(PeerConnectionIntegrationTest, DisableAndEnableAudioPlayout) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
 
@@ -3585,9 +3936,9 @@
 
   // Enable playout again, and ensure audio starts flowing.
   caller()->pc()->SetAudioPlayout(true);
-  ExpectNewFramesReceivedWithWait(kDefaultExpectedAudioFrameCount, 0,
-                                  kDefaultExpectedAudioFrameCount, 0,
-                                  kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudio();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 double GetAudioEnergyStat(PeerConnectionWrapper* pc) {
@@ -3611,7 +3962,7 @@
 
 // Test that if audio playout is disabled via the SetAudioPlayout() method, then
 // incoming audio is still processed and statistics are generated.
-TEST_F(PeerConnectionIntegrationTest,
+TEST_P(PeerConnectionIntegrationTest,
        DisableAudioPlayoutStillGeneratesAudioStats) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
@@ -3633,7 +3984,7 @@
 // start, then later enable it. This may be useful, for example, if the caller
 // wants to ensure that no audio resources are active before a certain state
 // is reached.
-TEST_F(PeerConnectionIntegrationTest, DisableAndEnableAudioRecording) {
+TEST_P(PeerConnectionIntegrationTest, DisableAndEnableAudioRecording) {
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
 
@@ -3655,14 +4006,14 @@
 
   // Enable audio recording again, and ensure audio starts flowing.
   caller()->pc()->SetAudioRecording(true);
-  ExpectNewFramesReceivedWithWait(kDefaultExpectedAudioFrameCount, 0,
-                                  kDefaultExpectedAudioFrameCount, 0,
-                                  kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudio();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // Test that after closing PeerConnections, they stop sending any packets (ICE,
 // DTLS, RTP...).
-TEST_F(PeerConnectionIntegrationTest, ClosingConnectionStopsPacketFlow) {
+TEST_P(PeerConnectionIntegrationTest, ClosingConnectionStopsPacketFlow) {
   // Set up audio/video/data, wait for some frames to be received.
   ASSERT_TRUE(CreatePeerConnectionWrappers());
   ConnectFakeSignaling();
@@ -3672,9 +4023,9 @@
 #endif
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(0, 0, kDefaultExpectedAudioFrameCount,
-                                  kDefaultExpectedAudioFrameCount,
-                                  kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.CalleeExpectsSomeAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
   // Close PeerConnections.
   caller()->pc()->Close();
   callee()->pc()->Close();
@@ -3685,32 +4036,24 @@
   EXPECT_EQ(sent_packets_a, sent_packets_b);
 }
 
-// Test that a basic 1 audio and 1 video track call works when Unified Plan
-// semantics configured for both sides.
-TEST_F(PeerConnectionIntegrationTest, UnifiedPlanMediaFlows) {
-  PeerConnectionInterface::RTCConfiguration config;
-  config.sdp_semantics = SdpSemantics::kUnifiedPlan;
-  ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(config, config));
-  ConnectFakeSignaling();
-  caller()->AddAudioVideoTracks();
-  callee()->AddAudioVideoTracks();
-  caller()->CreateAndSetAndSignalOffer();
-  ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
-}
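+// Instantiate the parameterized PeerConnectionIntegrationTest cases above for
+// both Plan B and Unified Plan SDP semantics.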
+INSTANTIATE_TEST_CASE_P(PeerConnectionIntegrationTest,
+                        PeerConnectionIntegrationTest,
+                        Values(SdpSemantics::kPlanB,
+                               SdpSemantics::kUnifiedPlan));
 
 // Tests that verify interoperability between Plan B and Unified Plan
 // PeerConnections.
 class PeerConnectionIntegrationInteropTest
-    : public PeerConnectionIntegrationTest,
+    : public PeerConnectionIntegrationBaseTest,
       public ::testing::WithParamInterface<
           std::tuple<SdpSemantics, SdpSemantics>> {
  protected:
+  // The SdpSemantics passed to the base test (kDefault) does not matter here,
+  // because the PeerConnectionWrappers are created with the caller/callee
+  // semantics from the test parameters, not the base test's value.
   PeerConnectionIntegrationInteropTest()
-      : caller_semantics_(std::get<0>(GetParam())),
+      : PeerConnectionIntegrationBaseTest(SdpSemantics::kDefault),
+        caller_semantics_(std::get<0>(GetParam())),
         callee_semantics_(std::get<1>(GetParam())) {}
 
   bool CreatePeerConnectionWrappersWithSemantics() {
@@ -3748,9 +4091,9 @@
   EXPECT_EQ(cricket::MEDIA_TYPE_AUDIO, receivers[0]->media_type());
   EXPECT_EQ(receivers[0]->track()->id(), audio_sender->track()->id());
 
-  // Expect to receive only audio frames on the callee.
-  ExpectNewFramesReceivedWithWait(0, 0, kDefaultExpectedAudioFrameCount, 0,
-                                  kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.CalleeExpectsSomeAudio();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 TEST_P(PeerConnectionIntegrationInteropTest, OneAudioOneVideoToNoMediaRemote) {
@@ -3773,10 +4116,9 @@
   ASSERT_EQ(1u, video_receivers.size());
   EXPECT_EQ(video_receivers[0]->track()->id(), video_sender->track()->id());
 
-  // Expect to receive audio and video frames only on the callee.
-  ExpectNewFramesReceivedWithWait(0, 0, kDefaultExpectedAudioFrameCount,
-                                  kDefaultExpectedVideoFrameCount,
-                                  kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.CalleeExpectsSomeAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 TEST_P(PeerConnectionIntegrationInteropTest,
@@ -3789,10 +4131,9 @@
   caller()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
 
-  ExpectNewFramesReceivedWithWait(
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kDefaultExpectedAudioFrameCount, kDefaultExpectedVideoFrameCount,
-      kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalAudioAndVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 TEST_P(PeerConnectionIntegrationInteropTest,
@@ -3813,11 +4154,10 @@
   callee()->CreateAndSetAndSignalOffer();
   ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
 
-  // Expect to receive audio frames on the callee and video frames on the
-  // caller.
-  ExpectNewFramesReceivedWithWait(0, kDefaultExpectedVideoFrameCount,
-                                  kDefaultExpectedAudioFrameCount, 0,
-                                  kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.CallerExpectsSomeVideo();
+  media_expectations.CalleeExpectsSomeAudio();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // Test that if one side offers two video tracks then the other side will only
@@ -3838,10 +4178,9 @@
   EXPECT_TRUE(receivers[0]->track()->enabled());
   EXPECT_EQ(first_sender->track()->id(), receivers[0]->track()->id());
 
-  // Expect to receive video frames from the one track.
-  ExpectNewFramesReceivedWithWait(0, 0,
-                                  0, kDefaultExpectedVideoFrameCount,
-                                  kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.CalleeExpectsSomeVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 // Test that in the multi-track case each endpoint only looks at the first track
@@ -3893,9 +4232,9 @@
   }
 
   // Should be exchanging video frames for the first tracks on each endpoint.
-  ExpectNewFramesReceivedWithWait(0, kDefaultExpectedVideoFrameCount, 0,
-                                  kDefaultExpectedVideoFrameCount,
-                                  kMaxWaitForFramesMs);
+  MediaExpectations media_expectations;
+  media_expectations.ExpectBidirectionalVideo();
+  ASSERT_TRUE(ExpectNewFrames(media_expectations));
 }
 
 INSTANTIATE_TEST_CASE_P(