Don't allow changing ICE candidate pool size after SetLocalDescription.

This was the decision at IETF 97
(see: https://github.com/rtcweb-wg/jsep/issues/381). Since there's no
real need to change the pool size at that point, it's simpler to
disallow it than to devise complex rules for how such a change should
be applied.
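
For illustration, the caller-visible effect of this change (a minimal
sketch; assumes an already-created PeerConnectionInterface |pc| and
mirrors the new unit test below):

  PeerConnectionInterface::RTCConfiguration config;
  config.ice_candidate_pool_size = 2;
  pc->SetConfiguration(config);  // Returns true; pool is resized.
  // ... pc->SetLocalDescription(...) is then called ...
  config.ice_candidate_pool_size = 3;
  pc->SetConfiguration(config);  // Now returns false and logs an error.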

BUG=webrtc:6864

Review-Url: https://codereview.webrtc.org/2566833002
Cr-Commit-Position: refs/heads/master@{#15559}
diff --git a/webrtc/api/peerconnection.cc b/webrtc/api/peerconnection.cc
index 692ca2e..05a940e 100644
--- a/webrtc/api/peerconnection.cc
+++ b/webrtc/api/peerconnection.cc
@@ -1288,6 +1288,14 @@
 
 bool PeerConnection::SetConfiguration(const RTCConfiguration& configuration) {
   TRACE_EVENT0("webrtc", "PeerConnection::SetConfiguration");
+
+  if (session_->local_description() &&
+      configuration.ice_candidate_pool_size !=
+          configuration_.ice_candidate_pool_size) {
+    LOG(LS_ERROR) << "Can't change candidate pool size after calling "
+                     "SetLocalDescription.";
+    return false;
+  }
   // TODO(deadbeef): Return false and log an error if there are any unsupported
   // modifications.
   if (port_allocator_) {
@@ -1295,6 +1303,7 @@
             RTC_FROM_HERE,
             rtc::Bind(&PeerConnection::ReconfigurePortAllocator_n, this,
                       configuration))) {
+      LOG(LS_ERROR) << "Failed to apply configuration to PortAllocator.";
       return false;
     }
   }
@@ -2386,10 +2395,9 @@
       ConvertIceTransportTypeToCandidateFilter(configuration.type));
   // Call this last since it may create pooled allocator sessions using the
   // candidate filter set above.
-  port_allocator_->SetConfiguration(stun_servers, turn_servers,
-                                    configuration.ice_candidate_pool_size,
-                                    configuration.prune_turn_ports);
-  return true;
+  return port_allocator_->SetConfiguration(
+      stun_servers, turn_servers, configuration.ice_candidate_pool_size,
+      configuration.prune_turn_ports);
 }
 
 bool PeerConnection::StartRtcEventLog_w(rtc::PlatformFile file,
diff --git a/webrtc/api/peerconnectioninterface_unittest.cc b/webrtc/api/peerconnectioninterface_unittest.cc
index 1fb3a5f..7b33c0c 100644
--- a/webrtc/api/peerconnectioninterface_unittest.cc
+++ b/webrtc/api/peerconnectioninterface_unittest.cc
@@ -2203,6 +2203,26 @@
   EXPECT_EQ(1UL, session->stun_servers().size());
 }
 
+// Test that after SetLocalDescription, changing the pool size is not allowed.
+TEST_F(PeerConnectionInterfaceTest,
+       CantChangePoolSizeAfterSetLocalDescription) {
+  CreatePeerConnection();
+  // Start by setting a size of 1.
+  PeerConnectionInterface::RTCConfiguration config;
+  config.ice_candidate_pool_size = 1;
+  EXPECT_TRUE(pc_->SetConfiguration(config));
+
+  // Set remote offer; can still change pool size at this point.
+  CreateOfferAsRemoteDescription();
+  config.ice_candidate_pool_size = 2;
+  EXPECT_TRUE(pc_->SetConfiguration(config));
+
+  // Set local answer; now it's too late.
+  CreateAnswerAsLocalDescription();
+  config.ice_candidate_pool_size = 3;
+  EXPECT_FALSE(pc_->SetConfiguration(config));
+}
+
 // Test that PeerConnection::Close changes the states to closed and all remote
 // tracks change state to ended.
 TEST_F(PeerConnectionInterfaceTest, CloseAndTestStreamsAndStates) {
diff --git a/webrtc/p2p/base/portallocator.cc b/webrtc/p2p/base/portallocator.cc
index e71582a..dc4166a 100644
--- a/webrtc/p2p/base/portallocator.cc
+++ b/webrtc/p2p/base/portallocator.cc
@@ -29,7 +29,7 @@
   RTC_DCHECK(ice_ufrag.empty() == ice_pwd.empty());
 }
 
-void PortAllocator::SetConfiguration(
+bool PortAllocator::SetConfiguration(
     const ServerAddresses& stun_servers,
     const std::vector<RelayServerConfig>& turn_servers,
     int candidate_pool_size,
@@ -40,31 +40,48 @@
   turn_servers_ = turn_servers;
   prune_turn_ports_ = prune_turn_ports;
 
+  bool candidate_pool_drain_began =
+      static_cast<int>(pooled_sessions_.size()) != candidate_pool_size_;
+  if (candidate_pool_drain_began &&
+      candidate_pool_size != candidate_pool_size_) {
+    LOG(LS_ERROR) << "Trying to change candidate pool size after pool started "
+                     "to be drained.";
+    return false;
+  }
+  if (candidate_pool_size < 0) {
+    LOG(LS_ERROR) << "Can't set negative pool size.";
+    return false;
+  }
+  candidate_pool_size_ = candidate_pool_size;
+
+  // If sessions need to be recreated but the pool has already begun to be
+  // drained, only recreate as many sessions as currently remain in the pool.
+  int sessions_needed = candidate_pool_drain_began
+                            ? static_cast<int>(pooled_sessions_.size())
+                            : candidate_pool_size_;
+
   // If ICE servers changed, throw away any existing pooled sessions and create
   // new ones.
   if (ice_servers_changed) {
     pooled_sessions_.clear();
-    allocated_pooled_session_count_ = 0;
   }
 
-  // If |size| is less than the number of allocated sessions, get rid of the
-  // extras.
-  while (allocated_pooled_session_count_ > candidate_pool_size &&
-         !pooled_sessions_.empty()) {
+  // If |sessions_needed| is less than the number of pooled sessions, get rid
+  // of the extras.
+  while (sessions_needed < static_cast<int>(pooled_sessions_.size())) {
     pooled_sessions_.front().reset(nullptr);
     pooled_sessions_.pop_front();
-    --allocated_pooled_session_count_;
   }
-  // If |size| is greater than the number of allocated sessions, create new
-  // sessions.
-  while (allocated_pooled_session_count_ < candidate_pool_size) {
+
+  // If |sessions_needed| is greater than the number of pooled sessions,
+  // create new sessions.
+  while (static_cast<int>(pooled_sessions_.size()) < sessions_needed) {
     PortAllocatorSession* pooled_session = CreateSessionInternal("", 0, "", "");
     pooled_session->StartGettingPorts();
     pooled_sessions_.push_back(
         std::unique_ptr<PortAllocatorSession>(pooled_session));
-    ++allocated_pooled_session_count_;
   }
-  target_pooled_session_count_ = candidate_pool_size;
+  return true;
 }
 
 std::unique_ptr<PortAllocatorSession> PortAllocator::CreateSession(
diff --git a/webrtc/p2p/base/portallocator.h b/webrtc/p2p/base/portallocator.h
index 3c81068..a61d642 100644
--- a/webrtc/p2p/base/portallocator.h
+++ b/webrtc/p2p/base/portallocator.h
@@ -298,7 +298,9 @@
   //
   // If the servers are not changing but the candidate pool size is,
   // pooled sessions will be either created or destroyed as necessary.
-  void SetConfiguration(const ServerAddresses& stun_servers,
+  //
+  // Returns true if the configuration could successfully be changed.
+  bool SetConfiguration(const ServerAddresses& stun_servers,
                         const std::vector<RelayServerConfig>& turn_servers,
                         int candidate_pool_size,
                         bool prune_turn_ports);
@@ -309,7 +311,7 @@
     return turn_servers_;
   }
 
-  int candidate_pool_size() const { return target_pooled_session_count_; }
+  int candidate_pool_size() const { return candidate_pool_size_; }
 
   // Sets the network types to ignore.
   // Values are defined by the AdapterType enum.
@@ -412,11 +414,7 @@
  private:
   ServerAddresses stun_servers_;
   std::vector<RelayServerConfig> turn_servers_;
-  // The last size passed into SetConfiguration.
-  int target_pooled_session_count_ = 0;
-  // This variable represents the total number of pooled sessions
-  // both owned by this class and taken by TakePooledSession.
-  int allocated_pooled_session_count_ = 0;
+  int candidate_pool_size_ = 0;  // Last value passed into SetConfiguration.
   std::deque<std::unique_ptr<PortAllocatorSession>> pooled_sessions_;
   bool prune_turn_ports_ = false;
 
diff --git a/webrtc/p2p/base/portallocator_unittest.cc b/webrtc/p2p/base/portallocator_unittest.cc
index a97cf30..74e2a3d 100644
--- a/webrtc/p2p/base/portallocator_unittest.cc
+++ b/webrtc/p2p/base/portallocator_unittest.cc
@@ -32,9 +32,15 @@
 
  protected:
   void SetConfigurationWithPoolSize(int candidate_pool_size) {
-    allocator_->SetConfiguration(cricket::ServerAddresses(),
-                                 std::vector<cricket::RelayServerConfig>(),
-                                 candidate_pool_size, false);
+    EXPECT_TRUE(allocator_->SetConfiguration(
+        cricket::ServerAddresses(), std::vector<cricket::RelayServerConfig>(),
+        candidate_pool_size, false));
+  }
+
+  void SetConfigurationWithPoolSizeExpectFailure(int candidate_pool_size) {
+    EXPECT_FALSE(allocator_->SetConfiguration(
+        cricket::ServerAddresses(), std::vector<cricket::RelayServerConfig>(),
+        candidate_pool_size, false));
   }
 
   std::unique_ptr<cricket::FakePortAllocatorSession> CreateSession(
@@ -104,14 +110,16 @@
 TEST_F(PortAllocatorTest, SetConfigurationUpdatesIceServers) {
   cricket::ServerAddresses stun_servers_1 = {stun_server_1};
   std::vector<cricket::RelayServerConfig> turn_servers_1 = {turn_server_1};
-  allocator_->SetConfiguration(stun_servers_1, turn_servers_1, 0, false);
+  EXPECT_TRUE(
+      allocator_->SetConfiguration(stun_servers_1, turn_servers_1, 0, false));
   EXPECT_EQ(stun_servers_1, allocator_->stun_servers());
   EXPECT_EQ(turn_servers_1, allocator_->turn_servers());
 
   // Update with a different set of servers.
   cricket::ServerAddresses stun_servers_2 = {stun_server_2};
   std::vector<cricket::RelayServerConfig> turn_servers_2 = {turn_server_2};
-  allocator_->SetConfiguration(stun_servers_2, turn_servers_2, 0, false);
+  EXPECT_TRUE(
+      allocator_->SetConfiguration(stun_servers_2, turn_servers_2, 0, false));
   EXPECT_EQ(stun_servers_2, allocator_->stun_servers());
   EXPECT_EQ(turn_servers_2, allocator_->turn_servers());
 }
@@ -128,9 +136,8 @@
 }
 
-// A negative pool size should just be treated as zero.
-TEST_F(PortAllocatorTest, SetConfigurationWithNegativePoolSizeDoesntCrash) {
-  SetConfigurationWithPoolSize(-1);
-  // No asserts; we're just testing that this doesn't crash.
+// A negative pool size should cause SetConfiguration to fail.
+TEST_F(PortAllocatorTest, SetConfigurationWithNegativePoolSizeFails) {
+  SetConfigurationWithPoolSizeExpectFailure(-1);
 }
 
 // Test that if the candidate pool size is nonzero, pooled sessions are
@@ -162,18 +169,16 @@
   EXPECT_EQ(1, GetAllPooledSessionsReturnCount());
 }
 
-// Test that if the candidate pool size is reduced and increased, but reducing
-// didn't actually destroy any sessions (because they were already given away),
-// increasing the size to its initial value doesn't create a new session.
-TEST_F(PortAllocatorTest, SetConfigurationDoesntCreateExtraSessions) {
+// Test that after the pool starts to be drained, changing the pool size is not
+// allowed.
+TEST_F(PortAllocatorTest, CantChangePoolSizeAfterTakePooledSession) {
   SetConfigurationWithPoolSize(1);
   TakePooledSession();
-  SetConfigurationWithPoolSize(0);
-  SetConfigurationWithPoolSize(1);
-  EXPECT_EQ(0, GetAllPooledSessionsReturnCount());
+  SetConfigurationWithPoolSizeExpectFailure(2);
+  SetConfigurationWithPoolSizeExpectFailure(0);
 }
 
-// According to JSEP, exising pooled sessions should be destroyed and new
+// According to JSEP, existing pooled sessions should be destroyed and new
 // ones created when the ICE servers change.
 TEST_F(PortAllocatorTest,
        SetConfigurationRecreatesPooledSessionsWhenIceServersChange) {