Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2020 The WebRTC project authors. All Rights Reserved. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license |
| 5 | * that can be found in the LICENSE file in the root of the source |
| 6 | * tree. An additional intellectual property rights grant can be found |
| 7 | * in the file PATENTS. All contributing project authors may |
| 8 | * be found in the AUTHORS file in the root of the source tree. |
| 9 | */ |
| 10 | |
| 11 | #include "pc/sctp_data_channel.h" |
| 12 | |
Florent Castelli | 5183f00 | 2021-05-07 11:52:44 | [diff] [blame] | 13 | #include <limits> |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 14 | #include <memory> |
| 15 | #include <string> |
| 16 | #include <utility> |
| 17 | |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 18 | #include "media/sctp/sctp_transport_internal.h" |
Markus Handell | a1b8201 | 2021-05-26 16:56:30 | [diff] [blame] | 19 | #include "pc/proxy.h" |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 20 | #include "rtc_base/checks.h" |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 21 | #include "rtc_base/logging.h" |
Florent Castelli | dcb9ffc | 2021-06-29 12:58:23 | [diff] [blame] | 22 | #include "rtc_base/system/unused.h" |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 23 | #include "rtc_base/thread.h" |
| 24 | |
| 25 | namespace webrtc { |
| 26 | |
| 27 | namespace { |
| 28 | |
// Upper bound on received-data bytes that may be buffered (16 MiB) —
// presumably while no observer is attached to drain them; TODO confirm
// against the queueing code elsewhere in this file.
static size_t kMaxQueuedReceivedDataBytes = 16 * 1024 * 1024;

// Process-wide counter backing GenerateUniqueId().
static std::atomic<int> g_unique_id{0};

// Hands out unique internal channel ids starting at 1. Safe to call from
// any thread (single atomic read-modify-write).
int GenerateUniqueId() {
  return g_unique_id.fetch_add(1) + 1;
}
| 36 | |
// Define proxy for DataChannelInterface.
// The macros come from pc/proxy.h. BYPASS_* entries call straight into the
// implementation on whatever thread the caller is on (the implementation
// either reads const-after-construction data or does its own thread
// marshalling, e.g. state()/error() below). PROXY_SECONDARY_* entries are
// marshalled to the secondary (network) thread.
BEGIN_PROXY_MAP(DataChannel)
PROXY_PRIMARY_THREAD_DESTRUCTOR()
BYPASS_PROXY_METHOD1(void, RegisterObserver, DataChannelObserver*)
BYPASS_PROXY_METHOD0(void, UnregisterObserver)
BYPASS_PROXY_CONSTMETHOD0(std::string, label)
BYPASS_PROXY_CONSTMETHOD0(bool, reliable)
BYPASS_PROXY_CONSTMETHOD0(bool, ordered)
BYPASS_PROXY_CONSTMETHOD0(uint16_t, maxRetransmitTime)
BYPASS_PROXY_CONSTMETHOD0(uint16_t, maxRetransmits)
BYPASS_PROXY_CONSTMETHOD0(absl::optional<int>, maxRetransmitsOpt)
BYPASS_PROXY_CONSTMETHOD0(absl::optional<int>, maxPacketLifeTime)
BYPASS_PROXY_CONSTMETHOD0(std::string, protocol)
BYPASS_PROXY_CONSTMETHOD0(bool, negotiated)
// Can't bypass the proxy since the id may change.
PROXY_SECONDARY_CONSTMETHOD0(int, id)
BYPASS_PROXY_CONSTMETHOD0(Priority, priority)
BYPASS_PROXY_CONSTMETHOD0(DataState, state)
BYPASS_PROXY_CONSTMETHOD0(RTCError, error)
PROXY_SECONDARY_CONSTMETHOD0(uint32_t, messages_sent)
PROXY_SECONDARY_CONSTMETHOD0(uint64_t, bytes_sent)
PROXY_SECONDARY_CONSTMETHOD0(uint32_t, messages_received)
PROXY_SECONDARY_CONSTMETHOD0(uint64_t, bytes_received)
PROXY_SECONDARY_CONSTMETHOD0(uint64_t, buffered_amount)
PROXY_SECONDARY_METHOD0(void, Close)
PROXY_SECONDARY_METHOD1(bool, Send, const DataBuffer&)
BYPASS_PROXY_METHOD2(void,
                     SendAsync,
                     DataBuffer,
                     absl::AnyInvocable<void(RTCError) &&>)
END_PROXY_MAP(DataChannel)
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 68 | } // namespace |
| 69 | |
// Normalizes a caller-supplied DataChannelInit: picks the OPEN-handshake
// role, ignores the id for non-negotiated channels, and sanitizes the legacy
// partial-reliability fields.
InternalDataChannelInit::InternalDataChannelInit(const DataChannelInit& base)
    : DataChannelInit(base), open_handshake_role(kOpener) {
  // If the channel is externally negotiated, do not send the OPEN message.
  if (base.negotiated) {
    open_handshake_role = kNone;
  } else {
    // The channel is *not* externally negotiated, so any caller-provided id
    // must be ignored; a stream id is assigned later.
    // Specified in createDataChannel, WebRTC spec section 6.1 bullet 13.
    id = -1;
  }
  // Backwards compatibility: If maxRetransmits or maxRetransmitTime
  // are negative, the feature is not enabled.
  // Values are clamped to a 16bit range.
  if (maxRetransmits) {
    if (*maxRetransmits < 0) {
      RTC_LOG(LS_ERROR)
          << "Accepting maxRetransmits < 0 for backwards compatibility";
      maxRetransmits = absl::nullopt;
    } else if (*maxRetransmits > std::numeric_limits<uint16_t>::max()) {
      maxRetransmits = std::numeric_limits<uint16_t>::max();
    }
  }

  if (maxRetransmitTime) {
    if (*maxRetransmitTime < 0) {
      RTC_LOG(LS_ERROR)
          << "Accepting maxRetransmitTime < 0 for backwards compatibility";
      maxRetransmitTime = absl::nullopt;
    } else if (*maxRetransmitTime > std::numeric_limits<uint16_t>::max()) {
      maxRetransmitTime = std::numeric_limits<uint16_t>::max();
    }
  }
}
| 103 | |
Tommi | c2429a0 | 2023-03-03 10:28:23 | [diff] [blame] | 104 | bool InternalDataChannelInit::IsValid() const { |
| 105 | if (id < -1) |
| 106 | return false; |
| 107 | |
| 108 | if (maxRetransmits.has_value() && maxRetransmits.value() < 0) |
| 109 | return false; |
| 110 | |
| 111 | if (maxRetransmitTime.has_value() && maxRetransmitTime.value() < 0) |
| 112 | return false; |
| 113 | |
| 114 | // Only one of these can be set. |
| 115 | if (maxRetransmits.has_value() && maxRetransmitTime.has_value()) |
| 116 | return false; |
| 117 | |
| 118 | return true; |
| 119 | } |
| 120 | |
Tommi | 8efaec6 | 2023-03-21 17:45:24 | [diff] [blame] | 121 | StreamId SctpSidAllocator::AllocateSid(rtc::SSLRole role) { |
| 122 | RTC_DCHECK_RUN_ON(&sequence_checker_); |
| 123 | int potential_sid = (role == rtc::SSL_CLIENT) ? 0 : 1; |
| 124 | while (potential_sid <= static_cast<int>(cricket::kMaxSctpSid)) { |
| 125 | StreamId sid(potential_sid); |
| 126 | if (used_sids_.insert(sid).second) |
| 127 | return sid; |
| 128 | potential_sid += 2; |
| 129 | } |
| 130 | RTC_LOG(LS_ERROR) << "SCTP sid allocation pool exhausted."; |
| 131 | return StreamId(); |
| 132 | } |
| 133 | |
| 134 | bool SctpSidAllocator::ReserveSid(StreamId sid) { |
| 135 | RTC_DCHECK_RUN_ON(&sequence_checker_); |
Tommi | 492296c | 2023-03-12 15:59:25 | [diff] [blame] | 136 | if (!sid.HasValue() || sid.stream_id_int() > cricket::kMaxSctpSid) |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 137 | return false; |
Tommi | 492296c | 2023-03-12 15:59:25 | [diff] [blame] | 138 | return used_sids_.insert(sid).second; |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 139 | } |
| 140 | |
// Returns a stream id to the pool so it can be handed out again.
// Erasing an id that was never reserved is a harmless no-op.
void SctpSidAllocator::ReleaseSid(StreamId sid) {
  RTC_DCHECK_RUN_ON(&sequence_checker_);
  used_sids_.erase(sid);
}
| 145 | |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 146 | // A DataChannelObserver implementation that offers backwards compatibility with |
| 147 | // implementations that aren't yet ready to be called back on the network |
| 148 | // thread. This implementation posts events to the signaling thread where |
| 149 | // events are delivered. |
| 150 | // In the class, and together with the `SctpDataChannel` implementation, there's |
| 151 | // special handling for the `state()` property whereby if that property is |
| 152 | // queried on the channel object while inside an event callback, we return |
| 153 | // the state that was active at the time the event was issued. This is to avoid |
| 154 | // a problem with calling the `state()` getter on the proxy, which would do |
| 155 | // a blocking call to the network thread, effectively flushing operations on |
| 156 | // the network thread that could cause the state to change and eventually return |
| 157 | // a misleading or arguably, wrong, state value to the callback implementation. |
| 158 | // As a future improvement to the ObserverAdapter, we could do the same for |
| 159 | // other properties that need to be read on the network thread. Eventually |
| 160 | // all implementations should expect to be called on the network thread though |
| 161 | // and the ObserverAdapter no longer be necessary. |
class SctpDataChannel::ObserverAdapter : public DataChannelObserver {
 public:
  // `channel` must outlive the adapter. `signaling_safety` gates delivery of
  // posted events: once it is flipped to not-alive, queued callbacks are
  // silently dropped.
  explicit ObserverAdapter(
      SctpDataChannel* channel,
      rtc::scoped_refptr<PendingTaskSafetyFlag> signaling_safety)
      : channel_(channel), signaling_safety_(std::move(signaling_safety)) {}

  // True while we're inside a delegate callback on the signaling thread,
  // i.e. while a CachedGetters instance is installed.
  bool IsInsideCallback() const {
    RTC_DCHECK_RUN_ON(signaling_thread());
    return cached_getters_ != nullptr;
  }

  // State captured at the time the currently-executing event was generated.
  // Only valid while IsInsideCallback() is true.
  DataChannelInterface::DataState cached_state() const {
    RTC_DCHECK_RUN_ON(signaling_thread());
    RTC_DCHECK(IsInsideCallback());
    return cached_getters_->state();
  }

  // Error captured at event-generation time; see cached_state().
  RTCError cached_error() const {
    RTC_DCHECK_RUN_ON(signaling_thread());
    RTC_DCHECK(IsInsideCallback());
    return cached_getters_->error();
  }

  // Installs (or clears, with nullptr) the observer the adapter forwards to.
  // Resetting `safety_` invalidates tasks posted for a previous delegate.
  void SetDelegate(DataChannelObserver* delegate) {
    RTC_DCHECK_RUN_ON(signaling_thread());
    delegate_ = delegate;
    safety_.reset(PendingTaskSafetyFlag::CreateDetached());
  }

  // Destroys the adapter on the signaling thread. If we're already there the
  // unique_ptr is simply destroyed on return; otherwise a task owning the
  // adapter is posted and destruction happens when the (empty) task runs.
  static void DeleteOnSignalingThread(
      std::unique_ptr<ObserverAdapter> observer) {
    auto* signaling_thread = observer->signaling_thread();
    if (!signaling_thread->IsCurrent())
      signaling_thread->PostTask([observer = std::move(observer)]() {});
  }

 private:
  // Snapshot of state()/error() taken on the network thread when an event is
  // generated, delivered alongside the event so the signaling-thread callback
  // can read consistent values without blocking on the network thread.
  class CachedGetters {
   public:
    // Runs on the network thread (asserted below); captures the values in
    // the member-initializer list.
    explicit CachedGetters(ObserverAdapter* adapter)
        : adapter_(adapter),
          cached_state_(adapter_->channel_->state()),
          cached_error_(adapter_->channel_->error()) {
      RTC_DCHECK_RUN_ON(adapter->network_thread());
    }

    // Uninstalls this snapshot from the adapter — but only if
    // PrepareForCallback() installed it (i.e. the task actually ran on the
    // signaling thread rather than being dropped).
    ~CachedGetters() {
      if (!was_dropped_) {
        RTC_DCHECK_RUN_ON(adapter_->signaling_thread());
        RTC_DCHECK_EQ(adapter_->cached_getters_, this);
        adapter_->cached_getters_ = nullptr;
      }
    }

    // Installs this snapshot as the "current" one and reports whether the
    // delegate callback should actually be invoked.
    bool PrepareForCallback() {
      RTC_DCHECK_RUN_ON(adapter_->signaling_thread());
      RTC_DCHECK(was_dropped_);
      was_dropped_ = false;
      adapter_->cached_getters_ = this;
      return adapter_->delegate_ && adapter_->signaling_safety_->alive();
    }

    RTCError error() { return cached_error_; }
    DataChannelInterface::DataState state() { return cached_state_; }

   private:
    ObserverAdapter* const adapter_;
    // True until PrepareForCallback() runs; guards the destructor cleanup.
    bool was_dropped_ = true;
    const DataChannelInterface::DataState cached_state_;
    const RTCError cached_error_;
  };

  // DataChannelObserver implementation: each event is received on the
  // network thread and re-posted to the signaling thread together with a
  // CachedGetters snapshot.
  void OnStateChange() override {
    RTC_DCHECK_RUN_ON(network_thread());
    signaling_thread()->PostTask(
        SafeTask(safety_.flag(),
                 [this, cached_state = std::make_unique<CachedGetters>(this)] {
                   RTC_DCHECK_RUN_ON(signaling_thread());
                   if (cached_state->PrepareForCallback())
                     delegate_->OnStateChange();
                 }));
  }

  void OnMessage(const DataBuffer& buffer) override {
    RTC_DCHECK_RUN_ON(network_thread());
    // Note: `buffer` is copied into the task so it stays valid until the
    // signaling thread delivers it.
    signaling_thread()->PostTask(SafeTask(
        safety_.flag(), [this, buffer = buffer,
                         cached_state = std::make_unique<CachedGetters>(this)] {
          RTC_DCHECK_RUN_ON(signaling_thread());
          if (cached_state->PrepareForCallback())
            delegate_->OnMessage(buffer);
        }));
  }

  void OnBufferedAmountChange(uint64_t sent_data_size) override {
    RTC_DCHECK_RUN_ON(network_thread());
    signaling_thread()->PostTask(SafeTask(
        safety_.flag(), [this, sent_data_size,
                         cached_state = std::make_unique<CachedGetters>(this)] {
          RTC_DCHECK_RUN_ON(signaling_thread());
          if (cached_state->PrepareForCallback())
            delegate_->OnBufferedAmountChange(sent_data_size);
        }));
  }

  // The adapter itself is always safe to call on the network thread; it's
  // the wrapped delegate that isn't.
  bool IsOkToCallOnTheNetworkThread() override { return true; }

  rtc::Thread* signaling_thread() const { return signaling_thread_; }
  rtc::Thread* network_thread() const { return channel_->network_thread_; }

  DataChannelObserver* delegate_ RTC_GUARDED_BY(signaling_thread()) = nullptr;
  SctpDataChannel* const channel_;
  // Make sure to keep our own signaling_thread_ pointer to avoid dereferencing
  // `channel_` in the `RTC_DCHECK_RUN_ON` checks on the signaling thread.
  rtc::Thread* const signaling_thread_{channel_->signaling_thread_};
  ScopedTaskSafety safety_;
  rtc::scoped_refptr<PendingTaskSafetyFlag> signaling_safety_;
  // Snapshot for the callback currently executing on the signaling thread,
  // or nullptr when not inside a callback.
  CachedGetters* cached_getters_ RTC_GUARDED_BY(signaling_thread()) = nullptr;
};
| 282 | |
Tommi | 1c0d91f | 2023-03-02 14:42:06 | [diff] [blame] | 283 | // static |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 284 | rtc::scoped_refptr<SctpDataChannel> SctpDataChannel::Create( |
Tommi | 1c0d91f | 2023-03-02 14:42:06 | [diff] [blame] | 285 | rtc::WeakPtr<SctpDataChannelControllerInterface> controller, |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 286 | const std::string& label, |
Tommi | e9aa867 | 2023-03-20 13:43:09 | [diff] [blame] | 287 | bool connected_to_transport, |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 288 | const InternalDataChannelInit& config, |
| 289 | rtc::Thread* signaling_thread, |
| 290 | rtc::Thread* network_thread) { |
Tommi | 9296a16 | 2023-03-21 15:28:52 | [diff] [blame] | 291 | RTC_DCHECK(config.IsValid()); |
| 292 | return rtc::make_ref_counted<SctpDataChannel>( |
Tommi | e9aa867 | 2023-03-20 13:43:09 | [diff] [blame] | 293 | config, std::move(controller), label, connected_to_transport, |
| 294 | signaling_thread, network_thread); |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 295 | } |
| 296 | |
| 297 | // static |
| 298 | rtc::scoped_refptr<DataChannelInterface> SctpDataChannel::CreateProxy( |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 299 | rtc::scoped_refptr<SctpDataChannel> channel, |
| 300 | rtc::scoped_refptr<PendingTaskSafetyFlag> signaling_safety) { |
Tommi | 55f7280 | 2023-03-27 10:39:33 | [diff] [blame] | 301 | // Copy thread params to local variables before `std::move()`. |
Tomas Gunnarsson | 0d5ce62 | 2022-03-18 14:57:15 | [diff] [blame] | 302 | auto* signaling_thread = channel->signaling_thread_; |
Tommi | 55f7280 | 2023-03-27 10:39:33 | [diff] [blame] | 303 | auto* network_thread = channel->network_thread_; |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 304 | channel->observer_adapter_ = std::make_unique<ObserverAdapter>( |
| 305 | channel.get(), std::move(signaling_safety)); |
Tommi | 55f7280 | 2023-03-27 10:39:33 | [diff] [blame] | 306 | return DataChannelProxy::Create(signaling_thread, network_thread, |
| 307 | std::move(channel)); |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 308 | } |
| 309 | |
// Constructed on the network thread (asserted below). Most configuration is
// copied into const-after-construction members so the bypassing accessors
// can read them from any thread.
SctpDataChannel::SctpDataChannel(
    const InternalDataChannelInit& config,
    rtc::WeakPtr<SctpDataChannelControllerInterface> controller,
    const std::string& label,
    bool connected_to_transport,
    rtc::Thread* signaling_thread,
    rtc::Thread* network_thread)
    : signaling_thread_(signaling_thread),
      network_thread_(network_thread),
      id_n_(config.id),
      internal_id_(GenerateUniqueId()),
      label_(label),
      protocol_(config.protocol),
      max_retransmit_time_(config.maxRetransmitTime),
      max_retransmits_(config.maxRetransmits),
      priority_(config.priority),
      negotiated_(config.negotiated),
      ordered_(config.ordered),
      observer_(nullptr),
      controller_(std::move(controller)) {
  RTC_DCHECK_RUN_ON(network_thread_);
  // Since we constructed on the network thread we can't (yet) check the
  // `controller_` pointer since doing so will trigger a thread check.
  RTC_UNUSED(network_thread_);
  RTC_DCHECK(config.IsValid());

  // `network_safety_` doubles as the "connected to a transport" flag; tasks
  // guarded by it are dropped while no transport is attached.
  if (connected_to_transport)
    network_safety_->SetAlive();

  // Map the negotiated OPEN-handshake role onto the initial handshake state.
  switch (config.open_handshake_role) {
    case InternalDataChannelInit::kNone:  // pre-negotiated
      handshake_state_ = kHandshakeReady;
      break;
    case InternalDataChannelInit::kOpener:
      handshake_state_ = kHandshakeShouldSendOpen;
      break;
    case InternalDataChannelInit::kAcker:
      handshake_state_ = kHandshakeShouldSendAck;
      break;
  }
}
| 351 | |
SctpDataChannel::~SctpDataChannel() {
  // The observer adapter owns signaling-thread state, so it must be
  // destroyed there (see ObserverAdapter::DeleteOnSignalingThread).
  if (observer_adapter_)
    ObserverAdapter::DeleteOnSignalingThread(std::move(observer_adapter_));
}
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 356 | |
void SctpDataChannel::RegisterObserver(DataChannelObserver* observer) {
  // Note: at this point, we do not know on which thread we're being called
  // from since this method bypasses the proxy. On Android in particular,
  // registration methods are called from unknown threads.

  // Check if we should set up an observer adapter that will make sure that
  // callbacks are delivered on the signaling thread rather than directly
  // on the network thread.
  const auto* current_thread = rtc::Thread::Current();
  // TODO(webrtc:11547): Eventually all DataChannelObserver implementations
  // should be called on the network thread and IsOkToCallOnTheNetworkThread().
  if (!observer->IsOkToCallOnTheNetworkThread()) {
    RTC_LOG(LS_WARNING) << "DataChannelObserver - adapter needed";
    // The adapter's delegate must be installed on the signaling thread.
    auto prepare_observer = [&]() {
      RTC_DCHECK(observer_adapter_) << "CreateProxy hasn't been called";
      observer_adapter_->SetDelegate(observer);
      return observer_adapter_.get();
    };
    // Instantiate the adapter in the right context and then substitute the
    // observer pointer the SctpDataChannel will call back on, with the adapter.
    if (signaling_thread_ == current_thread) {
      observer = prepare_observer();
    } else {
      observer = signaling_thread_->BlockingCall(std::move(prepare_observer));
    }
  }

  // Now do the observer registration on the network thread. In the common case,
  // we'll do this asynchronously via `PostTask()`. For that reason we grab
  // a reference to ourselves while the task is in flight. We can't use
  // `SafeTask(network_safety_, ...)` for this since we can't assume that we
  // have a transport (network_safety_ represents the transport connection).
  rtc::scoped_refptr<SctpDataChannel> me(this);
  auto register_observer = [me = std::move(me), observer = observer] {
    RTC_DCHECK_RUN_ON(me->network_thread_);
    me->observer_ = observer;
    // Flush anything that was received before an observer was attached.
    me->DeliverQueuedReceivedData();
  };

  if (network_thread_ == current_thread) {
    register_observer();
  } else {
    network_thread_->PostTask(std::move(register_observer));
  }
}
| 402 | |
void SctpDataChannel::UnregisterObserver() {
  // Note: As with `RegisterObserver`, the proxy is being bypassed.
  const auto* current_thread = rtc::Thread::Current();
  // Callers must not be invoking the unregistration from the network thread
  // (assuming a multi-threaded environment where we have a dedicated network
  // thread). That would indicate non-network related work happening on the
  // network thread or that unregistration is being done from within a callback
  // (without unwinding the stack, which is a requirement).
  // The network thread is not allowed to make blocking calls to the signaling
  // thread, so that would blow up if attempted. Since we support an adapter
  // for observers that are not safe to call on the network thread, we do
  // need to check+free it on the signaling thread.
  RTC_DCHECK(current_thread != network_thread_ ||
             network_thread_ == signaling_thread_);

  // Step 1: drop the observer pointer on the network thread. This is done
  // with a blocking call so no callback can fire after we return.
  auto unregister_observer = [&] {
    RTC_DCHECK_RUN_ON(network_thread_);
    observer_ = nullptr;
  };

  if (current_thread == network_thread_) {
    unregister_observer();
  } else {
    network_thread_->BlockingCall(std::move(unregister_observer));
  }

  // Step 2: detach the adapter's delegate on the signaling thread (the
  // delegate pointer is guarded by that thread).
  auto clear_observer = [&]() {
    if (observer_adapter_)
      observer_adapter_->SetDelegate(nullptr);
  };

  if (current_thread != signaling_thread_) {
    signaling_thread_->BlockingCall(std::move(clear_observer));
  } else {
    clear_observer();
  }
}
| 440 | |
// Bypasses the proxy (see the proxy map above); `label_` is set at
// construction and never modified, so any thread may read it.
std::string SctpDataChannel::label() const {
  return label_;
}
| 444 | |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 445 | bool SctpDataChannel::reliable() const { |
| 446 | // May be called on any thread. |
Tommi | 492296c | 2023-03-12 15:59:25 | [diff] [blame] | 447 | return !max_retransmits_ && !max_retransmit_time_; |
| 448 | } |
| 449 | |
// `ordered_` is const after construction; safe from any thread.
bool SctpDataChannel::ordered() const {
  return ordered_;
}
| 453 | |
| 454 | uint16_t SctpDataChannel::maxRetransmitTime() const { |
| 455 | return max_retransmit_time_ ? *max_retransmit_time_ |
| 456 | : static_cast<uint16_t>(-1); |
| 457 | } |
| 458 | |
| 459 | uint16_t SctpDataChannel::maxRetransmits() const { |
| 460 | return max_retransmits_ ? *max_retransmits_ : static_cast<uint16_t>(-1); |
| 461 | } |
| 462 | |
// Const after construction; nullopt means "no lifetime limit configured".
absl::optional<int> SctpDataChannel::maxPacketLifeTime() const {
  return max_retransmit_time_;
}
| 466 | |
// Const after construction; nullopt means "no retransmit limit configured".
absl::optional<int> SctpDataChannel::maxRetransmitsOpt() const {
  return max_retransmits_;
}
| 470 | |
// `protocol_` is const after construction; safe from any thread.
std::string SctpDataChannel::protocol() const {
  return protocol_;
}
| 474 | |
// `negotiated_` is const after construction; safe from any thread.
bool SctpDataChannel::negotiated() const {
  return negotiated_;
}
| 478 | |
// Unlike most accessors, the stream id can change after construction (it may
// be allocated later), so this is read on the network thread — the proxy map
// above marshals calls here rather than bypassing.
int SctpDataChannel::id() const {
  RTC_DCHECK_RUN_ON(network_thread_);
  return id_n_.stream_id_int();
}
| 483 | |
| 484 | Priority SctpDataChannel::priority() const { |
| 485 | return priority_ ? *priority_ : Priority::kLow; |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 486 | } |
| 487 | |
// Bytes queued locally but not yet handed to the transport.
uint64_t SctpDataChannel::buffered_amount() const {
  RTC_DCHECK_RUN_ON(network_thread_);
  return queued_send_data_.byte_count();
}
| 492 | |
// Begins the closing procedure. Idempotent: a second call while closing or
// closed is a no-op.
void SctpDataChannel::Close() {
  RTC_DCHECK_RUN_ON(network_thread_);
  if (state_ == kClosing || state_ == kClosed)
    return;
  SetState(kClosing);
  // Will send queued data before beginning the underlying closing procedure.
  UpdateState();
}
| 501 | |
SctpDataChannel::DataState SctpDataChannel::state() const {
  // Note: The proxy is bypassed for the `state()` accessor. This is to allow
  // observer callbacks to query what the new state is from within a state
  // update notification without having to do a blocking call to the network
  // thread from within a callback. This also makes it so that the returned
  // state is guaranteed to be the new state that provoked the state change
  // notification, whereby a blocking call to the network thread might end up
  // getting put behind other messages on the network thread and eventually
  // fetch a different state value (since pending messages might cause the
  // state to change in the meantime).
  const auto* current_thread = rtc::Thread::Current();
  // Inside an adapter-delivered callback, return the snapshot captured when
  // the event was generated.
  if (current_thread == signaling_thread_ && observer_adapter_ &&
      observer_adapter_->IsInsideCallback()) {
    return observer_adapter_->cached_state();
  }

  // Otherwise read the authoritative value on the network thread, blocking
  // if we're not already there.
  auto return_state = [&] {
    RTC_DCHECK_RUN_ON(network_thread_);
    return state_;
  };

  return current_thread == network_thread_
             ? return_state()
             : network_thread_->BlockingCall(std::move(return_state));
}
| 527 | |
| 528 | RTCError SctpDataChannel::error() const { |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 529 | const auto* current_thread = rtc::Thread::Current(); |
| 530 | if (current_thread == signaling_thread_ && observer_adapter_ && |
| 531 | observer_adapter_->IsInsideCallback()) { |
| 532 | return observer_adapter_->cached_error(); |
| 533 | } |
| 534 | |
| 535 | auto return_error = [&] { |
| 536 | RTC_DCHECK_RUN_ON(network_thread_); |
| 537 | return error_; |
| 538 | }; |
| 539 | |
| 540 | return current_thread == network_thread_ |
| 541 | ? return_error() |
| 542 | : network_thread_->BlockingCall(std::move(return_error)); |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 543 | } |
| 544 | |
// Stats counter; maintained and read on the network thread.
uint32_t SctpDataChannel::messages_sent() const {
  RTC_DCHECK_RUN_ON(network_thread_);
  return messages_sent_;
}
| 549 | |
// Stats counter; maintained and read on the network thread.
uint64_t SctpDataChannel::bytes_sent() const {
  RTC_DCHECK_RUN_ON(network_thread_);
  return bytes_sent_;
}
| 554 | |
// Stats counter; maintained and read on the network thread.
uint32_t SctpDataChannel::messages_received() const {
  RTC_DCHECK_RUN_ON(network_thread_);
  return messages_received_;
}
| 559 | |
// Stats counter; maintained and read on the network thread.
uint64_t SctpDataChannel::bytes_received() const {
  RTC_DCHECK_RUN_ON(network_thread_);
  return bytes_received_;
}
| 564 | |
| 565 | bool SctpDataChannel::Send(const DataBuffer& buffer) { |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 566 | RTC_DCHECK_RUN_ON(network_thread_); |
Tommi | a50a81a | 2023-04-11 15:32:34 | [diff] [blame] | 567 | RTCError err = SendImpl(buffer); |
| 568 | if (err.type() == RTCErrorType::INVALID_STATE || |
| 569 | err.type() == RTCErrorType::RESOURCE_EXHAUSTED) { |
| 570 | return false; |
| 571 | } |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 572 | |
Tommi | a50a81a | 2023-04-11 15:32:34 | [diff] [blame] | 573 | // Always return true for SCTP DataChannel per the spec. |
| 574 | return true; |
| 575 | } |
| 576 | |
// RTC_RUN_ON(network_thread_);
// Shared implementation behind Send() and SendAsync(). Also records the
// outcome in `error_` on the failure paths so the error() accessor reflects
// the most recent send attempt.
RTCError SctpDataChannel::SendImpl(DataBuffer buffer) {
  // Sending is only legal while the channel is open.
  if (state_ != kOpen) {
    error_ = RTCError(RTCErrorType::INVALID_STATE);
    return error_;
  }

  // If the queue is non-empty, we're waiting for SignalReadyToSend,
  // so just add to the end of the queue and keep waiting.
  if (!queued_send_data_.Empty()) {
    // QueueSendDataMessage failing indicates the send buffer is full.
    error_ = QueueSendDataMessage(buffer)
                 ? RTCError::OK()
                 : RTCError(RTCErrorType::RESOURCE_EXHAUSTED);
    return error_;
  }

  // Queue is empty: hand the message straight to the transport.
  return SendDataMessage(buffer, true);
}
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 595 | |
Tommi | a50a81a | 2023-04-11 15:32:34 | [diff] [blame] | 596 | void SctpDataChannel::SendAsync( |
| 597 | DataBuffer buffer, |
| 598 | absl::AnyInvocable<void(RTCError) &&> on_complete) { |
| 599 | // Note: at this point, we do not know on which thread we're being called |
| 600 | // since this method bypasses the proxy. On Android the thread might be VM |
| 601 | // owned, on other platforms it might be the signaling thread, or in Chrome |
| 602 | // it can be the JS thread. We also don't know if it's consistently the same |
| 603 | // thread. So we always post to the network thread (even if the current thread |
| 604 | // might be the network thread - in theory a call could even come from within |
| 605 | // the `on_complete` callback). |
| 606 | network_thread_->PostTask(SafeTask( |
| 607 | network_safety_, [this, buffer = std::move(buffer), |
| 608 | on_complete = std::move(on_complete)]() mutable { |
| 609 | RTC_DCHECK_RUN_ON(network_thread_); |
| 610 | RTCError err = SendImpl(std::move(buffer)); |
| 611 | if (on_complete) |
| 612 | std::move(on_complete)(err); |
| 613 | })); |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 614 | } |
| 615 | |
// Assigns the SCTP stream id for this channel. May only happen once, while
// the channel is still connecting and before the OPEN handshake has advanced
// to waiting for an ACK.
void SctpDataChannel::SetSctpSid_n(StreamId sid) {
  RTC_DCHECK_RUN_ON(network_thread_);
  RTC_DCHECK(!id_n_.HasValue());
  RTC_DCHECK(sid.HasValue());
  RTC_DCHECK_NE(handshake_state_, kHandshakeWaitingForAck);
  RTC_DCHECK_EQ(state_, kConnecting);
  id_n_ = sid;
}
| 624 | |
Tommi | 00264ca | 2023-03-14 12:21:06 | [diff] [blame] | 625 | void SctpDataChannel::OnClosingProcedureStartedRemotely() { |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 626 | RTC_DCHECK_RUN_ON(network_thread_); |
Tommi | 00264ca | 2023-03-14 12:21:06 | [diff] [blame] | 627 | if (state_ != kClosing && state_ != kClosed) { |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 628 | // Don't bother sending queued data since the side that initiated the |
| 629 | // closure wouldn't receive it anyway. See crbug.com/559394 for a lengthy |
| 630 | // discussion about this. |
| 631 | queued_send_data_.Clear(); |
| 632 | queued_control_data_.Clear(); |
| 633 | // Just need to change state to kClosing, SctpTransport will handle the |
| 634 | // rest of the closing procedure and OnClosingProcedureComplete will be |
| 635 | // called later. |
| 636 | started_closing_procedure_ = true; |
| 637 | SetState(kClosing); |
| 638 | } |
| 639 | } |
| 640 | |
// Called by the transport once the SCTP closing procedure for this channel's
// stream has finished; completes the transition to kClosed.
void SctpDataChannel::OnClosingProcedureComplete() {
  RTC_DCHECK_RUN_ON(network_thread_);
  // If the closing procedure is complete, we should have finished sending
  // all pending data and transitioned to kClosing already.
  RTC_DCHECK_EQ(state_, kClosing);
  RTC_DCHECK(queued_send_data_.Empty());
  SetState(kClosed);
}
| 649 | |
// Called when the channel is attached to a transport; (re)arms the safety
// flag so tasks posted to the network thread for this channel may run.
void SctpDataChannel::OnTransportChannelCreated() {
  RTC_DCHECK_RUN_ON(network_thread_);
  network_safety_->SetAlive();
}
| 654 | |
// Called when the underlying transport has become permanently unusable;
// closes the channel abruptly with the provided error.
void SctpDataChannel::OnTransportChannelClosed(RTCError error) {
  RTC_DCHECK_RUN_ON(network_thread_);
  // The SctpTransport is unusable, which could come from multiple reasons:
  // - the SCTP m= section was rejected
  // - the DTLS transport is closed
  // - the SCTP transport is closed
  CloseAbruptlyWithError(std::move(error));
}
| 663 | |
| 664 | DataChannelStats SctpDataChannel::GetStats() const { |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 665 | RTC_DCHECK_RUN_ON(network_thread_); |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 666 | DataChannelStats stats{internal_id_, id(), label(), |
| 667 | protocol(), state(), messages_sent(), |
| 668 | messages_received(), bytes_sent(), bytes_received()}; |
| 669 | return stats; |
| 670 | } |
| 671 | |
Tommi | 4e1c957 | 2023-03-15 11:36:20 | [diff] [blame] | 672 | void SctpDataChannel::OnDataReceived(DataMessageType type, |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 673 | const rtc::CopyOnWriteBuffer& payload) { |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 674 | RTC_DCHECK_RUN_ON(network_thread_); |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 675 | |
Tommi | 4e1c957 | 2023-03-15 11:36:20 | [diff] [blame] | 676 | if (type == DataMessageType::kControl) { |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 677 | if (handshake_state_ != kHandshakeWaitingForAck) { |
| 678 | // Ignore it if we are not expecting an ACK message. |
| 679 | RTC_LOG(LS_WARNING) |
| 680 | << "DataChannel received unexpected CONTROL message, sid = " |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 681 | << id_n_.stream_id_int(); |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 682 | return; |
| 683 | } |
| 684 | if (ParseDataChannelOpenAckMessage(payload)) { |
| 685 | // We can send unordered as soon as we receive the ACK message. |
| 686 | handshake_state_ = kHandshakeReady; |
| 687 | RTC_LOG(LS_INFO) << "DataChannel received OPEN_ACK message, sid = " |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 688 | << id_n_.stream_id_int(); |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 689 | } else { |
| 690 | RTC_LOG(LS_WARNING) |
| 691 | << "DataChannel failed to parse OPEN_ACK message, sid = " |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 692 | << id_n_.stream_id_int(); |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 693 | } |
| 694 | return; |
| 695 | } |
| 696 | |
Tommi | 4e1c957 | 2023-03-15 11:36:20 | [diff] [blame] | 697 | RTC_DCHECK(type == DataMessageType::kBinary || |
| 698 | type == DataMessageType::kText); |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 699 | |
Tommi | 934a88a | 2023-03-15 13:34:56 | [diff] [blame] | 700 | RTC_DLOG(LS_VERBOSE) << "DataChannel received DATA message, sid = " |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 701 | << id_n_.stream_id_int(); |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 702 | // We can send unordered as soon as we receive any DATA message since the |
| 703 | // remote side must have received the OPEN (and old clients do not send |
| 704 | // OPEN_ACK). |
| 705 | if (handshake_state_ == kHandshakeWaitingForAck) { |
| 706 | handshake_state_ = kHandshakeReady; |
| 707 | } |
| 708 | |
Tommi | 4e1c957 | 2023-03-15 11:36:20 | [diff] [blame] | 709 | bool binary = (type == DataMessageType::kBinary); |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 710 | auto buffer = std::make_unique<DataBuffer>(payload, binary); |
| 711 | if (state_ == kOpen && observer_) { |
| 712 | ++messages_received_; |
| 713 | bytes_received_ += buffer->size(); |
| 714 | observer_->OnMessage(*buffer.get()); |
| 715 | } else { |
| 716 | if (queued_received_data_.byte_count() + payload.size() > |
| 717 | kMaxQueuedReceivedDataBytes) { |
| 718 | RTC_LOG(LS_ERROR) << "Queued received data exceeds the max buffer size."; |
| 719 | |
| 720 | queued_received_data_.Clear(); |
| 721 | CloseAbruptlyWithError( |
| 722 | RTCError(RTCErrorType::RESOURCE_EXHAUSTED, |
| 723 | "Queued received data exceeds the max buffer size.")); |
| 724 | |
| 725 | return; |
| 726 | } |
| 727 | queued_received_data_.PushBack(std::move(buffer)); |
| 728 | } |
| 729 | } |
| 730 | |
// Called when the transport becomes writable for this channel; flushes any
// queued control and data messages, then re-evaluates the channel state.
void SctpDataChannel::OnTransportReady() {
  RTC_DCHECK_RUN_ON(network_thread_);
  RTC_DCHECK(connected_to_transport());
  RTC_DCHECK(id_n_.HasValue());

  SendQueuedControlMessages();
  SendQueuedDataMessages();

  UpdateState();
}
| 741 | |
// Tears the channel down immediately with `error`, discarding all queued
// outgoing data. No-op if the channel is already closed.
void SctpDataChannel::CloseAbruptlyWithError(RTCError error) {
  RTC_DCHECK_RUN_ON(network_thread_);

  if (state_ == kClosed) {
    return;
  }

  // Cancel any pending tasks posted for this channel on the network thread.
  network_safety_->SetNotAlive();

  // Closing abruptly means any queued data gets thrown away.
  queued_send_data_.Clear();
  queued_control_data_.Clear();

  // Still go to "kClosing" before "kClosed", since observers may be expecting
  // that.
  SetState(kClosing);
  error_ = std::move(error);
  SetState(kClosed);
}
| 761 | |
| 762 | void SctpDataChannel::CloseAbruptlyWithDataChannelFailure( |
| 763 | const std::string& message) { |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 764 | RTC_DCHECK_RUN_ON(network_thread_); |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 765 | RTCError error(RTCErrorType::OPERATION_ERROR_WITH_DATA, message); |
| 766 | error.set_error_detail(RTCErrorDetailType::DATA_CHANNEL_FAILURE); |
| 767 | CloseAbruptlyWithError(std::move(error)); |
| 768 | } |
| 769 | |
// RTC_RUN_ON(network_thread_).
// Drives state transitions based on the current state, the transport
// connection, and the OPEN/OPEN_ACK handshake progress.
void SctpDataChannel::UpdateState() {
  // UpdateState determines what to do from a few state variables. Include
  // all conditions required for each state transition here for
  // clarity. OnTransportReady(true) will send any queued data and then invoke
  // UpdateState().

  switch (state_) {
    case kConnecting: {
      if (connected_to_transport() && controller_) {
        if (handshake_state_ == kHandshakeShouldSendOpen) {
          // In-band negotiation: announce the channel to the remote peer.
          rtc::CopyOnWriteBuffer payload;
          WriteDataChannelOpenMessage(label_, protocol_, priority_, ordered_,
                                      max_retransmits_, max_retransmit_time_,
                                      &payload);
          SendControlMessage(payload);
        } else if (handshake_state_ == kHandshakeShouldSendAck) {
          // Remote-initiated channel: acknowledge the OPEN we received.
          rtc::CopyOnWriteBuffer payload;
          WriteDataChannelOpenAckMessage(&payload);
          SendControlMessage(payload);
        }
        if (handshake_state_ == kHandshakeReady ||
            handshake_state_ == kHandshakeWaitingForAck) {
          SetState(kOpen);
          // If we have received buffers before the channel got writable.
          // Deliver them now.
          DeliverQueuedReceivedData();
        }
      } else {
        RTC_DCHECK(!id_n_.HasValue());
      }
      break;
    }
    case kOpen: {
      break;
    }
    case kClosing: {
      if (connected_to_transport() && controller_) {
        // Wait for all queued data to be sent before beginning the closing
        // procedure.
        if (queued_send_data_.Empty() && queued_control_data_.Empty()) {
          // For SCTP data channels, we need to wait for the closing procedure
          // to complete; after calling RemoveSctpDataStream,
          // OnClosingProcedureComplete will end up called asynchronously
          // afterwards.
          if (!started_closing_procedure_ && id_n_.HasValue()) {
            started_closing_procedure_ = true;
            controller_->RemoveSctpDataStream(id_n_);
          }
        }
      } else {
        // When we're not connected to a transport, we'll transition
        // directly to the `kClosed` state from here.
        queued_send_data_.Clear();
        queued_control_data_.Clear();
        SetState(kClosed);
      }
      break;
    }
    case kClosed:
      break;
  }
}
| 833 | |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 834 | // RTC_RUN_ON(network_thread_). |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 835 | void SctpDataChannel::SetState(DataState state) { |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 836 | if (state_ == state) { |
| 837 | return; |
| 838 | } |
| 839 | |
| 840 | state_ = state; |
| 841 | if (observer_) { |
| 842 | observer_->OnStateChange(); |
| 843 | } |
Tommi | d2afbaf | 2023-03-02 09:51:16 | [diff] [blame] | 844 | |
Tommi | 1c0d91f | 2023-03-02 14:42:06 | [diff] [blame] | 845 | if (controller_) |
Tommi | d2afbaf | 2023-03-02 09:51:16 | [diff] [blame] | 846 | controller_->OnChannelStateChanged(this, state_); |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 847 | } |
| 848 | |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 849 | // RTC_RUN_ON(network_thread_). |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 850 | void SctpDataChannel::DeliverQueuedReceivedData() { |
Yuwei Huang | 2083894 | 2023-05-09 23:01:33 | [diff] [blame] | 851 | if (!observer_ || state_ != kOpen) { |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 852 | return; |
| 853 | } |
| 854 | |
| 855 | while (!queued_received_data_.Empty()) { |
| 856 | std::unique_ptr<DataBuffer> buffer = queued_received_data_.PopFront(); |
| 857 | ++messages_received_; |
| 858 | bytes_received_ += buffer->size(); |
| 859 | observer_->OnMessage(*buffer); |
| 860 | } |
| 861 | } |
| 862 | |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 863 | // RTC_RUN_ON(network_thread_). |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 864 | void SctpDataChannel::SendQueuedDataMessages() { |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 865 | if (queued_send_data_.Empty()) { |
| 866 | return; |
| 867 | } |
| 868 | |
| 869 | RTC_DCHECK(state_ == kOpen || state_ == kClosing); |
| 870 | |
| 871 | while (!queued_send_data_.Empty()) { |
| 872 | std::unique_ptr<DataBuffer> buffer = queued_send_data_.PopFront(); |
Tommi | a50a81a | 2023-04-11 15:32:34 | [diff] [blame] | 873 | if (!SendDataMessage(*buffer, false).ok()) { |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 874 | // Return the message to the front of the queue if sending is aborted. |
| 875 | queued_send_data_.PushFront(std::move(buffer)); |
| 876 | break; |
| 877 | } |
| 878 | } |
| 879 | } |
| 880 | |
// RTC_RUN_ON(network_thread_).
// Hands one user message to the transport. On RESOURCE_EXHAUSTED the message
// may be queued for retry (when `queue_if_blocked` is set); any other send
// failure closes the channel abruptly. Records the outcome in `error_`.
RTCError SctpDataChannel::SendDataMessage(const DataBuffer& buffer,
                                          bool queue_if_blocked) {
  SendDataParams send_params;
  if (!controller_) {
    error_ = RTCError(RTCErrorType::INVALID_STATE);
    return error_;
  }

  send_params.ordered = ordered_;
  // Send as ordered if it is still going through OPEN/ACK signaling.
  if (handshake_state_ != kHandshakeReady && !ordered_) {
    send_params.ordered = true;
    RTC_DLOG(LS_VERBOSE)
        << "Sending data as ordered for unordered DataChannel "
           "because the OPEN_ACK message has not been received.";
  }

  send_params.max_rtx_count = max_retransmits_;
  send_params.max_rtx_ms = max_retransmit_time_;
  send_params.type =
      buffer.binary ? DataMessageType::kBinary : DataMessageType::kText;

  error_ = controller_->SendData(id_n_, send_params, buffer.data);
  if (error_.ok()) {
    ++messages_sent_;
    bytes_sent_ += buffer.size();

    // Notify the observer about buffered-amount changes for non-empty sends.
    if (observer_ && buffer.size() > 0) {
      observer_->OnBufferedAmountChange(buffer.size());
    }
    return error_;
  }

  if (error_.type() == RTCErrorType::RESOURCE_EXHAUSTED) {
    if (!queue_if_blocked)
      return error_;

    if (QueueSendDataMessage(buffer)) {
      error_ = RTCError::OK();
      return error_;
    }
  }
  // Close the channel if the error is not SDR_BLOCK, or if queuing the
  // message failed.
  RTC_LOG(LS_ERROR) << "Closing the DataChannel due to a failure to send data, "
                       "send_result = "
                    << ToString(error_.type()) << ":" << error_.message();
  CloseAbruptlyWithError(
      RTCError(RTCErrorType::NETWORK_ERROR, "Failure to send data"));

  return error_;
}
| 934 | |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 935 | // RTC_RUN_ON(network_thread_). |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 936 | bool SctpDataChannel::QueueSendDataMessage(const DataBuffer& buffer) { |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 937 | size_t start_buffered_amount = queued_send_data_.byte_count(); |
Florent Castelli | a563a2a | 2021-10-18 09:46:21 | [diff] [blame] | 938 | if (start_buffered_amount + buffer.size() > |
| 939 | DataChannelInterface::MaxSendQueueSize()) { |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 940 | RTC_LOG(LS_ERROR) << "Can't buffer any more data for the data channel."; |
Tommi | e25c122 | 2023-04-11 09:46:24 | [diff] [blame] | 941 | error_ = RTCError(RTCErrorType::RESOURCE_EXHAUSTED); |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 942 | return false; |
| 943 | } |
| 944 | queued_send_data_.PushBack(std::make_unique<DataBuffer>(buffer)); |
| 945 | return true; |
| 946 | } |
| 947 | |
Tommi | f9e13f8 | 2023-04-06 19:21:45 | [diff] [blame] | 948 | // RTC_RUN_ON(network_thread_). |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 949 | void SctpDataChannel::SendQueuedControlMessages() { |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 950 | PacketQueue control_packets; |
| 951 | control_packets.Swap(&queued_control_data_); |
| 952 | |
| 953 | while (!control_packets.Empty()) { |
| 954 | std::unique_ptr<DataBuffer> buf = control_packets.PopFront(); |
| 955 | SendControlMessage(buf->data); |
| 956 | } |
| 957 | } |
| 958 | |
// RTC_RUN_ON(network_thread_).
// Sends an OPEN or OPEN_ACK control message on the channel's stream and
// advances the handshake state accordingly. Returns true on success; queues
// the message on RESOURCE_EXHAUSTED and closes the channel abruptly on any
// other failure.
bool SctpDataChannel::SendControlMessage(const rtc::CopyOnWriteBuffer& buffer) {
  RTC_DCHECK(connected_to_transport());
  RTC_DCHECK(id_n_.HasValue());
  RTC_DCHECK(controller_);

  bool is_open_message = handshake_state_ == kHandshakeShouldSendOpen;
  // An OPEN message is only sent for in-band (non-negotiated) channels.
  RTC_DCHECK(!is_open_message || !negotiated_);

  SendDataParams send_params;
  // Send data as ordered before we receive any message from the remote peer to
  // make sure the remote peer will not receive any data before it receives the
  // OPEN message.
  send_params.ordered = ordered_ || is_open_message;
  send_params.type = DataMessageType::kControl;

  RTCError err = controller_->SendData(id_n_, send_params, buffer);
  if (err.ok()) {
    RTC_DLOG(LS_VERBOSE) << "Sent CONTROL message on channel "
                         << id_n_.stream_id_int();

    if (handshake_state_ == kHandshakeShouldSendAck) {
      handshake_state_ = kHandshakeReady;
    } else if (handshake_state_ == kHandshakeShouldSendOpen) {
      handshake_state_ = kHandshakeWaitingForAck;
    }
  } else if (err.type() == RTCErrorType::RESOURCE_EXHAUSTED) {
    // Transport is blocked; retry later via SendQueuedControlMessages().
    queued_control_data_.PushBack(std::make_unique<DataBuffer>(buffer, true));
  } else {
    RTC_LOG(LS_ERROR) << "Closing the DataChannel due to a failure to send"
                         " the CONTROL message, send_result = "
                      << ToString(err.type());
    err.set_message("Failed to send a CONTROL message");
    CloseAbruptlyWithError(err);
  }
  return err.ok();
}
| 996 | |
| 997 | // static |
| 998 | void SctpDataChannel::ResetInternalIdAllocatorForTesting(int new_value) { |
| 999 | g_unique_id = new_value; |
| 1000 | } |
| 1001 | |
Taylor Brandstetter | 3a034e1 | 2020-07-09 22:32:34 | [diff] [blame] | 1002 | } // namespace webrtc |