/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "call/receive_time_calculator.h"

#include <stdlib.h>

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

#include "absl/types/optional.h"
#include "rtc_base/random.h"
#include "rtc_base/time_utils.h"
#include "test/gtest.h"

namespace webrtc {
namespace test {
namespace {

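// Base class for the emulated clocks below. The clock starts at a random
// offset and advances with the queried wall time, optionally accumulating a
// constant relative drift that is applied in whole-microsecond steps.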
class EmulatedClock {
 public:
  explicit EmulatedClock(int seed, float drift = 0.0f)
      : random_(seed), clock_us_(random_.Rand<uint32_t>()), drift_(drift) {}
  virtual ~EmulatedClock() = default;
  int64_t GetClockUs() const { return clock_us_; }

 protected:
  int64_t UpdateClock(int64_t time_us) {
    if (!last_query_us_)
      last_query_us_ = time_us;
    int64_t skip_us = time_us - *last_query_us_;
    accumulated_drift_us_ += skip_us * drift_;
    int64_t drift_correction_us = static_cast<int64_t>(accumulated_drift_us_);
    accumulated_drift_us_ -= drift_correction_us;
    clock_us_ += skip_us + drift_correction_us;
    last_query_us_ = time_us;
    return skip_us;
  }
  Random random_;

 private:
  int64_t clock_us_;
  absl::optional<int64_t> last_query_us_;
  float drift_;
  float accumulated_drift_us_ = 0;
};

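// Emulates a monotonic clock that can stall: once a stall starts, queries
// keep returning the stall recovery time until the underlying clock has
// caught up with it. Stalls begin at random with probability
// kChanceOfStallPerUs per elapsed microsecond, or on demand via
// ForceStallUs().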
class EmulatedMonotoneousClock : public EmulatedClock {
 public:
  explicit EmulatedMonotoneousClock(int seed) : EmulatedClock(seed) {}
  ~EmulatedMonotoneousClock() = default;

  int64_t Query(int64_t time_us) {
    int64_t skip_us = UpdateClock(time_us);

    // In a stall.
    if (stall_recovery_time_us_ > 0) {
      if (GetClockUs() > stall_recovery_time_us_) {
        stall_recovery_time_us_ = 0;
        return GetClockUs();
      } else {
        return stall_recovery_time_us_;
      }
    }

    // Check if we enter a stall.
    for (int k = 0; k < skip_us; ++k) {
      if (random_.Rand<double>() < kChanceOfStallPerUs) {
        int64_t stall_duration_us =
            static_cast<int64_t>(random_.Rand<float>() * kMaxStallDurationUs);
        stall_recovery_time_us_ = GetClockUs() + stall_duration_us;
        return stall_recovery_time_us_;
      }
    }
    return GetClockUs();
  }

  void ForceStallUs() {
    int64_t stall_duration_us =
        static_cast<int64_t>(random_.Rand<float>() * kMaxStallDurationUs);
    stall_recovery_time_us_ = GetClockUs() + stall_duration_us;
  }

  bool Stalled() const { return stall_recovery_time_us_ > 0; }

  int64_t GetRemainingStall(int64_t time_us) const {
    return stall_recovery_time_us_ > 0 ? stall_recovery_time_us_ - GetClockUs()
                                       : 0;
  }

  const int64_t kMaxStallDurationUs = rtc::kNumMicrosecsPerSec;

 private:
  const float kChanceOfStallPerUs = 5e-6f;
  int64_t stall_recovery_time_us_ = 0;
};

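// Emulates a non-monotonic system clock that, besides the optional drift from
// EmulatedClock, occasionally jumps forwards or backwards by up to
// kMaxAbsResetUs. The resets are pregenerated on a fixed millisecond grid so
// that Query() can also report whether a reset happened since the last query.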
class EmulatedNonMonotoneousClock : public EmulatedClock {
 public:
  EmulatedNonMonotoneousClock(int seed, int64_t duration_us, float drift = 0)
      : EmulatedClock(seed, drift) {
    Pregenerate(duration_us);
  }
  ~EmulatedNonMonotoneousClock() = default;

  void Pregenerate(int64_t duration_us) {
    int64_t time_since_reset_us = kMinTimeBetweenResetsUs;
    int64_t clock_offset_us = 0;
    for (int64_t time_us = 0; time_us < duration_us;
         time_us += kResolutionUs) {
      int64_t skip_us = UpdateClock(time_us);
      time_since_reset_us += skip_us;
      int64_t reset_us = 0;
      if (time_since_reset_us >= kMinTimeBetweenResetsUs) {
        for (int k = 0; k < skip_us; ++k) {
          if (random_.Rand<double>() < kChanceOfResetPerUs) {
            reset_us = static_cast<int64_t>(2 * random_.Rand<float>() *
                                            kMaxAbsResetUs) -
                       kMaxAbsResetUs;
            clock_offset_us += reset_us;
            time_since_reset_us = 0;
            break;
          }
        }
      }
      pregenerated_clock_.emplace_back(GetClockUs() + clock_offset_us);
      resets_us_.emplace_back(reset_us);
    }
  }

  int64_t Query(int64_t time_us) {
    size_t ixStart =
        (last_reset_query_time_us_ + (kResolutionUs >> 1)) / kResolutionUs + 1;
    size_t ixEnd = (time_us + (kResolutionUs >> 1)) / kResolutionUs;
    if (ixEnd >= pregenerated_clock_.size())
      return -1;
    last_reset_size_us_ = 0;
    for (size_t ix = ixStart; ix <= ixEnd; ++ix) {
      if (resets_us_[ix] != 0) {
        last_reset_size_us_ = resets_us_[ix];
      }
    }
    last_reset_query_time_us_ = time_us;
    return pregenerated_clock_[ixEnd];
  }

  bool WasReset() const { return last_reset_size_us_ != 0; }
  bool WasNegativeReset() const { return last_reset_size_us_ < 0; }
  int64_t GetLastResetUs() const { return last_reset_size_us_; }

 private:
  const float kChanceOfResetPerUs = 1e-6f;
  const int64_t kMaxAbsResetUs = rtc::kNumMicrosecsPerSec;
  const int64_t kMinTimeBetweenResetsUs = 3 * rtc::kNumMicrosecsPerSec;
  const int64_t kResolutionUs = rtc::kNumMicrosecsPerMillisec;
  int64_t last_reset_query_time_us_ = 0;
  int64_t last_reset_size_us_ = 0;
  std::vector<int64_t> pregenerated_clock_;
  std::vector<int64_t> resets_us_;
};

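// Feeds the ReceiveTimeCalculator with socket, system, and monotonic
// timestamps taken from the emulated clocks and checks, over several random
// seeds, that the reconstructed receive time stays within the expected
// tolerances of the true packet time.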
TEST(ClockRepair, NoClockDrift) {
  const int kSeeds = 10;
  const int kFirstSeed = 1;
  const int64_t kRuntimeUs = 10 * rtc::kNumMicrosecsPerSec;
  const float kDrift = 0.0f;
  const int64_t kMaxPacketInterarrivalUs = 50 * rtc::kNumMicrosecsPerMillisec;
  for (int seed = kFirstSeed; seed < kSeeds + kFirstSeed; ++seed) {
    EmulatedMonotoneousClock monotone_clock(seed);
    EmulatedNonMonotoneousClock non_monotone_clock(
        seed + 1, kRuntimeUs + rtc::kNumMicrosecsPerSec, kDrift);
    ReceiveTimeCalculator reception_time_tracker;
    int64_t corrected_clock_0 = 0;
    int64_t reset_during_stall_tol_us = 0;
    bool initial_clock_stall = true;
    int64_t accumulated_upper_bound_tolerance_us = 0;
    int64_t accumulated_lower_bound_tolerance_us = 0;
    Random random(1);
    monotone_clock.ForceStallUs();
    int64_t last_time_us = 0;
    bool add_tolerance_on_next_packet = false;
    int64_t monotone_noise_us = 1000;

    for (int64_t time_us = 0; time_us < kRuntimeUs;
         time_us += static_cast<int64_t>(random.Rand<float>() *
                                         kMaxPacketInterarrivalUs)) {
      int64_t socket_time_us = non_monotone_clock.Query(time_us);
      int64_t monotone_us = monotone_clock.Query(time_us) +
                            2 * random.Rand<float>() * monotone_noise_us -
                            monotone_noise_us;
      int64_t system_time_us = non_monotone_clock.Query(
          time_us + monotone_clock.GetRemainingStall(time_us));

      int64_t corrected_clock_us = reception_time_tracker.ReconcileReceiveTimes(
          socket_time_us, system_time_us, monotone_us);
      if (time_us == 0)
        corrected_clock_0 = corrected_clock_us;

      if (add_tolerance_on_next_packet)
        accumulated_lower_bound_tolerance_us -= (time_us - last_time_us);

      // Perfect repair cannot be achieved if the non-monotone clock resets
      // during a monotone clock stall.
      add_tolerance_on_next_packet = false;
      if (monotone_clock.Stalled() && non_monotone_clock.WasReset()) {
        reset_during_stall_tol_us =
            std::max(reset_during_stall_tol_us, time_us - last_time_us);
        if (non_monotone_clock.WasNegativeReset()) {
          add_tolerance_on_next_packet = true;
        }
        if (initial_clock_stall && !non_monotone_clock.WasNegativeReset()) {
          // Positive resets during an initial clock stall cannot be repaired,
          // and the error will propagate through the rest of the trace.
          accumulated_upper_bound_tolerance_us +=
              std::abs(non_monotone_clock.GetLastResetUs());
        }
      } else {
        reset_during_stall_tol_us = 0;
        initial_clock_stall = false;
      }
      int64_t err = corrected_clock_us - corrected_clock_0 - time_us;

      // Resets during stalls may lead to small errors temporarily.
      int64_t lower_tol_us = accumulated_lower_bound_tolerance_us -
                             reset_during_stall_tol_us - monotone_noise_us -
                             2 * rtc::kNumMicrosecsPerMillisec;
      EXPECT_GE(err, lower_tol_us);
      int64_t upper_tol_us = accumulated_upper_bound_tolerance_us +
                             monotone_noise_us +
                             2 * rtc::kNumMicrosecsPerMillisec;
      EXPECT_LE(err, upper_tol_us);

      last_time_us = time_us;
    }
  }
}
}  // namespace
}  // namespace test
}  // namespace webrtc