/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
10
#include "call/receive_time_calculator.h"

#include <stdlib.h>

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

#include "absl/types/optional.h"
#include "rtc_base/random.h"
#include "rtc_base/time_utils.h"
#include "test/gtest.h"
#include "test/scoped_key_value_config.h"
Sebastian Janssonb34556e2018-03-21 13:38:3225
26namespace webrtc {
27namespace test {
28namespace {
29
Christoffer Rodbro76ad1542018-10-12 09:15:0930class EmulatedClock {
31 public:
32 explicit EmulatedClock(int seed, float drift = 0.0f)
33 : random_(seed), clock_us_(random_.Rand<uint32_t>()), drift_(drift) {}
34 virtual ~EmulatedClock() = default;
35 int64_t GetClockUs() const { return clock_us_; }
36
37 protected:
38 int64_t UpdateClock(int64_t time_us) {
39 if (!last_query_us_)
40 last_query_us_ = time_us;
41 int64_t skip_us = time_us - *last_query_us_;
42 accumulated_drift_us_ += skip_us * drift_;
43 int64_t drift_correction_us = static_cast<int64_t>(accumulated_drift_us_);
44 accumulated_drift_us_ -= drift_correction_us;
45 clock_us_ += skip_us + drift_correction_us;
46 last_query_us_ = time_us;
47 return skip_us;
48 }
49 Random random_;
50
51 private:
52 int64_t clock_us_;
53 absl::optional<int64_t> last_query_us_;
54 float drift_;
55 float accumulated_drift_us_ = 0;
56};
57
58class EmulatedMonotoneousClock : public EmulatedClock {
59 public:
60 explicit EmulatedMonotoneousClock(int seed) : EmulatedClock(seed) {}
61 ~EmulatedMonotoneousClock() = default;
62
63 int64_t Query(int64_t time_us) {
64 int64_t skip_us = UpdateClock(time_us);
65
66 // In a stall
67 if (stall_recovery_time_us_ > 0) {
68 if (GetClockUs() > stall_recovery_time_us_) {
69 stall_recovery_time_us_ = 0;
70 return GetClockUs();
71 } else {
72 return stall_recovery_time_us_;
73 }
74 }
75
76 // Check if we enter a stall
77 for (int k = 0; k < skip_us; ++k) {
78 if (random_.Rand<double>() < kChanceOfStallPerUs) {
79 int64_t stall_duration_us =
80 static_cast<int64_t>(random_.Rand<float>() * kMaxStallDurationUs);
81 stall_recovery_time_us_ = GetClockUs() + stall_duration_us;
82 return stall_recovery_time_us_;
83 }
84 }
85 return GetClockUs();
86 }
87
88 void ForceStallUs() {
89 int64_t stall_duration_us =
90 static_cast<int64_t>(random_.Rand<float>() * kMaxStallDurationUs);
91 stall_recovery_time_us_ = GetClockUs() + stall_duration_us;
92 }
93
94 bool Stalled() const { return stall_recovery_time_us_ > 0; }
95
96 int64_t GetRemainingStall(int64_t time_us) const {
97 return stall_recovery_time_us_ > 0 ? stall_recovery_time_us_ - GetClockUs()
98 : 0;
99 }
100
101 const int64_t kMaxStallDurationUs = rtc::kNumMicrosecsPerSec;
102
103 private:
104 const float kChanceOfStallPerUs = 5e-6f;
105 int64_t stall_recovery_time_us_ = 0;
106};
107
108class EmulatedNonMonotoneousClock : public EmulatedClock {
109 public:
110 EmulatedNonMonotoneousClock(int seed, int64_t duration_us, float drift = 0)
111 : EmulatedClock(seed, drift) {
112 Pregenerate(duration_us);
113 }
114 ~EmulatedNonMonotoneousClock() = default;
115
116 void Pregenerate(int64_t duration_us) {
117 int64_t time_since_reset_us = kMinTimeBetweenResetsUs;
118 int64_t clock_offset_us = 0;
119 for (int64_t time_us = 0; time_us < duration_us; time_us += kResolutionUs) {
120 int64_t skip_us = UpdateClock(time_us);
121 time_since_reset_us += skip_us;
122 int64_t reset_us = 0;
123 if (time_since_reset_us >= kMinTimeBetweenResetsUs) {
124 for (int k = 0; k < skip_us; ++k) {
125 if (random_.Rand<double>() < kChanceOfResetPerUs) {
126 reset_us = static_cast<int64_t>(2 * random_.Rand<float>() *
127 kMaxAbsResetUs) -
128 kMaxAbsResetUs;
129 clock_offset_us += reset_us;
130 time_since_reset_us = 0;
131 break;
132 }
133 }
134 }
135 pregenerated_clock_.emplace_back(GetClockUs() + clock_offset_us);
136 resets_us_.emplace_back(reset_us);
137 }
138 }
139
140 int64_t Query(int64_t time_us) {
141 size_t ixStart =
142 (last_reset_query_time_us_ + (kResolutionUs >> 1)) / kResolutionUs + 1;
143 size_t ixEnd = (time_us + (kResolutionUs >> 1)) / kResolutionUs;
144 if (ixEnd >= pregenerated_clock_.size())
145 return -1;
146 last_reset_size_us_ = 0;
147 for (size_t ix = ixStart; ix <= ixEnd; ++ix) {
148 if (resets_us_[ix] != 0) {
149 last_reset_size_us_ = resets_us_[ix];
150 }
151 }
152 last_reset_query_time_us_ = time_us;
153 return pregenerated_clock_[ixEnd];
154 }
155
156 bool WasReset() const { return last_reset_size_us_ != 0; }
157 bool WasNegativeReset() const { return last_reset_size_us_ < 0; }
158 int64_t GetLastResetUs() const { return last_reset_size_us_; }
159
160 private:
161 const float kChanceOfResetPerUs = 1e-6f;
162 const int64_t kMaxAbsResetUs = rtc::kNumMicrosecsPerSec;
163 const int64_t kMinTimeBetweenResetsUs = 3 * rtc::kNumMicrosecsPerSec;
164 const int64_t kResolutionUs = rtc::kNumMicrosecsPerMillisec;
165 int64_t last_reset_query_time_us_ = 0;
166 int64_t last_reset_size_us_ = 0;
167 std::vector<int64_t> pregenerated_clock_;
168 std::vector<int64_t> resets_us_;
169};
170
// Feeds ReceiveTimeCalculator with readings from an emulated stalling
// monotone clock and an emulated resetting non-monotone clock (no drift),
// and checks that the reconciled receive time tracks the reference timeline
// within tolerances that are widened only around clock stalls and resets.
TEST(ClockRepair, NoClockDrift) {
  webrtc::test::ScopedKeyValueConfig field_trials;
  const int kSeeds = 10;
  const int kFirstSeed = 1;
  const int64_t kRuntimeUs = 10 * rtc::kNumMicrosecsPerSec;
  const float kDrift = 0.0f;
  const int64_t kMaxPacketInterarrivalUs = 50 * rtc::kNumMicrosecsPerMillisec;
  for (int seed = kFirstSeed; seed < kSeeds + kFirstSeed; ++seed) {
    EmulatedMonotoneousClock monotone_clock(seed);
    EmulatedNonMonotoneousClock non_monotone_clock(
        seed + 1, kRuntimeUs + rtc::kNumMicrosecsPerSec, kDrift);
    ReceiveTimeCalculator reception_time_tracker(field_trials);
    int64_t corrected_clock_0 = 0;
    int64_t reset_during_stall_tol_us = 0;
    bool initial_clock_stall = true;
    int64_t accumulated_upper_bound_tolerance_us = 0;
    int64_t accumulated_lower_bound_tolerance_us = 0;
    Random random(1);
    // Start each trace inside a stall to exercise initial-stall repair.
    monotone_clock.ForceStallUs();
    int64_t last_time_us = 0;
    bool add_tolerance_on_next_packet = false;
    int64_t monotone_noise_us = 1000;

    // Emulate packet arrivals with random interarrival times.
    for (int64_t time_us = 0; time_us < kRuntimeUs;
         time_us += static_cast<int64_t>(random.Rand<float>() *
                                         kMaxPacketInterarrivalUs)) {
      int64_t socket_time_us = non_monotone_clock.Query(time_us);
      // Monotone reading with +/- monotone_noise_us of uniform jitter.
      int64_t monotone_us = monotone_clock.Query(time_us) +
                            2 * random.Rand<float>() * monotone_noise_us -
                            monotone_noise_us;
      // System time is sampled when the stall (if any) has recovered.
      int64_t system_time_us = non_monotone_clock.Query(
          time_us + monotone_clock.GetRemainingStall(time_us));

      int64_t corrected_clock_us = reception_time_tracker.ReconcileReceiveTimes(
          socket_time_us, system_time_us, monotone_us);
      if (time_us == 0)
        corrected_clock_0 = corrected_clock_us;

      if (add_tolerance_on_next_packet)
        accumulated_lower_bound_tolerance_us -= (time_us - last_time_us);

      // Perfect repair cannot be achieved if non-monotone clock resets during
      // a monotone clock stall.
      add_tolerance_on_next_packet = false;
      if (monotone_clock.Stalled() && non_monotone_clock.WasReset()) {
        reset_during_stall_tol_us =
            std::max(reset_during_stall_tol_us, time_us - last_time_us);
        if (non_monotone_clock.WasNegativeReset()) {
          add_tolerance_on_next_packet = true;
        }
        if (initial_clock_stall && !non_monotone_clock.WasNegativeReset()) {
          // Positive resets during an initial clock stall cannot be repaired
          // and error will propagate through rest of trace.
          accumulated_upper_bound_tolerance_us +=
              std::abs(non_monotone_clock.GetLastResetUs());
        }
      } else {
        reset_during_stall_tol_us = 0;
        initial_clock_stall = false;
      }
      // Error relative to the ideal timeline anchored at the first packet.
      int64_t err = corrected_clock_us - corrected_clock_0 - time_us;

      // Resets during stalls may lead to small errors temporarily.
      int64_t lower_tol_us = accumulated_lower_bound_tolerance_us -
                             reset_during_stall_tol_us - monotone_noise_us -
                             2 * rtc::kNumMicrosecsPerMillisec;
      EXPECT_GE(err, lower_tol_us);
      int64_t upper_tol_us = accumulated_upper_bound_tolerance_us +
                             monotone_noise_us +
                             2 * rtc::kNumMicrosecsPerMillisec;
      EXPECT_LE(err, upper_tol_us);

      last_time_us = time_us;
    }
  }
}
247} // namespace
Sebastian Janssonb34556e2018-03-21 13:38:32248} // namespace test
Sebastian Janssonb34556e2018-03-21 13:38:32249} // namespace webrtc