Replace DataSize and DataRate factories with newer versions

This is a search-and-replace change:
find . -type f \( -name "*.h" -o -name "*.cc" \) | xargs sed -i -e "s/DataSize::Bytes<\(.*\)>()/DataSize::Bytes(\1)/g"
find . -type f \( -name "*.h" -o -name "*.cc" \) | xargs sed -i -e "s/DataSize::bytes/DataSize::Bytes/g"
find . -type f \( -name "*.h" -o -name "*.cc" \) | xargs sed -i -e "s/DataRate::BitsPerSec<\(.*\)>()/DataRate::BitsPerSec(\1)/g"
find . -type f \( -name "*.h" -o -name "*.cc" \) | xargs sed -i -e "s/DataRate::BytesPerSec<\(.*\)>()/DataRate::BytesPerSec(\1)/g"
find . -type f \( -name "*.h" -o -name "*.cc" \) | xargs sed -i -e "s/DataRate::KilobitsPerSec<\(.*\)>()/DataRate::KilobitsPerSec(\1)/g"
find . -type f \( -name "*.h" -o -name "*.cc" \) | xargs sed -i -e "s/DataRate::bps/DataRate::BitsPerSec/g"
find . -type f \( -name "*.h" -o -name "*.cc" \) | xargs sed -i -e "s/DataRate::kbps/DataRate::KilobitsPerSec/g"
git cl format
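
For illustration, the substitutions above rewrite call sites like the following (hypothetical examples, not taken from the diff below; the actual mappings follow directly from the sed patterns):

  DataSize::Bytes<1200>()  ->  DataSize::Bytes(1200)
  DataSize::bytes(1200)    ->  DataSize::Bytes(1200)
  DataRate::bps(300000)    ->  DataRate::BitsPerSec(300000)
  DataRate::kbps(300)      ->  DataRate::KilobitsPerSec(300)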

Bug: webrtc:9709
Change-Id: I65aaca69474ba038c1fe2dd8dc30d3f8e7b94c29
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/168647
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Commit-Queue: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#30545}
diff --git a/call/bitrate_allocator_unittest.cc b/call/bitrate_allocator_unittest.cc
index e15f913..1479a47 100644
--- a/call/bitrate_allocator_unittest.cc
+++ b/call/bitrate_allocator_unittest.cc
@@ -30,19 +30,19 @@
                         uint32_t max_padding_rate_bps,
                         uint32_t max_allocatable_rate_bps) {
   return AllOf(Field(&BitrateAllocationLimits::min_allocatable_rate,
-                     DataRate::bps(min_allocatable_rate_bps)),
+                     DataRate::BitsPerSec(min_allocatable_rate_bps)),
                Field(&BitrateAllocationLimits::max_allocatable_rate,
-                     DataRate::bps(max_allocatable_rate_bps)),
+                     DataRate::BitsPerSec(max_allocatable_rate_bps)),
                Field(&BitrateAllocationLimits::max_padding_rate,
-                     DataRate::bps(max_padding_rate_bps)));
+                     DataRate::BitsPerSec(max_padding_rate_bps)));
 }
 
 auto AllocationLimitsEq(uint32_t min_allocatable_rate_bps,
                         uint32_t max_padding_rate_bps) {
   return AllOf(Field(&BitrateAllocationLimits::min_allocatable_rate,
-                     DataRate::bps(min_allocatable_rate_bps)),
+                     DataRate::BitsPerSec(min_allocatable_rate_bps)),
                Field(&BitrateAllocationLimits::max_padding_rate,
-                     DataRate::bps(max_padding_rate_bps)));
+                     DataRate::BitsPerSec(max_padding_rate_bps)));
 }
 
 class MockLimitObserver : public BitrateAllocator::LimitObserver {
@@ -89,7 +89,7 @@
   // The timestamp is just for log output, keeping it fixed just means fewer log
   // messages in the test.
   msg.at_time = Timestamp::Seconds(10000);
-  msg.target_rate = DataRate::bps(target_bitrate_bps);
+  msg.target_rate = DataRate::BitsPerSec(target_bitrate_bps);
   msg.stable_target_rate = msg.target_rate;
   msg.network_estimate.bandwidth = msg.target_rate;
   msg.network_estimate.loss_rate_ratio = fraction_loss / 255.0;