Use webrtc namespace specifier instead of rtc/cricket in modules/video_capture and modules/video_coding
WebRTC has unified all of its namespaces into webrtc, so the rtc:: and
cricket:: namespace qualifiers need to be replaced with webrtc::. The
change was generated using a combination of clang AST rewriting tools
and sed.
This CL was uploaded by git cl split.
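
A minimal illustration (not part of this CL) of what the spelling change
means at a call site; the helper function below is hypothetical, and only
the webrtc:: qualifiers reflect the actual change:

    #include "modules/video_capture/video_capture_factory.h"

    // Hypothetical caller, for illustration only.
    webrtc::scoped_refptr<webrtc::VideoCaptureModule> OpenDevice(
        const char* unique_name) {
      // Previously spelled rtc::scoped_refptr<VideoCaptureModule>; the
      // rtc:: (and cricket::) qualifiers now resolve under webrtc::.
      return webrtc::VideoCaptureFactory::Create(unique_name);
    }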
Bug: webrtc:42232595
Change-Id: Id51a855f7b76c7423d3422fcd4eca41964444d79
No-Iwyu: LSC
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/386663
Auto-Submit: Evan Shrubsole <eshr@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Commit-Queue: Ilya Nikolaevskiy <ilnik@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#44412}
diff --git a/modules/video_capture/linux/device_info_pipewire.h b/modules/video_capture/linux/device_info_pipewire.h
index 4da0c7a..0187e47 100644
--- a/modules/video_capture/linux/device_info_pipewire.h
+++ b/modules/video_capture/linux/device_info_pipewire.h
@@ -44,7 +44,7 @@
int32_t Init() override;
private:
- rtc::scoped_refptr<PipeWireSession> pipewire_session_;
+ webrtc::scoped_refptr<PipeWireSession> pipewire_session_;
};
} // namespace videocapturemodule
} // namespace webrtc
diff --git a/modules/video_capture/linux/pipewire_session.cc b/modules/video_capture/linux/pipewire_session.cc
index a5c5682..990bfde 100644
--- a/modules/video_capture/linux/pipewire_session.cc
+++ b/modules/video_capture/linux/pipewire_session.cc
@@ -103,8 +103,8 @@
vid_str = spa_dict_lookup(info->props, SPA_KEY_DEVICE_VENDOR_ID);
pid_str = spa_dict_lookup(info->props, SPA_KEY_DEVICE_PRODUCT_ID);
- vid = vid_str ? rtc::StringToNumber<int>(vid_str) : std::nullopt;
- pid = pid_str ? rtc::StringToNumber<int>(pid_str) : std::nullopt;
+ vid = vid_str ? webrtc::StringToNumber<int>(vid_str) : std::nullopt;
+ pid = pid_str ? webrtc::StringToNumber<int>(pid_str) : std::nullopt;
if (vid && pid) {
char model_str[10];
diff --git a/modules/video_capture/linux/pipewire_session.h b/modules/video_capture/linux/pipewire_session.h
index 5420a7f..aec268e 100644
--- a/modules/video_capture/linux/pipewire_session.h
+++ b/modules/video_capture/linux/pipewire_session.h
@@ -89,7 +89,7 @@
PipeWireSession* session_;
};
-class PipeWireSession : public rtc::RefCountedNonVirtual<PipeWireSession> {
+class PipeWireSession : public webrtc::RefCountedNonVirtual<PipeWireSession> {
public:
PipeWireSession();
~PipeWireSession();
diff --git a/modules/video_capture/linux/video_capture_linux.cc b/modules/video_capture/linux/video_capture_linux.cc
index a2ea218..0b57cfe 100644
--- a/modules/video_capture/linux/video_capture_linux.cc
+++ b/modules/video_capture/linux/video_capture_linux.cc
@@ -34,9 +34,9 @@
namespace webrtc {
namespace videocapturemodule {
-rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
+scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
const char* deviceUniqueId) {
- auto implementation = rtc::make_ref_counted<VideoCaptureModuleV4L2>();
+ auto implementation = make_ref_counted<VideoCaptureModuleV4L2>();
if (implementation->Init(deviceUniqueId) != 0)
return nullptr;
@@ -44,20 +44,20 @@
return implementation;
}
-rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
+scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
VideoCaptureOptions* options,
const char* deviceUniqueId) {
#if defined(WEBRTC_USE_PIPEWIRE)
if (options->allow_pipewire()) {
auto implementation =
- rtc::make_ref_counted<VideoCaptureModulePipeWire>(options);
+ webrtc::make_ref_counted<VideoCaptureModulePipeWire>(options);
if (implementation->Init(deviceUniqueId) == 0)
return implementation;
}
#endif
if (options->allow_v4l2()) {
- auto implementation = rtc::make_ref_counted<VideoCaptureModuleV4L2>();
+ auto implementation = make_ref_counted<VideoCaptureModuleV4L2>();
if (implementation->Init(deviceUniqueId) == 0)
return implementation;
diff --git a/modules/video_capture/linux/video_capture_pipewire.h b/modules/video_capture/linux/video_capture_pipewire.h
index 789f203..240e7da 100644
--- a/modules/video_capture/linux/video_capture_pipewire.h
+++ b/modules/video_capture/linux/video_capture_pipewire.h
@@ -44,7 +44,7 @@
void OnFormatChanged(const struct spa_pod* format);
void ProcessBuffers();
- const rtc::scoped_refptr<PipeWireSession> session_
+ const webrtc::scoped_refptr<PipeWireSession> session_
RTC_GUARDED_BY(api_checker_);
bool initialized_ RTC_GUARDED_BY(api_checker_);
bool started_ RTC_GUARDED_BY(api_lock_);
diff --git a/modules/video_capture/test/video_capture_unittest.cc b/modules/video_capture/test/video_capture_unittest.cc
index 7873dee..2019f0b 100644
--- a/modules/video_capture/test/video_capture_unittest.cc
+++ b/modules/video_capture/test/video_capture_unittest.cc
@@ -130,7 +130,7 @@
int64_t last_render_time_ms_;
int incoming_frames_;
int timing_warnings_;
- rtc::scoped_refptr<webrtc::VideoFrameBuffer> last_frame_;
+ webrtc::scoped_refptr<webrtc::VideoFrameBuffer> last_frame_;
webrtc::VideoRotation rotate_frame_;
};
@@ -145,7 +145,7 @@
ASSERT_GT(number_of_devices_, 0u);
}
- rtc::scoped_refptr<VideoCaptureModule> OpenVideoCaptureDevice(
+ webrtc::scoped_refptr<VideoCaptureModule> OpenVideoCaptureDevice(
unsigned int device,
webrtc::VideoSinkInterface<webrtc::VideoFrame>* callback) {
char device_name[256];
@@ -154,7 +154,7 @@
EXPECT_EQ(0, device_info_->GetDeviceName(device, device_name, 256,
unique_name, 256));
- rtc::scoped_refptr<VideoCaptureModule> module(
+ webrtc::scoped_refptr<VideoCaptureModule> module(
VideoCaptureFactory::Create(unique_name));
if (module.get() == NULL)
return nullptr;
@@ -191,7 +191,7 @@
for (int i = 0; i < 5; ++i) {
int64_t start_time = webrtc::TimeMillis();
TestVideoCaptureCallback capture_observer;
- rtc::scoped_refptr<VideoCaptureModule> module(
+ webrtc::scoped_refptr<VideoCaptureModule> module(
OpenVideoCaptureDevice(0, &capture_observer));
ASSERT_TRUE(module.get() != NULL);
@@ -235,7 +235,7 @@
TEST_F(VideoCaptureTest, MAYBE_Capabilities) {
TestVideoCaptureCallback capture_observer;
- rtc::scoped_refptr<VideoCaptureModule> module(
+ webrtc::scoped_refptr<VideoCaptureModule> module(
OpenVideoCaptureDevice(0, &capture_observer));
ASSERT_TRUE(module.get() != NULL);
@@ -299,7 +299,7 @@
}
TestVideoCaptureCallback capture_observer1;
- rtc::scoped_refptr<VideoCaptureModule> module1(
+ webrtc::scoped_refptr<VideoCaptureModule> module1(
OpenVideoCaptureDevice(0, &capture_observer1));
ASSERT_TRUE(module1.get() != NULL);
VideoCaptureCapability capability1;
@@ -314,7 +314,7 @@
capture_observer1.SetExpectedCapability(capability1);
TestVideoCaptureCallback capture_observer2;
- rtc::scoped_refptr<VideoCaptureModule> module2(
+ webrtc::scoped_refptr<VideoCaptureModule> module2(
OpenVideoCaptureDevice(1, &capture_observer2));
ASSERT_TRUE(module1.get() != NULL);
@@ -351,7 +351,7 @@
#endif
TEST_F(VideoCaptureTest, MAYBE_ConcurrentAccess) {
TestVideoCaptureCallback capture_observer1;
- rtc::scoped_refptr<VideoCaptureModule> module1(
+ webrtc::scoped_refptr<VideoCaptureModule> module1(
OpenVideoCaptureDevice(0, &capture_observer1));
ASSERT_TRUE(module1.get() != NULL);
VideoCaptureCapability capability;
@@ -359,7 +359,7 @@
capture_observer1.SetExpectedCapability(capability);
TestVideoCaptureCallback capture_observer2;
- rtc::scoped_refptr<VideoCaptureModule> module2(
+ webrtc::scoped_refptr<VideoCaptureModule> module2(
OpenVideoCaptureDevice(0, &capture_observer2));
ASSERT_TRUE(module2.get() != NULL);
capture_observer2.SetExpectedCapability(capability);
diff --git a/modules/video_capture/video_capture_factory.cc b/modules/video_capture/video_capture_factory.cc
index e4afdb4..8354067 100644
--- a/modules/video_capture/video_capture_factory.cc
+++ b/modules/video_capture/video_capture_factory.cc
@@ -14,7 +14,7 @@
namespace webrtc {
-rtc::scoped_refptr<VideoCaptureModule> VideoCaptureFactory::Create(
+scoped_refptr<VideoCaptureModule> VideoCaptureFactory::Create(
[[maybe_unused]] const char* deviceUniqueIdUTF8) {
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_MAC)
return nullptr;
@@ -23,7 +23,7 @@
#endif
}
-rtc::scoped_refptr<VideoCaptureModule> VideoCaptureFactory::Create(
+scoped_refptr<VideoCaptureModule> VideoCaptureFactory::Create(
[[maybe_unused]] VideoCaptureOptions* options,
[[maybe_unused]] const char* deviceUniqueIdUTF8) {
// This is only implemented on pure Linux and WEBRTC_LINUX is defined for
diff --git a/modules/video_capture/video_capture_factory.h b/modules/video_capture/video_capture_factory.h
index 62b4067..c24eaf5 100644
--- a/modules/video_capture/video_capture_factory.h
+++ b/modules/video_capture/video_capture_factory.h
@@ -29,9 +29,9 @@
// id - unique identifier of this video capture module object.
// deviceUniqueIdUTF8 - name of the device.
// Available names can be found by using GetDeviceName
- static rtc::scoped_refptr<VideoCaptureModule> Create(
+ static scoped_refptr<VideoCaptureModule> Create(
const char* deviceUniqueIdUTF8);
- static rtc::scoped_refptr<VideoCaptureModule> Create(
+ static scoped_refptr<VideoCaptureModule> Create(
VideoCaptureOptions* options,
const char* deviceUniqueIdUTF8);
diff --git a/modules/video_capture/video_capture_factory_null.cc b/modules/video_capture/video_capture_factory_null.cc
index 7808d19..77b7f4b 100644
--- a/modules/video_capture/video_capture_factory_null.cc
+++ b/modules/video_capture/video_capture_factory_null.cc
@@ -18,7 +18,7 @@
return nullptr;
}
-rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
+webrtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
const char* device_id) {
return nullptr;
}
diff --git a/modules/video_capture/video_capture_impl.cc b/modules/video_capture/video_capture_impl.cc
index ef2df2b..06f9bd316 100644
--- a/modules/video_capture/video_capture_impl.cc
+++ b/modules/video_capture/video_capture_impl.cc
@@ -186,7 +186,7 @@
// Setting absolute height (in case it was negative).
// In Windows, the image starts bottom left, instead of top left.
// Setting a negative source height, inverts the image (within LibYuv).
- rtc::scoped_refptr<I420Buffer> buffer = I420Buffer::Create(
+ scoped_refptr<I420Buffer> buffer = I420Buffer::Create(
target_width, target_height, stride_y, stride_uv, stride_uv);
libyuv::RotationMode rotation_mode = libyuv::kRotate0;
diff --git a/modules/video_capture/video_capture_impl.h b/modules/video_capture/video_capture_impl.h
index bb4fa16..3ec832f 100644
--- a/modules/video_capture/video_capture_impl.h
+++ b/modules/video_capture/video_capture_impl.h
@@ -45,9 +45,9 @@
* deviceUniqueIdUTF8 - name of the device. Available names can be found by
* using GetDeviceName
*/
- static rtc::scoped_refptr<VideoCaptureModule> Create(
+ static scoped_refptr<VideoCaptureModule> Create(
const char* deviceUniqueIdUTF8);
- static rtc::scoped_refptr<VideoCaptureModule> Create(
+ static scoped_refptr<VideoCaptureModule> Create(
VideoCaptureOptions* options,
const char* deviceUniqueIdUTF8);
diff --git a/modules/video_capture/video_capture_options.cc b/modules/video_capture/video_capture_options.cc
index 203d0a6..64cd621 100644
--- a/modules/video_capture/video_capture_options.cc
+++ b/modules/video_capture/video_capture_options.cc
@@ -32,7 +32,7 @@
#if defined(WEBRTC_USE_PIPEWIRE)
if (allow_pipewire_) {
pipewire_session_ =
- rtc::make_ref_counted<videocapturemodule::PipeWireSession>();
+ webrtc::make_ref_counted<videocapturemodule::PipeWireSession>();
pipewire_session_->Init(callback, pipewire_fd_);
return;
}
@@ -46,7 +46,7 @@
}
#if defined(WEBRTC_USE_PIPEWIRE)
-rtc::scoped_refptr<videocapturemodule::PipeWireSession>
+webrtc::scoped_refptr<videocapturemodule::PipeWireSession>
VideoCaptureOptions::pipewire_session() {
return pipewire_session_;
}
diff --git a/modules/video_capture/video_capture_options.h b/modules/video_capture/video_capture_options.h
index 6f72f79..d1d4ebe 100644
--- a/modules/video_capture/video_capture_options.h
+++ b/modules/video_capture/video_capture_options.h
@@ -64,7 +64,7 @@
bool allow_pipewire() const { return allow_pipewire_; }
void set_allow_pipewire(bool allow) { allow_pipewire_ = allow; }
void set_pipewire_fd(int fd) { pipewire_fd_ = fd; }
- rtc::scoped_refptr<videocapturemodule::PipeWireSession> pipewire_session();
+ webrtc::scoped_refptr<videocapturemodule::PipeWireSession> pipewire_session();
#endif
private:
@@ -74,7 +74,7 @@
#if defined(WEBRTC_USE_PIPEWIRE)
bool allow_pipewire_ = false;
int pipewire_fd_ = kInvalidPipeWireFd;
- rtc::scoped_refptr<videocapturemodule::PipeWireSession> pipewire_session_;
+ webrtc::scoped_refptr<videocapturemodule::PipeWireSession> pipewire_session_;
#endif
};
diff --git a/modules/video_capture/windows/device_info_ds.cc b/modules/video_capture/windows/device_info_ds.cc
index d0d274c..c4bef2d 100644
--- a/modules/video_capture/windows/device_info_ds.cc
+++ b/modules/video_capture/windows/device_info_ds.cc
@@ -74,7 +74,7 @@
RTC_DLOG(LS_INFO) << __FUNCTION__
<< ": CoInitializeEx(NULL, COINIT_APARTMENTTHREADED)"
" => RPC_E_CHANGED_MODE, error 0x"
- << rtc::ToHex(hr);
+ << webrtc::ToHex(hr);
}
}
}
@@ -92,7 +92,7 @@
IID_ICreateDevEnum, (void**)&_dsDevEnum);
if (hr != NOERROR) {
RTC_LOG(LS_INFO) << "Failed to create CLSID_SystemDeviceEnum, error 0x"
- << rtc::ToHex(hr);
+ << webrtc::ToHex(hr);
return -1;
}
return 0;
@@ -131,7 +131,7 @@
&_dsMonikerDevEnum, 0);
if (hr != NOERROR) {
RTC_LOG(LS_INFO) << "Failed to enumerate CLSID_SystemDeviceEnum, error 0x"
- << rtc::ToHex(hr) << ". No webcam exist?";
+ << webrtc::ToHex(hr) << ". No webcam exist?";
return 0;
}
@@ -223,7 +223,7 @@
&_dsMonikerDevEnum, 0);
if (hr != NOERROR) {
RTC_LOG(LS_INFO) << "Failed to enumerate CLSID_SystemDeviceEnum, error 0x"
- << rtc::ToHex(hr) << ". No webcam exist?";
+ << webrtc::ToHex(hr) << ". No webcam exist?";
return 0;
}
_dsMonikerDevEnum->Reset();
diff --git a/modules/video_capture/windows/help_functions_ds.h b/modules/video_capture/windows/help_functions_ds.h
index a16f953..aaec288 100644
--- a/modules/video_capture/windows/help_functions_ds.h
+++ b/modules/video_capture/windows/help_functions_ds.h
@@ -62,14 +62,14 @@
HRESULT CopyMediaType(AM_MEDIA_TYPE* target, const AM_MEDIA_TYPE* source);
// Helper function to make using scoped_refptr with COM interface pointers
-// a little less awkward. rtc::scoped_refptr doesn't support the & operator
+// a little less awkward. webrtc::scoped_refptr doesn't support the & operator
// or a way to receive values via an out ptr.
// The function is intentionally not called QueryInterface to make things less
// confusing for the compiler to figure out what the caller wants to do when
// called from within the context of a class that also implements COM
// interfaces.
template <class T>
-HRESULT GetComInterface(IUnknown* object, rtc::scoped_refptr<T>* ptr) {
+HRESULT GetComInterface(IUnknown* object, webrtc::scoped_refptr<T>* ptr) {
// This helper function is not meant to magically free ptr. If we do that
// we add code bloat to most places where it's not needed and make the code
// less readable since it's not clear at the call site that the pointer
diff --git a/modules/video_capture/windows/sink_filter_ds.cc b/modules/video_capture/windows/sink_filter_ds.cc
index 0130e66..33e7a26 100644
--- a/modules/video_capture/windows/sink_filter_ds.cc
+++ b/modules/video_capture/windows/sink_filter_ds.cc
@@ -92,7 +92,7 @@
return S_OK;
}
- rtc::scoped_refptr<IPin> pin_;
+ webrtc::scoped_refptr<IPin> pin_;
int pos_ = 0;
};
@@ -143,7 +143,7 @@
}
void GetSampleProperties(IMediaSample* sample, AM_SAMPLE2_PROPERTIES* props) {
- rtc::scoped_refptr<IMediaSample2> sample2;
+ webrtc::scoped_refptr<IMediaSample2> sample2;
if (SUCCEEDED(GetComInterface(sample, &sample2))) {
sample2->GetProperties(sizeof(*props), reinterpret_cast<BYTE*>(props));
return;
@@ -197,7 +197,7 @@
RTC_LOG(LS_INFO) << "TranslateMediaTypeToVideoCaptureCapability width:"
<< bih->biWidth << " height:" << bih->biHeight
- << " Compression:0x" << rtc::ToHex(bih->biCompression);
+ << " Compression:0x" << webrtc::ToHex(bih->biCompression);
const GUID& sub_type = media_type->subtype;
if (sub_type == MEDIASUBTYPE_MJPG &&
@@ -745,7 +745,7 @@
if (!capture_thread_id_) {
// Make sure we set the thread name only once.
capture_thread_id_ = GetCurrentThreadId();
- rtc::SetCurrentThreadName("webrtc_video_capture");
+ webrtc::SetCurrentThreadName("webrtc_video_capture");
}
AM_SAMPLE2_PROPERTIES sample_props = {};
@@ -900,7 +900,7 @@
if (info_.pGraph) {
// make sure we don't hold on to the reference we may receive.
// Note that this assumes the same object identity, but so be it.
- rtc::scoped_refptr<IMediaEventSink> sink;
+ webrtc::scoped_refptr<IMediaEventSink> sink;
GetComInterface(info_.pGraph, &sink);
sink_ = sink.get();
}
diff --git a/modules/video_capture/windows/sink_filter_ds.h b/modules/video_capture/windows/sink_filter_ds.h
index b0fabda..4cc5670 100644
--- a/modules/video_capture/windows/sink_filter_ds.h
+++ b/modules/video_capture/windows/sink_filter_ds.h
@@ -97,8 +97,8 @@
// running), otherwise accessed on the capture thread.
VideoCaptureCapability resulting_capability_;
DWORD capture_thread_id_ = 0;
- rtc::scoped_refptr<IMemAllocator> allocator_ RTC_GUARDED_BY(main_checker_);
- rtc::scoped_refptr<IPin> receive_pin_ RTC_GUARDED_BY(main_checker_);
+ webrtc::scoped_refptr<IMemAllocator> allocator_ RTC_GUARDED_BY(main_checker_);
+ webrtc::scoped_refptr<IPin> receive_pin_ RTC_GUARDED_BY(main_checker_);
std::atomic_bool flushing_{false};
std::atomic_bool runtime_error_{false};
// Holds a referenceless pointer to the owning filter, the name and
@@ -148,7 +148,7 @@
private:
SequenceChecker main_checker_;
- const rtc::scoped_refptr<ComRefCount<CaptureInputPin>> input_pin_;
+ const webrtc::scoped_refptr<ComRefCount<CaptureInputPin>> input_pin_;
VideoCaptureImpl* const capture_observer_;
FILTER_INFO info_ RTC_GUARDED_BY(main_checker_) = {};
// Set/cleared in JoinFilterGraph. The filter must be stopped (no capture)
diff --git a/modules/video_capture/windows/video_capture_ds.h b/modules/video_capture/windows/video_capture_ds.h
index d689715..5157115 100644
--- a/modules/video_capture/windows/video_capture_ds.h
+++ b/modules/video_capture/windows/video_capture_ds.h
@@ -60,7 +60,7 @@
IBaseFilter* _captureFilter RTC_GUARDED_BY(api_checker_);
IGraphBuilder* _graphBuilder RTC_GUARDED_BY(api_checker_);
IMediaControl* _mediaControl RTC_GUARDED_BY(api_checker_);
- rtc::scoped_refptr<CaptureSinkFilter> sink_filter_
+ webrtc::scoped_refptr<CaptureSinkFilter> sink_filter_
RTC_GUARDED_BY(api_checker_);
IPin* _inputSendPin RTC_GUARDED_BY(api_checker_);
IPin* _outputCapturePin RTC_GUARDED_BY(api_checker_);
diff --git a/modules/video_capture/windows/video_capture_factory_windows.cc b/modules/video_capture/windows/video_capture_factory_windows.cc
index 481326c..8cf328e 100644
--- a/modules/video_capture/windows/video_capture_factory_windows.cc
+++ b/modules/video_capture/windows/video_capture_factory_windows.cc
@@ -20,13 +20,13 @@
return DeviceInfoDS::Create();
}
-rtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
+webrtc::scoped_refptr<VideoCaptureModule> VideoCaptureImpl::Create(
const char* device_id) {
if (device_id == nullptr)
return nullptr;
// TODO(tommi): Use Media Foundation implementation for Vista and up.
- auto capture = rtc::make_ref_counted<VideoCaptureDS>();
+ auto capture = webrtc::make_ref_counted<VideoCaptureDS>();
if (capture->Init(device_id) != 0) {
return nullptr;
}
diff --git a/modules/video_coding/codecs/av1/dav1d_decoder.cc b/modules/video_coding/codecs/av1/dav1d_decoder.cc
index e8a7354..a9384b0 100644
--- a/modules/video_coding/codecs/av1/dav1d_decoder.cc
+++ b/modules/video_coding/codecs/av1/dav1d_decoder.cc
@@ -68,13 +68,12 @@
Dav1dData data_ = {};
};
-class ScopedDav1dPicture
- : public rtc::RefCountedNonVirtual<ScopedDav1dPicture> {
+class ScopedDav1dPicture : public RefCountedNonVirtual<ScopedDav1dPicture> {
public:
~ScopedDav1dPicture() { dav1d_picture_unref(&picture_); }
Dav1dPicture& Picture() { return picture_; }
- using rtc::RefCountedNonVirtual<ScopedDav1dPicture>::HasOneRef;
+ using RefCountedNonVirtual<ScopedDav1dPicture>::HasOneRef;
private:
Dav1dPicture picture_ = {};
@@ -153,7 +152,7 @@
return WEBRTC_VIDEO_CODEC_ERROR;
}
- rtc::scoped_refptr<ScopedDav1dPicture> scoped_dav1d_picture(
+ scoped_refptr<ScopedDav1dPicture> scoped_dav1d_picture(
new ScopedDav1dPicture{});
Dav1dPicture& dav1d_picture = scoped_dav1d_picture->Picture();
if (int get_picture_res = dav1d_get_picture(context_, &dav1d_picture)) {
@@ -187,7 +186,7 @@
}
}
- rtc::scoped_refptr<VideoFrameBuffer> wrapped_buffer;
+ scoped_refptr<VideoFrameBuffer> wrapped_buffer;
if (dav1d_picture.p.layout == DAV1D_PIXEL_LAYOUT_I420) {
wrapped_buffer = WrapI420Buffer(
width, height, static_cast<uint8_t*>(dav1d_picture.data[0]),
diff --git a/modules/video_coding/codecs/av1/dav1d_decoder_unittest.cc b/modules/video_coding/codecs/av1/dav1d_decoder_unittest.cc
index 5c2d33a..f2ad3a5 100644
--- a/modules/video_coding/codecs/av1/dav1d_decoder_unittest.cc
+++ b/modules/video_coding/codecs/av1/dav1d_decoder_unittest.cc
@@ -39,7 +39,7 @@
0x20, 0x03, 0xe0, 0x01, 0xf2, 0xb0, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
0x00, 0xf2, 0x44, 0xd6, 0xa5, 0x3b, 0x7c, 0x8b, 0x7c, 0x8c, 0x6b, 0x9a};
-EncodedImage CreateEncodedImage(rtc::ArrayView<const uint8_t> data) {
+EncodedImage CreateEncodedImage(ArrayView<const uint8_t> data) {
EncodedImage image;
image.SetEncodedData(EncodedImageBuffer::Create(data.data(), data.size()));
return image;
diff --git a/modules/video_coding/codecs/av1/libaom_av1_encoder.cc b/modules/video_coding/codecs/av1/libaom_av1_encoder.cc
index 7f17d9c..d0c513e 100644
--- a/modules/video_coding/codecs/av1/libaom_av1_encoder.cc
+++ b/modules/video_coding/codecs/av1/libaom_av1_encoder.cc
@@ -574,11 +574,11 @@
return WEBRTC_VIDEO_CODEC_ERROR;
}
- rtc::scoped_refptr<VideoFrameBuffer> buffer = frame.video_frame_buffer();
+ scoped_refptr<VideoFrameBuffer> buffer = frame.video_frame_buffer();
absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
supported_formats = {VideoFrameBuffer::Type::kI420,
VideoFrameBuffer::Type::kNV12};
- rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer;
+ scoped_refptr<VideoFrameBuffer> mapped_buffer;
if (buffer->type() != VideoFrameBuffer::Type::kNative) {
// `buffer` is already mapped.
mapped_buffer = buffer;
@@ -592,7 +592,7 @@
(absl::c_find(supported_formats, mapped_buffer->type()) ==
supported_formats.end() &&
mapped_buffer->type() != VideoFrameBuffer::Type::kI420A)) {
- rtc::scoped_refptr<I420BufferInterface> converted_buffer(buffer->ToI420());
+ scoped_refptr<I420BufferInterface> converted_buffer(buffer->ToI420());
if (!converted_buffer) {
RTC_LOG(LS_ERROR) << "Failed to convert "
<< VideoFrameBufferTypeToString(
diff --git a/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
index 2ebb4cb..f3737a1 100644
--- a/modules/video_coding/codecs/h264/h264_decoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
@@ -119,13 +119,13 @@
// http://crbug.com/390941. Our pool is set up to zero-initialize new buffers.
// TODO(https://crbug.com/390941): Delete that feature from the video pool,
// instead add an explicit call to InitializeData here.
- rtc::scoped_refptr<PlanarYuvBuffer> frame_buffer;
- rtc::scoped_refptr<I444Buffer> i444_buffer;
- rtc::scoped_refptr<I420Buffer> i420_buffer;
- rtc::scoped_refptr<I422Buffer> i422_buffer;
- rtc::scoped_refptr<I010Buffer> i010_buffer;
- rtc::scoped_refptr<I210Buffer> i210_buffer;
- rtc::scoped_refptr<I410Buffer> i410_buffer;
+ webrtc::scoped_refptr<PlanarYuvBuffer> frame_buffer;
+ webrtc::scoped_refptr<I444Buffer> i444_buffer;
+ webrtc::scoped_refptr<I420Buffer> i420_buffer;
+ webrtc::scoped_refptr<I422Buffer> i422_buffer;
+ webrtc::scoped_refptr<I010Buffer> i010_buffer;
+ webrtc::scoped_refptr<I210Buffer> i210_buffer;
+ webrtc::scoped_refptr<I410Buffer> i410_buffer;
int bytes_per_pixel = 1;
switch (context->pix_fmt) {
case AV_PIX_FMT_YUV420P:
@@ -405,7 +405,7 @@
VideoFrame* input_frame =
static_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0]));
RTC_DCHECK(input_frame);
- rtc::scoped_refptr<VideoFrameBuffer> frame_buffer =
+ webrtc::scoped_refptr<VideoFrameBuffer> frame_buffer =
input_frame->video_frame_buffer();
// Instantiate Planar YUV buffer according to video frame buffer type
@@ -530,7 +530,7 @@
return WEBRTC_VIDEO_CODEC_ERROR;
}
- rtc::scoped_refptr<webrtc::VideoFrameBuffer> cropped_buffer;
+ webrtc::scoped_refptr<webrtc::VideoFrameBuffer> cropped_buffer;
switch (video_frame_buffer_type) {
case VideoFrameBuffer::Type::kI420:
cropped_buffer = WrapI420Buffer(
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
index a3ced2d..7792d53 100644
--- a/modules/video_coding/codecs/h264/h264_encoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
@@ -420,7 +420,7 @@
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
- rtc::scoped_refptr<I420BufferInterface> frame_buffer =
+ webrtc::scoped_refptr<I420BufferInterface> frame_buffer =
input_frame.video_frame_buffer()->ToI420();
if (!frame_buffer) {
RTC_LOG(LS_ERROR) << "Failed to convert "
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.h b/modules/video_coding/codecs/h264/h264_encoder_impl.h
index f04b649..2d4a953 100644
--- a/modules/video_coding/codecs/h264/h264_encoder_impl.h
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.h
@@ -99,7 +99,7 @@
std::vector<ISVCEncoder*> encoders_;
std::vector<SSourcePicture> pictures_;
- std::vector<rtc::scoped_refptr<I420Buffer>> downscaled_buffers_;
+ std::vector<webrtc::scoped_refptr<I420Buffer>> downscaled_buffers_;
std::vector<LayerConfig> configurations_;
std::vector<EncodedImage> encoded_images_;
std::vector<std::unique_ptr<ScalableVideoController>> svc_controllers_;
diff --git a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
index 7df11da..5974266 100644
--- a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
+++ b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
@@ -100,8 +100,7 @@
encoder_config.simulcast_layers.resize(
codec_settings->numberOfSimulcastStreams);
VideoEncoder::EncoderInfo encoder_info;
- auto stream_factory =
- rtc::make_ref_counted<EncoderStreamFactory>(encoder_info);
+ auto stream_factory = make_ref_counted<EncoderStreamFactory>(encoder_info);
const std::vector<VideoStream> streams = stream_factory->CreateEncoderStreams(
trials, codec_settings->width, codec_settings->height, encoder_config);
@@ -431,13 +430,13 @@
void Start() {
if (config_.measure_cpu) {
cpu_time_ -= GetProcessCpuTimeNanos();
- wallclock_time_ -= rtc::SystemTimeNanos();
+ wallclock_time_ -= SystemTimeNanos();
}
}
void Stop() {
if (config_.measure_cpu) {
cpu_time_ += GetProcessCpuTimeNanos();
- wallclock_time_ += rtc::SystemTimeNanos();
+ wallclock_time_ += SystemTimeNanos();
}
}
void Print() const {
@@ -588,7 +587,7 @@
Unit unit,
absl::string_view non_standard_unit_suffix,
ImprovementDirection improvement_direction) {
- rtc::StringBuilder metric_name(measurement);
+ StringBuilder metric_name(measurement);
metric_name << modifier.str() << non_standard_unit_suffix;
GetGlobalMetricsLogger()->LogSingleValueMetric(
metric_name.str(), config_.test_name, value, unit,
diff --git a/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc b/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc
index 7ddd598..79ff245 100644
--- a/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc
+++ b/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc
@@ -98,7 +98,7 @@
TEST(VideoCodecTestMediaCodec, ForemanCif500kbpsVp8) {
auto config = CreateConfig();
- config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, false, false, false,
+ config.SetCodecSettings(webrtc::kVp8CodecName, 1, 1, 1, false, false, false,
352, 288);
auto fixture = CreateTestFixtureWithConfig(config);
@@ -120,7 +120,7 @@
const auto frame_checker =
std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
config.encoded_frame_checker = frame_checker.get();
- config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
+ config.SetCodecSettings(webrtc::kH264CodecName, 1, 1, 1, false, false, false,
352, 288);
auto fixture = CreateTestFixtureWithConfig(config);
@@ -146,7 +146,7 @@
config.h264_codec_settings.profile = H264Profile::kProfileConstrainedHigh;
config.encoded_frame_checker = frame_checker.get();
- config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
+ config.SetCodecSettings(webrtc::kH264CodecName, 1, 1, 1, false, false, false,
352, 288);
auto fixture = CreateTestFixtureWithConfig(config);
@@ -166,8 +166,8 @@
TEST(VideoCodecTestMediaCodec, ForemanMixedRes100kbpsVp8H264) {
auto config = CreateConfig();
const int kNumFrames = 30;
- const std::vector<std::string> codecs = {cricket::kVp8CodecName,
- cricket::kH264CodecName};
+ const std::vector<std::string> codecs = {webrtc::kVp8CodecName,
+ webrtc::kH264CodecName};
const std::vector<std::tuple<int, int>> resolutions = {
{128, 96}, {176, 144}, {320, 240}, {480, 272}};
const std::vector<RateProfile> rate_profiles = {
@@ -201,7 +201,7 @@
const ::testing::TestParamInfo<
VideoCodecTestMediaCodecRateAdaptation::ParamType>& info) {
char buf[512];
- rtc::SimpleStringBuilder ss(buf);
+ webrtc::SimpleStringBuilder ss(buf);
ss << std::get<0>(info.param).name << "_" << std::get<1>(info.param);
return ss.str();
}
@@ -263,9 +263,9 @@
kBitRateHighLowHigh,
kFrameRateLowHighLow,
kFrameRateHighLowHigh),
- ::testing::Values(cricket::kVp8CodecName,
- cricket::kVp9CodecName,
- cricket::kH264CodecName)),
+ ::testing::Values(webrtc::kVp8CodecName,
+ webrtc::kVp9CodecName,
+ webrtc::kH264CodecName)),
VideoCodecTestMediaCodecRateAdaptation::ParamInfoToStr);
} // namespace test
diff --git a/modules/video_coding/codecs/test/videocodec_test_openh264.cc b/modules/video_coding/codecs/test/videocodec_test_openh264.cc
index 7fb1f83..07da31c 100644
--- a/modules/video_coding/codecs/test/videocodec_test_openh264.cc
+++ b/modules/video_coding/codecs/test/videocodec_test_openh264.cc
@@ -43,7 +43,7 @@
auto frame_checker =
std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
auto config = CreateConfig();
- config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, true, false,
+ config.SetCodecSettings(webrtc::kH264CodecName, 1, 1, 1, false, true, false,
kCifWidth, kCifHeight);
config.encoded_frame_checker = frame_checker.get();
auto fixture = CreateVideoCodecTestFixture(config);
@@ -67,7 +67,7 @@
config.h264_codec_settings.packetization_mode =
H264PacketizationMode::SingleNalUnit;
config.max_payload_size_bytes = 500;
- config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, true, false,
+ config.SetCodecSettings(webrtc::kH264CodecName, 1, 1, 1, false, true, false,
kCifWidth, kCifHeight);
config.encoded_frame_checker = frame_checker.get();
auto fixture = CreateVideoCodecTestFixture(config);
diff --git a/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc b/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc
index 7b9d668..1c5d2c8 100644
--- a/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc
+++ b/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc
@@ -58,7 +58,7 @@
const auto frame_checker =
std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
auto config = CreateConfig();
- config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
+ config.SetCodecSettings(webrtc::kH264CodecName, 1, 1, 1, false, false, false,
352, 288);
config.encoded_frame_checker = frame_checker.get();
auto fixture = CreateTestFixtureWithConfig(config);
@@ -75,7 +75,7 @@
std::make_unique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
auto config = CreateConfig();
config.h264_codec_settings.profile = H264Profile::kProfileConstrainedHigh;
- config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
+ config.SetCodecSettings(webrtc::kH264CodecName, 1, 1, 1, false, false, false,
352, 288);
config.encoded_frame_checker = frame_checker.get();
auto fixture = CreateTestFixtureWithConfig(config);
diff --git a/modules/video_coding/codecs/test/videoprocessor.cc b/modules/video_coding/codecs/test/videoprocessor.cc
index de107f1..5b5c2bb 100644
--- a/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/modules/video_coding/codecs/test/videoprocessor.cc
@@ -111,7 +111,7 @@
RTC_CHECK_GE(ref_buffer.width(), dec_buffer.width());
RTC_CHECK_GE(ref_buffer.height(), dec_buffer.height());
// Downscale reference frame.
- rtc::scoped_refptr<I420Buffer> scaled_buffer =
+ scoped_refptr<I420Buffer> scaled_buffer =
I420Buffer::Create(dec_buffer.width(), dec_buffer.height());
I420Scale(ref_buffer.DataY(), ref_buffer.StrideY(), ref_buffer.DataU(),
ref_buffer.StrideU(), ref_buffer.DataV(), ref_buffer.StrideV(),
@@ -263,9 +263,8 @@
FrameReader::Ratio framerate_scale = FrameReader::Ratio(
{.num = config_.clip_fps.value_or(config_.codec_settings.maxFramerate),
.den = static_cast<int>(config_.codec_settings.maxFramerate)});
- rtc::scoped_refptr<I420BufferInterface> buffer =
- input_frame_reader_->PullFrame(
- /*frame_num*/ nullptr, resolution, framerate_scale);
+ scoped_refptr<I420BufferInterface> buffer = input_frame_reader_->PullFrame(
+ /*frame_num*/ nullptr, resolution, framerate_scale);
RTC_CHECK(buffer) << "Tried to read too many frames from the file.";
const size_t timestamp =
@@ -287,7 +286,7 @@
if (config_.reference_width != -1 && config_.reference_height != -1 &&
(input_frame.width() != config_.reference_width ||
input_frame.height() != config_.reference_height)) {
- rtc::scoped_refptr<I420Buffer> scaled_buffer = I420Buffer::Create(
+ scoped_refptr<I420Buffer> scaled_buffer = I420Buffer::Create(
config_.codec_settings.width, config_.codec_settings.height);
scaled_buffer->ScaleFrom(*input_frame.video_frame_buffer()->ToI420());
@@ -325,7 +324,7 @@
if (input_frame.width() != config_.codec_settings.width ||
input_frame.height() != config_.codec_settings.height) {
- rtc::scoped_refptr<I420Buffer> scaled_buffer = I420Buffer::Create(
+ scoped_refptr<I420Buffer> scaled_buffer = I420Buffer::Create(
config_.codec_settings.width, config_.codec_settings.height);
scaled_buffer->ScaleFrom(*input_frame.video_frame_buffer()->ToI420());
input_frame.set_video_frame_buffer(scaled_buffer);
@@ -538,7 +537,7 @@
int input_video_width = config_.codec_settings.width;
int input_video_height = config_.codec_settings.height;
- rtc::scoped_refptr<I420Buffer> scaled_buffer;
+ scoped_refptr<I420Buffer> scaled_buffer;
const I420BufferInterface* scaled_frame;
if (decoded_frame.width() == input_video_width &&
@@ -616,7 +615,7 @@
// Skip quality metrics calculation to not affect CPU usage.
if (analyze_frame_quality_ || decoded_frame_writers_) {
// Save last decoded frame to handle possible future drops.
- rtc::scoped_refptr<I420BufferInterface> i420buffer =
+ scoped_refptr<I420BufferInterface> i420buffer =
decoded_frame.video_frame_buffer()->ToI420();
// Copy decoded frame to a buffer without padding/stride such that we can
diff --git a/modules/video_coding/codecs/test/videoprocessor.h b/modules/video_coding/codecs/test/videoprocessor.h
index 9d59119..200d5bb 100644
--- a/modules/video_coding/codecs/test/videoprocessor.h
+++ b/modules/video_coding/codecs/test/videoprocessor.h
@@ -245,7 +245,7 @@
// simulcast_svc_idx -> frame_number.
std::vector<size_t> last_decoded_frame_num_ RTC_GUARDED_BY(sequence_checker_);
// simulcast_svc_idx -> buffer.
- std::vector<rtc::scoped_refptr<I420Buffer>> last_decoded_frame_buffer_
+ std::vector<scoped_refptr<I420Buffer>> last_decoded_frame_buffer_
RTC_GUARDED_BY(sequence_checker_);
// Time spent in frame encode callback. It is accumulated for layers and
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
index fab6c53..3eef8e5 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
@@ -282,9 +282,9 @@
last_frame_width_ = img->d_w;
last_frame_height_ = img->d_h;
// Allocate memory for decoded image.
- rtc::scoped_refptr<VideoFrameBuffer> buffer;
+ scoped_refptr<VideoFrameBuffer> buffer;
- rtc::scoped_refptr<I420Buffer> i420_buffer =
+ scoped_refptr<I420Buffer> i420_buffer =
buffer_pool_.CreateI420Buffer(img->d_w, img->d_h);
buffer = i420_buffer;
if (i420_buffer.get()) {
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
index 2597184..1ec4ab9 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
@@ -1096,7 +1096,7 @@
// Because `raw_images_` are set to hold pointers to the prepared buffers, we
// need to keep these buffers alive through reference counting until after
// encoding is complete.
- std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers =
+ std::vector<scoped_refptr<VideoFrameBuffer>> prepared_buffers =
PrepareBuffers(frame.video_frame_buffer());
if (prepared_buffers.empty()) {
return WEBRTC_VIDEO_CODEC_ERROR;
@@ -1104,7 +1104,7 @@
struct CleanUpOnExit {
explicit CleanUpOnExit(
vpx_image_t* raw_image,
- std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers)
+ std::vector<scoped_refptr<VideoFrameBuffer>> prepared_buffers)
: raw_image_(raw_image),
prepared_buffers_(std::move(prepared_buffers)) {}
~CleanUpOnExit() {
@@ -1113,7 +1113,7 @@
raw_image_->planes[VPX_PLANE_V] = nullptr;
}
vpx_image_t* raw_image_;
- std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers_;
+ std::vector<scoped_refptr<VideoFrameBuffer>> prepared_buffers_;
} clean_up_on_exit(&raw_images_[0], std::move(prepared_buffers));
if (send_key_frame) {
@@ -1442,15 +1442,15 @@
}
}
-std::vector<rtc::scoped_refptr<VideoFrameBuffer>>
-LibvpxVp8Encoder::PrepareBuffers(rtc::scoped_refptr<VideoFrameBuffer> buffer) {
+std::vector<scoped_refptr<VideoFrameBuffer>> LibvpxVp8Encoder::PrepareBuffers(
+ scoped_refptr<VideoFrameBuffer> buffer) {
RTC_DCHECK_EQ(buffer->width(), raw_images_[0].d_w);
RTC_DCHECK_EQ(buffer->height(), raw_images_[0].d_h);
absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
supported_formats = {VideoFrameBuffer::Type::kI420,
VideoFrameBuffer::Type::kNV12};
- rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer;
+ scoped_refptr<VideoFrameBuffer> mapped_buffer;
if (buffer->type() != VideoFrameBuffer::Type::kNative) {
// `buffer` is already mapped.
mapped_buffer = buffer;
@@ -1495,7 +1495,7 @@
// Prepare `raw_images_` from `mapped_buffer` and, if simulcast, scaled
// versions of `buffer`.
- std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers;
+ std::vector<scoped_refptr<VideoFrameBuffer>> prepared_buffers;
SetRawImagePlanes(&raw_images_[0], mapped_buffer.get());
prepared_buffers.push_back(mapped_buffer);
for (size_t i = 1; i < encoders_.size(); ++i) {
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h
index 853ea62..02f36e8 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h
@@ -111,8 +111,8 @@
// as a result, allowing the caller to keep references to them until after
// encoding has finished. On failure to convert the buffer, an empty list is
// returned.
- std::vector<rtc::scoped_refptr<VideoFrameBuffer>> PrepareBuffers(
- rtc::scoped_refptr<VideoFrameBuffer> buffer);
+ std::vector<scoped_refptr<VideoFrameBuffer>> PrepareBuffers(
+ scoped_refptr<VideoFrameBuffer> buffer);
const Environment env_;
const std::unique_ptr<LibvpxInterface> libvpx_;
diff --git a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index 487f50b..c0aca08 100644
--- a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -1064,9 +1064,9 @@
EncodeAndWaitForFrame(input_frame, &encoded_frame, &codec_specific_info);
// After encoding, we expect one mapping per simulcast layer.
- rtc::scoped_refptr<test::MappableNativeBuffer> mappable_buffer =
+ scoped_refptr<test::MappableNativeBuffer> mappable_buffer =
test::GetMappableNativeBufferFromVideoFrame(input_frame);
- std::vector<rtc::scoped_refptr<VideoFrameBuffer>> mapped_buffers =
+ std::vector<scoped_refptr<VideoFrameBuffer>> mapped_buffers =
mappable_buffer->GetMappedFramedBuffers();
ASSERT_EQ(mapped_buffers.size(), 3u);
EXPECT_EQ(mapped_buffers[0]->type(), mappable_type_);
diff --git a/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc b/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc
index b3f4ce1..4d4e147 100644
--- a/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc
+++ b/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc
@@ -210,7 +210,7 @@
if (input_image._frameType == VideoFrameType::kVideoFrameKey) {
std::optional<Vp9UncompressedHeader> frame_info =
ParseUncompressedVp9Header(
- rtc::MakeArrayView(input_image.data(), input_image.size()));
+ MakeArrayView(input_image.data(), input_image.size()));
if (frame_info) {
RenderResolution frame_resolution(frame_info->frame_width,
frame_info->frame_height);
@@ -278,12 +278,12 @@
// This buffer contains all of `img`'s image data, a reference counted
// Vp9FrameBuffer. (libvpx is done with the buffers after a few
// vpx_codec_decode calls or vpx_codec_destroy).
- rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer> img_buffer(
+ scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer> img_buffer(
static_cast<Vp9FrameBufferPool::Vp9FrameBuffer*>(img->fb_priv));
// The buffer can be used directly by the VideoFrame (without copy) by
// using a Wrapped*Buffer.
- rtc::scoped_refptr<VideoFrameBuffer> img_wrapped_buffer;
+ scoped_refptr<VideoFrameBuffer> img_wrapped_buffer;
switch (img->fmt) {
case VPX_IMG_FMT_I420:
img_wrapped_buffer = WrapI420Buffer(
diff --git a/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
index 689d1d7..b34baf6 100644
--- a/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
+++ b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
@@ -202,7 +202,7 @@
}
vpx_svc_ref_frame_config_t Vp9References(
- rtc::ArrayView<const ScalableVideoController::LayerFrameConfig> layers) {
+ ArrayView<const ScalableVideoController::LayerFrameConfig> layers) {
vpx_svc_ref_frame_config_t ref_config = {};
for (const ScalableVideoController::LayerFrameConfig& layer_frame : layers) {
const auto& buffers = layer_frame.Buffers();
@@ -1174,9 +1174,9 @@
// In case we need to map the buffer, `mapped_buffer` is used to keep it alive
// through reference counting until after encoding has finished.
- rtc::scoped_refptr<const VideoFrameBuffer> mapped_buffer;
+ scoped_refptr<const VideoFrameBuffer> mapped_buffer;
const I010BufferInterface* i010_buffer;
- rtc::scoped_refptr<const I010BufferInterface> i010_copy;
+ scoped_refptr<const I010BufferInterface> i010_copy;
switch (profile_) {
case VP9Profile::kProfile0: {
mapped_buffer = PrepareBufferForProfile0(scaled_image);
@@ -2070,13 +2070,13 @@
raw_->bit_depth = (fmt == VPX_IMG_FMT_I42016) ? 16 : 8;
}
-rtc::scoped_refptr<VideoFrameBuffer> LibvpxVp9Encoder::PrepareBufferForProfile0(
- rtc::scoped_refptr<VideoFrameBuffer> buffer) {
+scoped_refptr<VideoFrameBuffer> LibvpxVp9Encoder::PrepareBufferForProfile0(
+ scoped_refptr<VideoFrameBuffer> buffer) {
absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
supported_formats = {VideoFrameBuffer::Type::kI420,
VideoFrameBuffer::Type::kNV12};
- rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer;
+ scoped_refptr<VideoFrameBuffer> mapped_buffer;
if (buffer->type() != VideoFrameBuffer::Type::kNative) {
// `buffer` is already mapped.
mapped_buffer = buffer;
diff --git a/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h
index 2741fe8..aa54caa 100644
--- a/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h
+++ b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h
@@ -134,8 +134,8 @@
// versions of `buffer`. Returns the buffer that got referenced as a result,
// allowing the caller to keep a reference to it until after encoding has
// finished. On failure to convert the buffer, null is returned.
- rtc::scoped_refptr<VideoFrameBuffer> PrepareBufferForProfile0(
- rtc::scoped_refptr<VideoFrameBuffer> buffer);
+ scoped_refptr<VideoFrameBuffer> PrepareBufferForProfile0(
+ scoped_refptr<VideoFrameBuffer> buffer);
const Environment env_;
const std::unique_ptr<LibvpxInterface> libvpx_;
diff --git a/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc b/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
index 3aef85e..3152ccb 100644
--- a/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
+++ b/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
@@ -209,9 +209,9 @@
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
// After encoding, we would expect a single mapping to have happened.
- rtc::scoped_refptr<test::MappableNativeBuffer> mappable_buffer =
+ scoped_refptr<test::MappableNativeBuffer> mappable_buffer =
test::GetMappableNativeBufferFromVideoFrame(input_frame);
- std::vector<rtc::scoped_refptr<VideoFrameBuffer>> mapped_buffers =
+ std::vector<scoped_refptr<VideoFrameBuffer>> mapped_buffers =
mappable_buffer->GetMappedFramedBuffers();
ASSERT_EQ(mapped_buffers.size(), 1u);
EXPECT_EQ(mapped_buffers[0]->type(), mappable_buffer->mappable_type());
@@ -2056,7 +2056,7 @@
if (picture_idx == 0) {
EXPECT_EQ(vp9.num_ref_pics, 0) << "Frame " << i;
} else {
- EXPECT_THAT(rtc::MakeArrayView(vp9.p_diff, vp9.num_ref_pics),
+ EXPECT_THAT(MakeArrayView(vp9.p_diff, vp9.num_ref_pics),
UnorderedElementsAreArray(gof.pid_diff[gof_idx],
gof.num_ref_pics[gof_idx]))
<< "Frame " << i;
diff --git a/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc b/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
index cb9726b..d8d91bc 100644
--- a/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
+++ b/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
@@ -57,10 +57,10 @@
return true;
}
-rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer>
+scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer>
Vp9FrameBufferPool::GetFrameBuffer(size_t min_size) {
RTC_DCHECK_GT(min_size, 0);
- rtc::scoped_refptr<Vp9FrameBuffer> available_buffer = nullptr;
+ scoped_refptr<Vp9FrameBuffer> available_buffer = nullptr;
{
MutexLock lock(&buffers_lock_);
// Do we have a buffer we can recycle?
@@ -154,7 +154,7 @@
Vp9FrameBufferPool* pool = static_cast<Vp9FrameBufferPool*>(user_priv);
- rtc::scoped_refptr<Vp9FrameBuffer> buffer = pool->GetFrameBuffer(min_size);
+ scoped_refptr<Vp9FrameBuffer> buffer = pool->GetFrameBuffer(min_size);
fb->data = buffer->GetData();
fb->size = buffer->GetDataSize();
// Store Vp9FrameBuffer* in `priv` for use in VpxReleaseFrameBuffer.
diff --git a/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h b/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h
index d81bbe2..592c13d 100644
--- a/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h
+++ b/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h
@@ -68,18 +68,17 @@
// vpx_codec_destroy(decoder_ctx);
class Vp9FrameBufferPool {
public:
- class Vp9FrameBuffer final
- : public rtc::RefCountedNonVirtual<Vp9FrameBuffer> {
+ class Vp9FrameBuffer final : public RefCountedNonVirtual<Vp9FrameBuffer> {
public:
uint8_t* GetData();
size_t GetDataSize() const;
void SetSize(size_t size);
- using rtc::RefCountedNonVirtual<Vp9FrameBuffer>::HasOneRef;
+ using RefCountedNonVirtual<Vp9FrameBuffer>::HasOneRef;
private:
// Data as an easily resizable buffer.
- rtc::Buffer data_;
+ Buffer data_;
};
// Configures libvpx to, in the specified context, use this memory pool for
@@ -89,7 +88,7 @@
// Gets a frame buffer of at least `min_size`, recycling an available one or
// creating a new one. When no longer referenced from the outside the buffer
// becomes recyclable.
- rtc::scoped_refptr<Vp9FrameBuffer> GetFrameBuffer(size_t min_size);
+ scoped_refptr<Vp9FrameBuffer> GetFrameBuffer(size_t min_size);
// Gets the number of buffers currently in use (not ready to be recycled).
int GetNumBuffersInUse() const;
// Changes the max amount of buffers in the pool to the new value.
@@ -125,7 +124,7 @@
// Protects `allocated_buffers_`.
mutable Mutex buffers_lock_;
// All buffers, in use or ready to be recycled.
- std::vector<rtc::scoped_refptr<Vp9FrameBuffer>> allocated_buffers_
+ std::vector<scoped_refptr<Vp9FrameBuffer>> allocated_buffers_
RTC_GUARDED_BY(buffers_lock_);
size_t max_num_buffers_ = kDefaultMaxNumBuffers;
};
diff --git a/modules/video_coding/deprecated/frame_buffer.h b/modules/video_coding/deprecated/frame_buffer.h
index 30942bd..9aae5db 100644
--- a/modules/video_coding/deprecated/frame_buffer.h
+++ b/modules/video_coding/deprecated/frame_buffer.h
@@ -80,7 +80,7 @@
VCMFrameBufferStateEnum _state; // Current state of the frame
// Set with SetEncodedData, but keep pointer to the concrete class here, to
// enable reallocation and mutation.
- rtc::scoped_refptr<EncodedImageBuffer> encoded_image_buffer_;
+ scoped_refptr<EncodedImageBuffer> encoded_image_buffer_;
VCMSessionInfo _sessionInfo;
uint16_t _nackCount;
int64_t _latestPacketTimeMs;
diff --git a/modules/video_coding/frame_dependencies_calculator.cc b/modules/video_coding/frame_dependencies_calculator.cc
index 4a7f07a..64a4476 100644
--- a/modules/video_coding/frame_dependencies_calculator.cc
+++ b/modules/video_coding/frame_dependencies_calculator.cc
@@ -27,7 +27,7 @@
absl::InlinedVector<int64_t, 5> FrameDependenciesCalculator::FromBuffersUsage(
int64_t frame_id,
- rtc::ArrayView<const CodecBufferUsage> buffers_usage) {
+ ArrayView<const CodecBufferUsage> buffers_usage) {
absl::InlinedVector<int64_t, 5> dependencies;
RTC_DCHECK_GT(buffers_usage.size(), 0);
for (const CodecBufferUsage& buffer_usage : buffers_usage) {
diff --git a/modules/video_coding/frame_dependencies_calculator.h b/modules/video_coding/frame_dependencies_calculator.h
index 12e9b09..3a354c5 100644
--- a/modules/video_coding/frame_dependencies_calculator.h
+++ b/modules/video_coding/frame_dependencies_calculator.h
@@ -32,7 +32,7 @@
// Calculates frame dependencies based on previous encoder buffer usage.
absl::InlinedVector<int64_t, 5> FromBuffersUsage(
int64_t frame_id,
- rtc::ArrayView<const CodecBufferUsage> buffers_usage);
+ ArrayView<const CodecBufferUsage> buffers_usage);
private:
struct BufferUsage {
diff --git a/modules/video_coding/generic_decoder_unittest.cc b/modules/video_coding/generic_decoder_unittest.cc
index 5fa69f1..52fc057 100644
--- a/modules/video_coding/generic_decoder_unittest.cc
+++ b/modules/video_coding/generic_decoder_unittest.cc
@@ -70,7 +70,7 @@
return ret;
}
- rtc::ArrayView<const VideoFrame> GetAllFrames() const { return frames_; }
+ ArrayView<const VideoFrame> GetAllFrames() const { return frames_; }
void OnDroppedFrames(uint32_t frames_dropped) {
frames_dropped_ += frames_dropped;
diff --git a/modules/video_coding/h264_sps_pps_tracker.cc b/modules/video_coding/h264_sps_pps_tracker.cc
index d0f7093..c00ec68 100644
--- a/modules/video_coding/h264_sps_pps_tracker.cc
+++ b/modules/video_coding/h264_sps_pps_tracker.cc
@@ -35,7 +35,7 @@
} // namespace
H264SpsPpsTracker::FixedBitstream H264SpsPpsTracker::CopyAndFixBitstream(
- rtc::ArrayView<const uint8_t> bitstream,
+ ArrayView<const uint8_t> bitstream,
RTPVideoHeader* video_header) {
RTC_DCHECK(video_header);
RTC_DCHECK(video_header->codec == kVideoCodecH264);
@@ -206,9 +206,9 @@
return;
}
std::optional<SpsParser::SpsState> parsed_sps = SpsParser::ParseSps(
- rtc::ArrayView<const uint8_t>(sps).subview(kNaluHeaderOffset));
+ ArrayView<const uint8_t>(sps).subview(kNaluHeaderOffset));
std::optional<PpsParser::PpsState> parsed_pps = PpsParser::ParsePps(
- rtc::ArrayView<const uint8_t>(pps).subview(kNaluHeaderOffset));
+ ArrayView<const uint8_t>(pps).subview(kNaluHeaderOffset));
if (!parsed_sps) {
RTC_LOG(LS_WARNING) << "Failed to parse SPS.";
diff --git a/modules/video_coding/h264_sps_pps_tracker.h b/modules/video_coding/h264_sps_pps_tracker.h
index 74166d2..132aaa0 100644
--- a/modules/video_coding/h264_sps_pps_tracker.h
+++ b/modules/video_coding/h264_sps_pps_tracker.h
@@ -29,7 +29,7 @@
enum PacketAction { kInsert, kDrop, kRequestKeyframe };
struct FixedBitstream {
PacketAction action;
- rtc::CopyOnWriteBuffer bitstream;
+ CopyOnWriteBuffer bitstream;
};
H264SpsPpsTracker() = default;
@@ -38,7 +38,7 @@
~H264SpsPpsTracker() = default;
// Returns fixed bitstream and modifies `video_header`.
- FixedBitstream CopyAndFixBitstream(rtc::ArrayView<const uint8_t> bitstream,
+ FixedBitstream CopyAndFixBitstream(ArrayView<const uint8_t> bitstream,
RTPVideoHeader* video_header);
void InsertSpsPpsNalus(const std::vector<uint8_t>& sps,
@@ -47,13 +47,13 @@
private:
struct PpsInfo {
int sps_id = -1;
- rtc::Buffer data;
+ Buffer data;
};
struct SpsInfo {
int width = -1;
int height = -1;
- rtc::Buffer data;
+ Buffer data;
};
std::map<int, PpsInfo> pps_data_;
diff --git a/modules/video_coding/h264_sps_pps_tracker_unittest.cc b/modules/video_coding/h264_sps_pps_tracker_unittest.cc
index d6cc450..079049d 100644
--- a/modules/video_coding/h264_sps_pps_tracker_unittest.cc
+++ b/modules/video_coding/h264_sps_pps_tracker_unittest.cc
@@ -30,7 +30,7 @@
const uint8_t start_code[] = {0, 0, 0, 1};
-rtc::ArrayView<const uint8_t> Bitstream(
+ArrayView<const uint8_t> Bitstream(
const H264SpsPpsTracker::FixedBitstream& fixed) {
return fixed.bitstream;
}
diff --git a/modules/video_coding/h26x_packet_buffer.cc b/modules/video_coding/h26x_packet_buffer.cc
index f1ecafc..dc141e5 100644
--- a/modules/video_coding/h26x_packet_buffer.cc
+++ b/modules/video_coding/h26x_packet_buffer.cc
@@ -77,7 +77,7 @@
});
}
-int64_t* GetContinuousSequence(rtc::ArrayView<int64_t> last_continuous,
+int64_t* GetContinuousSequence(ArrayView<int64_t> last_continuous,
int64_t unwrapped_seq_num) {
for (int64_t& last : last_continuous) {
if (unwrapped_seq_num - 1 == last) {
@@ -361,9 +361,9 @@
return;
}
std::optional<SpsParser::SpsState> parsed_sps = SpsParser::ParseSps(
- rtc::ArrayView<const uint8_t>(sps).subview(kNaluHeaderOffset));
+ ArrayView<const uint8_t>(sps).subview(kNaluHeaderOffset));
std::optional<PpsParser::PpsState> parsed_pps = PpsParser::ParsePps(
- rtc::ArrayView<const uint8_t>(pps).subview(kNaluHeaderOffset));
+ ArrayView<const uint8_t>(pps).subview(kNaluHeaderOffset));
if (!parsed_sps) {
RTC_LOG(LS_WARNING) << "Failed to parse SPS.";
@@ -408,7 +408,7 @@
RTPVideoHeaderH264& h264_header =
std::get<RTPVideoHeaderH264>(video_header.video_type_header);
- rtc::CopyOnWriteBuffer result;
+ CopyOnWriteBuffer result;
if (h264_idr_only_keyframes_allowed_) {
// Check if sps and pps insertion is needed.
diff --git a/modules/video_coding/h26x_packet_buffer_unittest.cc b/modules/video_coding/h26x_packet_buffer_unittest.cc
index b071190..c07de1f 100644
--- a/modules/video_coding/h26x_packet_buffer_unittest.cc
+++ b/modules/video_coding/h26x_packet_buffer_unittest.cc
@@ -89,9 +89,9 @@
std::unique_ptr<H26xPacketBuffer::Packet> Build();
private:
- rtc::CopyOnWriteBuffer BuildFuaPayload() const;
- rtc::CopyOnWriteBuffer BuildSingleNaluPayload() const;
- rtc::CopyOnWriteBuffer BuildStapAPayload() const;
+ CopyOnWriteBuffer BuildFuaPayload() const;
+ CopyOnWriteBuffer BuildSingleNaluPayload() const;
+ CopyOnWriteBuffer BuildStapAPayload() const;
RTPVideoHeaderH264& H264Header() {
return std::get<RTPVideoHeaderH264>(video_header_.video_type_header);
@@ -230,20 +230,20 @@
return res;
}
-rtc::CopyOnWriteBuffer H264Packet::BuildFuaPayload() const {
- return rtc::CopyOnWriteBuffer(nalu_payloads_[0]);
+CopyOnWriteBuffer H264Packet::BuildFuaPayload() const {
+ return CopyOnWriteBuffer(nalu_payloads_[0]);
}
-rtc::CopyOnWriteBuffer H264Packet::BuildSingleNaluPayload() const {
- rtc::CopyOnWriteBuffer res;
+CopyOnWriteBuffer H264Packet::BuildSingleNaluPayload() const {
+ CopyOnWriteBuffer res;
auto& h264_header = H264Header();
res.AppendData(&h264_header.nalus[0].type, 1);
res.AppendData(nalu_payloads_[0]);
return res;
}
-rtc::CopyOnWriteBuffer H264Packet::BuildStapAPayload() const {
- rtc::CopyOnWriteBuffer res;
+CopyOnWriteBuffer H264Packet::BuildStapAPayload() const {
+ CopyOnWriteBuffer res;
const uint8_t indicator = H264::NaluType::kStapA;
res.AppendData(&indicator, 1);
@@ -348,7 +348,7 @@
res->timestamp = rtp_timestamp_;
res->sequence_number = rtp_seq_num_;
res->video_header.codec = kVideoCodecH265;
- res->video_payload = rtc::CopyOnWriteBuffer();
+ res->video_payload = CopyOnWriteBuffer();
res->video_header.is_first_packet_in_frame = first_packet_;
for (const auto& payload : nalu_payloads_) {
res->video_payload.AppendData(payload);
@@ -378,7 +378,7 @@
}
#endif
-rtc::ArrayView<const uint8_t> PacketPayload(
+ArrayView<const uint8_t> PacketPayload(
const std::unique_ptr<H26xPacketBuffer::Packet>& packet) {
return packet->video_payload;
}
diff --git a/modules/video_coding/loss_notification_controller.cc b/modules/video_coding/loss_notification_controller.cc
index 73ee2d8..43d4b19 100644
--- a/modules/video_coding/loss_notification_controller.cc
+++ b/modules/video_coding/loss_notification_controller.cc
@@ -114,7 +114,7 @@
uint16_t first_seq_num,
int64_t frame_id,
bool discardable,
- rtc::ArrayView<const int64_t> frame_dependencies) {
+ ArrayView<const int64_t> frame_dependencies) {
RTC_DCHECK_RUN_ON(&sequence_checker_);
DiscardOldInformation(); // Prevent memory overconsumption.
@@ -140,7 +140,7 @@
}
bool LossNotificationController::AllDependenciesDecodable(
- rtc::ArrayView<const int64_t> frame_dependencies) const {
+ ArrayView<const int64_t> frame_dependencies) const {
RTC_DCHECK_RUN_ON(&sequence_checker_);
// Due to packet reordering, frame buffering and asynchronous decoders, it is
diff --git a/modules/video_coding/loss_notification_controller.h b/modules/video_coding/loss_notification_controller.h
index 402e0a5..f6b9992 100644
--- a/modules/video_coding/loss_notification_controller.h
+++ b/modules/video_coding/loss_notification_controller.h
@@ -29,7 +29,7 @@
struct FrameDetails {
bool is_keyframe;
int64_t frame_id;
- rtc::ArrayView<const int64_t> frame_dependencies;
+ ArrayView<const int64_t> frame_dependencies;
};
LossNotificationController(KeyFrameRequestSender* key_frame_request_sender,
@@ -45,13 +45,13 @@
void OnAssembledFrame(uint16_t first_seq_num,
int64_t frame_id,
bool discardable,
- rtc::ArrayView<const int64_t> frame_dependencies);
+ ArrayView<const int64_t> frame_dependencies);
private:
void DiscardOldInformation();
bool AllDependenciesDecodable(
- rtc::ArrayView<const int64_t> frame_dependencies) const;
+ ArrayView<const int64_t> frame_dependencies) const;
// When the loss of a packet or the non-decodability of a frame is detected,
// produces a key frame request or a loss notification.
diff --git a/modules/video_coding/packet_buffer.h b/modules/video_coding/packet_buffer.h
index ecf7343..f17b00e 100644
--- a/modules/video_coding/packet_buffer.h
+++ b/modules/video_coding/packet_buffer.h
@@ -62,7 +62,7 @@
uint32_t timestamp = 0;
int times_nacked = -1;
- rtc::CopyOnWriteBuffer video_payload;
+ CopyOnWriteBuffer video_payload;
RTPVideoHeader video_header;
};
struct InsertResult {
diff --git a/modules/video_coding/packet_buffer_unittest.cc b/modules/video_coding/packet_buffer_unittest.cc
index d7e2683..17a0d75 100644
--- a/modules/video_coding/packet_buffer_unittest.cc
+++ b/modules/video_coding/packet_buffer_unittest.cc
@@ -48,7 +48,7 @@
// Validates frame boundaries are valid and returns first sequence_number for
// each frame.
std::vector<uint16_t> StartSeqNums(
- rtc::ArrayView<const std::unique_ptr<PacketBuffer::Packet>> packets) {
+ ArrayView<const std::unique_ptr<PacketBuffer::Packet>> packets) {
std::vector<uint16_t> result;
bool frame_boundary = true;
for (const auto& packet : packets) {
@@ -117,7 +117,7 @@
IsKeyFrame keyframe, // is keyframe
IsFirst first, // is first packet of frame
IsLast last, // is last packet of frame
- rtc::ArrayView<const uint8_t> data = {},
+ ArrayView<const uint8_t> data = {},
uint32_t timestamp = 123u) { // rtp timestamp
auto packet = std::make_unique<PacketBuffer::Packet>();
packet->video_header.codec = kVideoCodecGeneric;
@@ -421,7 +421,7 @@
IsFirst first, // is first packet of frame
IsLast last, // is last packet of frame
uint32_t timestamp, // rtp timestamp
- rtc::ArrayView<const uint8_t> data = {},
+ ArrayView<const uint8_t> data = {},
uint32_t width = 0, // width of frame (SPS/IDR)
uint32_t height = 0, // height of frame (SPS/IDR)
bool generic = false) { // has generic descriptor
@@ -459,7 +459,7 @@
IsFirst first, // is first packet of frame
IsLast last, // is last packet of frame
uint32_t timestamp, // rtp timestamp
- rtc::ArrayView<const uint8_t> data = {},
+ ArrayView<const uint8_t> data = {},
uint32_t width = 0, // width of frame (SPS/IDR)
uint32_t height = 0) { // height of frame (SPS/IDR)
auto packet = std::make_unique<PacketBuffer::Packet>();
@@ -533,7 +533,7 @@
TEST_P(PacketBufferH264ParameterizedTest, GetBitstreamBufferPadding) {
int64_t seq_num = Rand();
- rtc::CopyOnWriteBuffer data = "some plain old data";
+ CopyOnWriteBuffer data = "some plain old data";
auto packet = std::make_unique<PacketBuffer::Packet>();
auto& h264_header =
diff --git a/modules/video_coding/rtp_vp8_ref_finder_unittest.cc b/modules/video_coding/rtp_vp8_ref_finder_unittest.cc
index f239609..0ed70b2 100644
--- a/modules/video_coding/rtp_vp8_ref_finder_unittest.cc
+++ b/modules/video_coding/rtp_vp8_ref_finder_unittest.cc
@@ -44,7 +44,7 @@
MATCHER_P2(HasIdAndRefs, id, refs, "") {
return Matches(Eq(id))(arg->Id()) &&
Matches(UnorderedElementsAreArray(refs))(
- rtc::ArrayView<int64_t>(arg->references, arg->num_references));
+ ArrayView<int64_t>(arg->references, arg->num_references));
}
Matcher<const std::vector<std::unique_ptr<EncodedFrame>>&>
diff --git a/modules/video_coding/rtp_vp9_ref_finder_unittest.cc b/modules/video_coding/rtp_vp9_ref_finder_unittest.cc
index fcdb09c..822f114 100644
--- a/modules/video_coding/rtp_vp9_ref_finder_unittest.cc
+++ b/modules/video_coding/rtp_vp9_ref_finder_unittest.cc
@@ -191,8 +191,7 @@
return false;
}
- rtc::ArrayView<int64_t> actual_refs((*it)->references,
- (*it)->num_references);
+ ArrayView<int64_t> actual_refs((*it)->references, (*it)->num_references);
if (!Matches(UnorderedElementsAreArray(expected_refs_))(actual_refs)) {
if (result_listener->IsInterested()) {
*result_listener << "Frame with frame_id:" << frame_id_ << " and "
diff --git a/modules/video_coding/svc/scalability_structure_key_svc_unittest.cc b/modules/video_coding/svc/scalability_structure_key_svc_unittest.cc
index 1c4445f..175b8fc 100644
--- a/modules/video_coding/svc/scalability_structure_key_svc_unittest.cc
+++ b/modules/video_coding/svc/scalability_structure_key_svc_unittest.cc
@@ -234,7 +234,7 @@
EXPECT_EQ(frames[13].temporal_id, 0);
EXPECT_EQ(frames[14].temporal_id, 0);
EXPECT_EQ(frames[15].temporal_id, 0);
- auto all_frames = rtc::MakeArrayView(frames.data(), frames.size());
+ auto all_frames = MakeArrayView(frames.data(), frames.size());
EXPECT_TRUE(wrapper.FrameReferencesAreValid(all_frames.subview(0, 13)));
// Frames starting from the frame#13 should not reference any earlier frames.
EXPECT_TRUE(wrapper.FrameReferencesAreValid(all_frames.subview(13)));
diff --git a/modules/video_coding/svc/scalability_structure_l2t2_key_shift_unittest.cc b/modules/video_coding/svc/scalability_structure_l2t2_key_shift_unittest.cc
index 42b8ff5..1532940 100644
--- a/modules/video_coding/svc/scalability_structure_l2t2_key_shift_unittest.cc
+++ b/modules/video_coding/svc/scalability_structure_l2t2_key_shift_unittest.cc
@@ -236,8 +236,8 @@
EXPECT_THAT(frames[4].temporal_id, 1);
// Expect frame[5] to be a key frame.
- EXPECT_TRUE(wrapper.FrameReferencesAreValid(
- rtc::MakeArrayView(frames.data() + 5, 4)));
+ EXPECT_TRUE(
+ wrapper.FrameReferencesAreValid(MakeArrayView(frames.data() + 5, 4)));
EXPECT_THAT(frames[5].spatial_id, 0);
EXPECT_THAT(frames[6].spatial_id, 1);
diff --git a/modules/video_coding/svc/scalability_structure_test_helpers.cc b/modules/video_coding/svc/scalability_structure_test_helpers.cc
index 08d470d..e5cb1ef 100644
--- a/modules/video_coding/svc/scalability_structure_test_helpers.cc
+++ b/modules/video_coding/svc/scalability_structure_test_helpers.cc
@@ -67,7 +67,7 @@
}
bool ScalabilityStructureWrapper::FrameReferencesAreValid(
- rtc::ArrayView<const GenericFrameInfo> frames) const {
+ ArrayView<const GenericFrameInfo> frames) const {
bool valid = true;
// VP9 and AV1 supports up to 8 buffers. Expect no more buffers are not used.
std::bitset<8> buffer_contains_frame;
diff --git a/modules/video_coding/svc/scalability_structure_test_helpers.h b/modules/video_coding/svc/scalability_structure_test_helpers.h
index 1d21764..42257b5 100644
--- a/modules/video_coding/svc/scalability_structure_test_helpers.h
+++ b/modules/video_coding/svc/scalability_structure_test_helpers.h
@@ -43,8 +43,7 @@
// Returns false and ADD_FAILUREs for frames with invalid references.
// In particular validates no frame frame reference to frame before frames[0].
// In error messages frames are indexed starting with 0.
- bool FrameReferencesAreValid(
- rtc::ArrayView<const GenericFrameInfo> frames) const;
+ bool FrameReferencesAreValid(ArrayView<const GenericFrameInfo> frames) const;
private:
ScalableVideoController& structure_controller_;
diff --git a/modules/video_coding/svc/scalability_structure_unittest.cc b/modules/video_coding/svc/scalability_structure_unittest.cc
index 656a26b..9a66df3 100644
--- a/modules/video_coding/svc/scalability_structure_unittest.cc
+++ b/modules/video_coding/svc/scalability_structure_unittest.cc
@@ -117,11 +117,11 @@
EXPECT_EQ(config.num_spatial_layers, static_config->num_spatial_layers);
EXPECT_EQ(config.num_temporal_layers, static_config->num_temporal_layers);
EXPECT_THAT(
- rtc::MakeArrayView(config.scaling_factor_num, config.num_spatial_layers),
+ MakeArrayView(config.scaling_factor_num, config.num_spatial_layers),
ElementsAreArray(static_config->scaling_factor_num,
static_config->num_spatial_layers));
EXPECT_THAT(
- rtc::MakeArrayView(config.scaling_factor_den, config.num_spatial_layers),
+ MakeArrayView(config.scaling_factor_den, config.num_spatial_layers),
ElementsAreArray(static_config->scaling_factor_den,
static_config->num_spatial_layers));
}
diff --git a/modules/video_coding/utility/ivf_file_reader.cc b/modules/video_coding/utility/ivf_file_reader.cc
index 9e7883e..c8ef46c 100644
--- a/modules/video_coding/utility/ivf_file_reader.cc
+++ b/modules/video_coding/utility/ivf_file_reader.cc
@@ -128,7 +128,7 @@
return std::nullopt;
}
- rtc::scoped_refptr<EncodedImageBuffer> payload = EncodedImageBuffer::Create();
+ scoped_refptr<EncodedImageBuffer> payload = EncodedImageBuffer::Create();
std::vector<size_t> layer_sizes;
// next_frame_header_ have to be presented by the way how it was loaded. If it
// is missing it means there is a bug in error handling.
diff --git a/modules/video_coding/utility/ivf_file_reader_unittest.cc b/modules/video_coding/utility/ivf_file_reader_unittest.cc
index c7a931d..bfd2b61 100644
--- a/modules/video_coding/utility/ivf_file_reader_unittest.cc
+++ b/modules/video_coding/utility/ivf_file_reader_unittest.cc
@@ -52,7 +52,7 @@
int spatial_layers_count) {
EncodedImage frame;
frame.SetSpatialIndex(spatial_layers_count);
- rtc::scoped_refptr<EncodedImageBuffer> payload = EncodedImageBuffer::Create(
+ scoped_refptr<EncodedImageBuffer> payload = EncodedImageBuffer::Create(
sizeof(kDummyPayload) * spatial_layers_count);
for (int i = 0; i < spatial_layers_count; ++i) {
memcpy(&payload->data()[i * sizeof(kDummyPayload)], kDummyPayload,
diff --git a/modules/video_coding/utility/qp_parser.cc b/modules/video_coding/utility/qp_parser.cc
index 501a152..491bae8 100644
--- a/modules/video_coding/utility/qp_parser.cc
+++ b/modules/video_coding/utility/qp_parser.cc
@@ -58,7 +58,7 @@
size_t frame_size) {
MutexLock lock(&mutex_);
bitstream_parser_.ParseBitstream(
- rtc::ArrayView<const uint8_t>(frame_data, frame_size));
+ ArrayView<const uint8_t>(frame_data, frame_size));
return bitstream_parser_.GetLastSliceQp();
}
@@ -67,7 +67,7 @@
size_t frame_size) {
MutexLock lock(&mutex_);
bitstream_parser_.ParseBitstream(
- rtc::ArrayView<const uint8_t>(frame_data, frame_size));
+ ArrayView<const uint8_t>(frame_data, frame_size));
return bitstream_parser_.GetLastSliceQp();
}
#endif
diff --git a/modules/video_coding/utility/simulcast_test_fixture_impl.cc b/modules/video_coding/utility/simulcast_test_fixture_impl.cc
index f032c2d..a8a3e80 100644
--- a/modules/video_coding/utility/simulcast_test_fixture_impl.cc
+++ b/modules/video_coding/utility/simulcast_test_fixture_impl.cc
@@ -149,7 +149,7 @@
public:
TestDecodedImageCallback() : decoded_frames_(0) {}
int32_t Decoded(VideoFrame& decoded_image) override {
- rtc::scoped_refptr<I420BufferInterface> i420_buffer =
+ scoped_refptr<I420BufferInterface> i420_buffer =
decoded_image.video_frame_buffer()->ToI420();
for (int i = 0; i < decoded_image.width(); ++i) {
EXPECT_NEAR(kColorY, i420_buffer->DataY()[i], 1);
@@ -192,7 +192,7 @@
}
// Fills in an I420Buffer from `plane_colors`.
-void CreateImage(const rtc::scoped_refptr<I420Buffer>& buffer,
+void CreateImage(const scoped_refptr<I420Buffer>& buffer,
int plane_colors[kNumOfPlanes]) {
SetPlane(buffer->MutableDataY(), plane_colors[0], buffer->width(),
buffer->height(), buffer->StrideY());
diff --git a/modules/video_coding/utility/simulcast_test_fixture_impl.h b/modules/video_coding/utility/simulcast_test_fixture_impl.h
index bf62dd4..79d891e 100644
--- a/modules/video_coding/utility/simulcast_test_fixture_impl.h
+++ b/modules/video_coding/utility/simulcast_test_fixture_impl.h
@@ -93,7 +93,7 @@
std::unique_ptr<VideoDecoder> decoder_;
MockDecodedImageCallback decoder_callback_;
VideoCodec settings_;
- rtc::scoped_refptr<I420Buffer> input_buffer_;
+ scoped_refptr<I420Buffer> input_buffer_;
std::unique_ptr<VideoFrame> input_frame_;
std::unique_ptr<SimulcastRateAllocator> rate_allocator_;
VideoCodecType codec_type_;
diff --git a/modules/video_coding/utility/vp9_uncompressed_header_parser.cc b/modules/video_coding/utility/vp9_uncompressed_header_parser.cc
index bbfac99..ca3c859 100644
--- a/modules/video_coding/utility/vp9_uncompressed_header_parser.cc
+++ b/modules/video_coding/utility/vp9_uncompressed_header_parser.cc
@@ -513,7 +513,7 @@
}
std::optional<Vp9UncompressedHeader> ParseUncompressedVp9Header(
- rtc::ArrayView<const uint8_t> buf) {
+ ArrayView<const uint8_t> buf) {
BitstreamReader reader(buf);
Vp9UncompressedHeader frame_info;
Parse(reader, &frame_info, /*qp_only=*/false);
@@ -526,7 +526,7 @@
namespace vp9 {
bool GetQp(const uint8_t* buf, size_t length, int* qp) {
- BitstreamReader reader(rtc::MakeArrayView(buf, length));
+ BitstreamReader reader(MakeArrayView(buf, length));
Vp9UncompressedHeader frame_info;
Parse(reader, &frame_info, /*qp_only=*/true);
if (!reader.Ok()) {
diff --git a/modules/video_coding/utility/vp9_uncompressed_header_parser.h b/modules/video_coding/utility/vp9_uncompressed_header_parser.h
index 524d6cf..0153a3b 100644
--- a/modules/video_coding/utility/vp9_uncompressed_header_parser.h
+++ b/modules/video_coding/utility/vp9_uncompressed_header_parser.h
@@ -151,7 +151,7 @@
// Parses the uncompressed header and populates (most) values in a
// UncompressedHeader struct. Returns nullopt on failure.
std::optional<Vp9UncompressedHeader> ParseUncompressedVp9Header(
- rtc::ArrayView<const uint8_t> buf);
+ ArrayView<const uint8_t> buf);
} // namespace webrtc
diff --git a/modules/video_coding/video_codec_initializer_unittest.cc b/modules/video_coding/video_codec_initializer_unittest.cc
index 2dea3f4..849c780 100644
--- a/modules/video_coding/video_codec_initializer_unittest.cc
+++ b/modules/video_coding/video_codec_initializer_unittest.cc
@@ -84,14 +84,14 @@
ASSERT_FALSE(num_spatial_streams.has_value());
VideoCodecVP8 vp8_settings = VideoEncoder::GetDefaultVp8Settings();
vp8_settings.numberOfTemporalLayers = num_temporal_streams;
- config_.encoder_specific_settings = rtc::make_ref_counted<
+ config_.encoder_specific_settings = make_ref_counted<
webrtc::VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings);
} else if (type == VideoCodecType::kVideoCodecVP9) {
ASSERT_TRUE(num_spatial_streams.has_value());
VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
vp9_settings.numberOfSpatialLayers = num_spatial_streams.value();
vp9_settings.numberOfTemporalLayers = num_temporal_streams;
- config_.encoder_specific_settings = rtc::make_ref_counted<
+ config_.encoder_specific_settings = make_ref_counted<
webrtc::VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
}
}