Update thread annotation macros in rtc_base to use RTC_ prefix

BUG=webrtc:8198

Review-Url: https://codereview.webrtc.org/3006133002
Cr-Original-Commit-Position: refs/heads/master@{#19714}
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: 3c6abd200cb9865402c96466fafe6867730cb2be
diff --git a/rtc_base/asyncinvoker.h b/rtc_base/asyncinvoker.h
index 455ded2..0b14e91 100644
--- a/rtc_base/asyncinvoker.h
+++ b/rtc_base/asyncinvoker.h
@@ -248,8 +248,8 @@
   void ThreadDestroyed();
 
   CriticalSection crit_;
-  Thread* thread_ GUARDED_BY(crit_);
-  AsyncInvoker invoker_ GUARDED_BY(crit_);
+  Thread* thread_ RTC_GUARDED_BY(crit_);
+  AsyncInvoker invoker_ RTC_GUARDED_BY(crit_);
 };
 
 }  // namespace rtc
diff --git a/rtc_base/bufferqueue.h b/rtc_base/bufferqueue.h
index 3abb8cf..82f1ac1 100644
--- a/rtc_base/bufferqueue.h
+++ b/rtc_base/bufferqueue.h
@@ -50,8 +50,8 @@
   size_t capacity_;
   size_t default_size_;
   CriticalSection crit_;
-  std::deque<Buffer*> queue_ GUARDED_BY(crit_);
-  std::vector<Buffer*> free_list_ GUARDED_BY(crit_);
+  std::deque<Buffer*> queue_ RTC_GUARDED_BY(crit_);
+  std::vector<Buffer*> free_list_ RTC_GUARDED_BY(crit_);
 
   RTC_DISALLOW_COPY_AND_ASSIGN(BufferQueue);
 };
diff --git a/rtc_base/criticalsection.cc b/rtc_base/criticalsection.cc
index 08acb13..e73b23c 100644
--- a/rtc_base/criticalsection.cc
+++ b/rtc_base/criticalsection.cc
@@ -56,7 +56,7 @@
 #endif
 }
 
-void CriticalSection::Enter() const EXCLUSIVE_LOCK_FUNCTION() {
+void CriticalSection::Enter() const RTC_EXCLUSIVE_LOCK_FUNCTION() {
 #if defined(WEBRTC_WIN)
   EnterCriticalSection(&crit_);
 #elif defined(WEBRTC_POSIX)
@@ -115,7 +115,7 @@
 #endif
 }
 
-bool CriticalSection::TryEnter() const EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+bool CriticalSection::TryEnter() const RTC_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
 #if defined(WEBRTC_WIN)
   return TryEnterCriticalSection(&crit_) != FALSE;
 #elif defined(WEBRTC_POSIX)
@@ -148,7 +148,7 @@
 #endif
 }
 
-void CriticalSection::Leave() const UNLOCK_FUNCTION() {
+void CriticalSection::Leave() const RTC_UNLOCK_FUNCTION() {
   RTC_DCHECK(CurrentThreadIsOwner());
 #if defined(WEBRTC_WIN)
   LeaveCriticalSection(&crit_);
diff --git a/rtc_base/criticalsection.h b/rtc_base/criticalsection.h
index 38172d7..fb55aaf 100644
--- a/rtc_base/criticalsection.h
+++ b/rtc_base/criticalsection.h
@@ -52,14 +52,14 @@
 // Locking methods (Enter, TryEnter, Leave)are const to permit protecting
 // members inside a const context without requiring mutable CriticalSections
 // everywhere.
-class LOCKABLE CriticalSection {
+class RTC_LOCKABLE CriticalSection {
  public:
   CriticalSection();
   ~CriticalSection();
 
-  void Enter() const EXCLUSIVE_LOCK_FUNCTION();
-  bool TryEnter() const EXCLUSIVE_TRYLOCK_FUNCTION(true);
-  void Leave() const UNLOCK_FUNCTION();
+  void Enter() const RTC_EXCLUSIVE_LOCK_FUNCTION();
+  bool TryEnter() const RTC_EXCLUSIVE_TRYLOCK_FUNCTION(true);
+  void Leave() const RTC_UNLOCK_FUNCTION();
 
  private:
   // Use only for RTC_DCHECKing.
@@ -91,10 +91,11 @@
 };
 
 // CritScope, for serializing execution through a scope.
-class SCOPED_LOCKABLE CritScope {
+class RTC_SCOPED_LOCKABLE CritScope {
  public:
-  explicit CritScope(const CriticalSection* cs) EXCLUSIVE_LOCK_FUNCTION(cs);
-  ~CritScope() UNLOCK_FUNCTION();
+  explicit CritScope(const CriticalSection* cs) RTC_EXCLUSIVE_LOCK_FUNCTION(cs);
+  ~CritScope() RTC_UNLOCK_FUNCTION();
+
  private:
   const CriticalSection* const cs_;
   RTC_DISALLOW_COPY_AND_ASSIGN(CritScope);
@@ -127,11 +128,11 @@
 
 // A POD lock used to protect global variables. Do NOT use for other purposes.
 // No custom constructor or private data member should be added.
-class LOCKABLE GlobalLockPod {
+class RTC_LOCKABLE GlobalLockPod {
  public:
-  void Lock() EXCLUSIVE_LOCK_FUNCTION();
+  void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION();
 
-  void Unlock() UNLOCK_FUNCTION();
+  void Unlock() RTC_UNLOCK_FUNCTION();
 
   volatile int lock_acquired;
 };
@@ -142,10 +143,12 @@
 };
 
 // GlobalLockScope, for serializing execution through a scope.
-class SCOPED_LOCKABLE GlobalLockScope {
+class RTC_SCOPED_LOCKABLE GlobalLockScope {
  public:
-  explicit GlobalLockScope(GlobalLockPod* lock) EXCLUSIVE_LOCK_FUNCTION(lock);
-  ~GlobalLockScope() UNLOCK_FUNCTION();
+  explicit GlobalLockScope(GlobalLockPod* lock)
+      RTC_EXCLUSIVE_LOCK_FUNCTION(lock);
+  ~GlobalLockScope() RTC_UNLOCK_FUNCTION();
+
  private:
   GlobalLockPod* const lock_;
   RTC_DISALLOW_COPY_AND_ASSIGN(GlobalLockScope);
diff --git a/rtc_base/criticalsection_unittest.cc b/rtc_base/criticalsection_unittest.cc
index 2e136bf..c264e44 100644
--- a/rtc_base/criticalsection_unittest.cc
+++ b/rtc_base/criticalsection_unittest.cc
@@ -113,14 +113,10 @@
   int shared_value_;
 };
 
-class LOCKABLE CriticalSectionLock {
+class RTC_LOCKABLE CriticalSectionLock {
  public:
-  void Lock() EXCLUSIVE_LOCK_FUNCTION() {
-    cs_.Enter();
-  }
-  void Unlock() UNLOCK_FUNCTION() {
-    cs_.Leave();
-  }
+  void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION() { cs_.Enter(); }
+  void Unlock() RTC_UNLOCK_FUNCTION() { cs_.Leave(); }
 
  private:
   CriticalSection cs_;
diff --git a/rtc_base/event_tracer.cc b/rtc_base/event_tracer.cc
index 008e698..c3b459f 100644
--- a/rtc_base/event_tracer.cc
+++ b/rtc_base/event_tracer.cc
@@ -320,7 +320,7 @@
   }
 
   rtc::CriticalSection crit_;
-  std::vector<TraceEvent> trace_events_ GUARDED_BY(crit_);
+  std::vector<TraceEvent> trace_events_ RTC_GUARDED_BY(crit_);
   rtc::PlatformThread logging_thread_;
   rtc::Event shutdown_event_;
   rtc::ThreadChecker thread_checker_;
diff --git a/rtc_base/fakeclock.h b/rtc_base/fakeclock.h
index c5bdab1..b6a84e6 100644
--- a/rtc_base/fakeclock.h
+++ b/rtc_base/fakeclock.h
@@ -42,7 +42,7 @@
   }
  private:
   CriticalSection lock_;
-  int64_t time_ GUARDED_BY(lock_) = 0;
+  int64_t time_ RTC_GUARDED_BY(lock_) = 0;
 };
 
 // Helper class that sets itself as the global clock in its constructor and
diff --git a/rtc_base/logging.cc b/rtc_base/logging.cc
index 4f46b0a..d52bd7a 100644
--- a/rtc_base/logging.cc
+++ b/rtc_base/logging.cc
@@ -111,7 +111,7 @@
 // Note: we explicitly do not clean this up, because of the uncertain ordering
 // of destructors at program exit.  Let the person who sets the stream trigger
 // cleanup by setting to null, or let it leak (safe at program exit).
-LogMessage::StreamList LogMessage::streams_ GUARDED_BY(g_log_crit);
+LogMessage::StreamList LogMessage::streams_ RTC_GUARDED_BY(g_log_crit);
 
 // Boolean options default to false (0)
 bool LogMessage::thread_, LogMessage::timestamp_;
@@ -333,7 +333,8 @@
   LogToDebug(debug_level);
 }
 
-void LogMessage::UpdateMinLogSeverity() EXCLUSIVE_LOCKS_REQUIRED(g_log_crit) {
+void LogMessage::UpdateMinLogSeverity()
+    RTC_EXCLUSIVE_LOCKS_REQUIRED(g_log_crit) {
   LoggingSeverity min_sev = dbg_sev_;
   for (auto& kv : streams_) {
     min_sev = std::min(dbg_sev_, kv.second);
diff --git a/rtc_base/messagequeue.cc b/rtc_base/messagequeue.cc
index 883735c..3dd1142 100644
--- a/rtc_base/messagequeue.cc
+++ b/rtc_base/messagequeue.cc
@@ -23,16 +23,16 @@
 const int kMaxMsgLatency = 150;  // 150 ms
 const int kSlowDispatchLoggingThreshold = 50;  // 50 ms
 
-class SCOPED_LOCKABLE MarkProcessingCritScope {
+class RTC_SCOPED_LOCKABLE MarkProcessingCritScope {
  public:
   MarkProcessingCritScope(const CriticalSection* cs, size_t* processing)
-      EXCLUSIVE_LOCK_FUNCTION(cs)
+      RTC_EXCLUSIVE_LOCK_FUNCTION(cs)
       : cs_(cs), processing_(processing) {
     cs_->Enter();
     *processing_ += 1;
   }
 
-  ~MarkProcessingCritScope() UNLOCK_FUNCTION() {
+  ~MarkProcessingCritScope() RTC_UNLOCK_FUNCTION() {
     *processing_ -= 1;
     cs_->Leave();
   }
diff --git a/rtc_base/messagequeue.h b/rtc_base/messagequeue.h
index 2345dce..79217fb 100644
--- a/rtc_base/messagequeue.h
+++ b/rtc_base/messagequeue.h
@@ -68,13 +68,13 @@
 
   static MessageQueueManager* instance_;
   // This list contains all live MessageQueues.
-  std::vector<MessageQueue*> message_queues_ GUARDED_BY(crit_);
+  std::vector<MessageQueue*> message_queues_ RTC_GUARDED_BY(crit_);
 
   // Methods that don't modify the list of message queues may be called in a
   // re-entrant fashion. "processing_" keeps track of the depth of re-entrant
   // calls.
   CriticalSection crit_;
-  size_t processing_ GUARDED_BY(crit_);
+  size_t processing_ RTC_GUARDED_BY(crit_);
 };
 
 // Derive from this for specialized data
@@ -306,9 +306,9 @@
 
   bool fPeekKeep_;
   Message msgPeek_;
-  MessageList msgq_ GUARDED_BY(crit_);
-  PriorityQueue dmsgq_ GUARDED_BY(crit_);
-  uint32_t dmsgq_next_num_ GUARDED_BY(crit_);
+  MessageList msgq_ RTC_GUARDED_BY(crit_);
+  PriorityQueue dmsgq_ RTC_GUARDED_BY(crit_);
+  uint32_t dmsgq_next_num_ RTC_GUARDED_BY(crit_);
   CriticalSection crit_;
   bool fInitialized_;
   bool fDestroyed_;
diff --git a/rtc_base/physicalsocketserver.h b/rtc_base/physicalsocketserver.h
index 680b4dd..c91e14e 100644
--- a/rtc_base/physicalsocketserver.h
+++ b/rtc_base/physicalsocketserver.h
@@ -203,7 +203,7 @@
   SOCKET s_;
   bool udp_;
   CriticalSection crit_;
-  int error_ GUARDED_BY(crit_);
+  int error_ RTC_GUARDED_BY(crit_);
   ConnState state_;
   AsyncResolver* resolver_;
 
diff --git a/rtc_base/race_checker.h b/rtc_base/race_checker.h
index b49db53..f0506c8 100644
--- a/rtc_base/race_checker.h
+++ b/rtc_base/race_checker.h
@@ -23,14 +23,14 @@
 
 // Best-effort race-checking implementation. This primitive uses no
 // synchronization at all to be as-fast-as-possible in the non-racy case.
-class LOCKABLE RaceChecker {
+class RTC_LOCKABLE RaceChecker {
  public:
   friend class internal::RaceCheckerScope;
   RaceChecker();
 
  private:
-  bool Acquire() const EXCLUSIVE_LOCK_FUNCTION();
-  void Release() const UNLOCK_FUNCTION();
+  bool Acquire() const RTC_EXCLUSIVE_LOCK_FUNCTION();
+  void Release() const RTC_UNLOCK_FUNCTION();
 
   // Volatile to prevent code being optimized away in Acquire()/Release().
   mutable volatile int access_count_ = 0;
@@ -38,25 +38,25 @@
 };
 
 namespace internal {
-class SCOPED_LOCKABLE RaceCheckerScope {
+class RTC_SCOPED_LOCKABLE RaceCheckerScope {
  public:
   explicit RaceCheckerScope(const RaceChecker* race_checker)
-      EXCLUSIVE_LOCK_FUNCTION(race_checker);
+      RTC_EXCLUSIVE_LOCK_FUNCTION(race_checker);
 
   bool RaceDetected() const;
-  ~RaceCheckerScope() UNLOCK_FUNCTION();
+  ~RaceCheckerScope() RTC_UNLOCK_FUNCTION();
 
  private:
   const RaceChecker* const race_checker_;
   const bool race_check_ok_;
 };
 
-class SCOPED_LOCKABLE RaceCheckerScopeDoNothing {
+class RTC_SCOPED_LOCKABLE RaceCheckerScopeDoNothing {
  public:
   explicit RaceCheckerScopeDoNothing(const RaceChecker* race_checker)
-      EXCLUSIVE_LOCK_FUNCTION(race_checker) {}
+      RTC_EXCLUSIVE_LOCK_FUNCTION(race_checker) {}
 
-  ~RaceCheckerScopeDoNothing() UNLOCK_FUNCTION() {}
+  ~RaceCheckerScopeDoNothing() RTC_UNLOCK_FUNCTION() {}
 };
 
 }  // namespace internal
diff --git a/rtc_base/rate_limiter.h b/rtc_base/rate_limiter.h
index b4b9080..e3f6249 100644
--- a/rtc_base/rate_limiter.h
+++ b/rtc_base/rate_limiter.h
@@ -44,9 +44,9 @@
  private:
   const Clock* const clock_;
   rtc::CriticalSection lock_;
-  RateStatistics current_rate_ GUARDED_BY(lock_);
-  int64_t window_size_ms_ GUARDED_BY(lock_);
-  uint32_t max_rate_bps_ GUARDED_BY(lock_);
+  RateStatistics current_rate_ RTC_GUARDED_BY(lock_);
+  int64_t window_size_ms_ RTC_GUARDED_BY(lock_);
+  uint32_t max_rate_bps_ RTC_GUARDED_BY(lock_);
 
   RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RateLimiter);
 };
diff --git a/rtc_base/sequenced_task_checker.h b/rtc_base/sequenced_task_checker.h
index 40b07f9..35bd3cd 100644
--- a/rtc_base/sequenced_task_checker.h
+++ b/rtc_base/sequenced_task_checker.h
@@ -54,17 +54,18 @@
 //
 // In Release mode, CalledOnValidThread will always return true.
 #if ENABLE_SEQUENCED_TASK_CHECKER
-class LOCKABLE SequencedTaskChecker : public SequencedTaskCheckerImpl {};
+class RTC_LOCKABLE SequencedTaskChecker : public SequencedTaskCheckerImpl {};
 #else
-class LOCKABLE SequencedTaskChecker : public SequencedTaskCheckerDoNothing {};
+class RTC_LOCKABLE SequencedTaskChecker : public SequencedTaskCheckerDoNothing {
+};
 #endif  // ENABLE_SEQUENCED_TASK_CHECKER_H_
 
 namespace internal {
-class SCOPED_LOCKABLE SequencedTaskCheckerScope {
+class RTC_SCOPED_LOCKABLE SequencedTaskCheckerScope {
  public:
   explicit SequencedTaskCheckerScope(const SequencedTaskChecker* checker)
-      EXCLUSIVE_LOCK_FUNCTION(checker);
-  ~SequencedTaskCheckerScope() UNLOCK_FUNCTION();
+      RTC_EXCLUSIVE_LOCK_FUNCTION(checker);
+  ~SequencedTaskCheckerScope() RTC_UNLOCK_FUNCTION();
 };
 
 }  // namespace internal
diff --git a/rtc_base/sequenced_task_checker_unittest.cc b/rtc_base/sequenced_task_checker_unittest.cc
index 55fc474..73bc8ea 100644
--- a/rtc_base/sequenced_task_checker_unittest.cc
+++ b/rtc_base/sequenced_task_checker_unittest.cc
@@ -237,7 +237,7 @@
   }
 
  private:
-  bool test_var_ GUARDED_BY(&checker_);
+  bool test_var_ RTC_GUARDED_BY(&checker_);
   SequencedTaskChecker checker_;
 };
 
diff --git a/rtc_base/signalthread.h b/rtc_base/signalthread.h
index f6722a7..1c6c876 100644
--- a/rtc_base/signalthread.h
+++ b/rtc_base/signalthread.h
@@ -119,9 +119,9 @@
     RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Worker);
   };
 
-  class SCOPED_LOCKABLE EnterExit {
+  class RTC_SCOPED_LOCKABLE EnterExit {
    public:
-    explicit EnterExit(SignalThread* t) EXCLUSIVE_LOCK_FUNCTION(t->cs_)
+    explicit EnterExit(SignalThread* t) RTC_EXCLUSIVE_LOCK_FUNCTION(t->cs_)
         : t_(t) {
       t_->cs_.Enter();
       // If refcount_ is zero then the object has already been deleted and we
@@ -129,7 +129,7 @@
       RTC_DCHECK_NE(0, t_->refcount_);
       ++t_->refcount_;
     }
-    ~EnterExit() UNLOCK_FUNCTION() {
+    ~EnterExit() RTC_UNLOCK_FUNCTION() {
       bool d = (0 == --t_->refcount_);
       t_->cs_.Leave();
       if (d)
diff --git a/rtc_base/stream.h b/rtc_base/stream.h
index 8b7b036..e8418ab 100644
--- a/rtc_base/stream.h
+++ b/rtc_base/stream.h
@@ -539,26 +539,30 @@
  private:
   // Helper method that implements ReadOffset. Caller must acquire a lock
   // when calling this method.
-  StreamResult ReadOffsetLocked(void* buffer, size_t bytes, size_t offset,
+  StreamResult ReadOffsetLocked(void* buffer,
+                                size_t bytes,
+                                size_t offset,
                                 size_t* bytes_read)
-      EXCLUSIVE_LOCKS_REQUIRED(crit_);
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
 
   // Helper method that implements WriteOffset. Caller must acquire a lock
   // when calling this method.
-  StreamResult WriteOffsetLocked(const void* buffer, size_t bytes,
-                                 size_t offset, size_t* bytes_written)
-      EXCLUSIVE_LOCKS_REQUIRED(crit_);
+  StreamResult WriteOffsetLocked(const void* buffer,
+                                 size_t bytes,
+                                 size_t offset,
+                                 size_t* bytes_written)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
 
   // keeps the opened/closed state of the stream
-  StreamState state_ GUARDED_BY(crit_);
+  StreamState state_ RTC_GUARDED_BY(crit_);
   // the allocated buffer
-  std::unique_ptr<char[]> buffer_ GUARDED_BY(crit_);
+  std::unique_ptr<char[]> buffer_ RTC_GUARDED_BY(crit_);
   // size of the allocated buffer
-  size_t buffer_length_ GUARDED_BY(crit_);
+  size_t buffer_length_ RTC_GUARDED_BY(crit_);
   // amount of readable data in the buffer
-  size_t data_length_ GUARDED_BY(crit_);
+  size_t data_length_ RTC_GUARDED_BY(crit_);
   // offset to the readable data
-  size_t read_position_ GUARDED_BY(crit_);
+  size_t read_position_ RTC_GUARDED_BY(crit_);
   // stream callbacks are dispatched on this thread
   Thread* owner_;
   // object lock
diff --git a/rtc_base/swap_queue.h b/rtc_base/swap_queue.h
index 3519686..95bae21 100644
--- a/rtc_base/swap_queue.h
+++ b/rtc_base/swap_queue.h
@@ -192,16 +192,16 @@
 
   // TODO(peah): Change this to use std::function() once we can use C++11 std
   // lib.
-  QueueItemVerifier queue_item_verifier_ GUARDED_BY(crit_queue_);
+  QueueItemVerifier queue_item_verifier_ RTC_GUARDED_BY(crit_queue_);
 
   // (next_read_index_ + num_elements_) % queue_.size() =
   //  next_write_index_
-  size_t next_write_index_ GUARDED_BY(crit_queue_) = 0;
-  size_t next_read_index_ GUARDED_BY(crit_queue_) = 0;
-  size_t num_elements_ GUARDED_BY(crit_queue_) = 0;
+  size_t next_write_index_ RTC_GUARDED_BY(crit_queue_) = 0;
+  size_t next_read_index_ RTC_GUARDED_BY(crit_queue_) = 0;
+  size_t num_elements_ RTC_GUARDED_BY(crit_queue_) = 0;
 
   // queue_.size() is constant.
-  std::vector<T> queue_ GUARDED_BY(crit_queue_);
+  std::vector<T> queue_ RTC_GUARDED_BY(crit_queue_);
 
   RTC_DISALLOW_COPY_AND_ASSIGN(SwapQueue);
 };
diff --git a/rtc_base/task_queue.h b/rtc_base/task_queue.h
index 966b29c..64e6a6c 100644
--- a/rtc_base/task_queue.h
+++ b/rtc_base/task_queue.h
@@ -149,7 +149,7 @@
 // TaskQueue itself has been deleted or it may happen synchronously while the
 // TaskQueue instance is being deleted.  This may vary from one OS to the next
 // so assumptions about lifetimes of pending tasks should not be made.
-class LOCKABLE TaskQueue {
+class RTC_LOCKABLE TaskQueue {
  public:
   // TaskQueue priority levels. On some platforms these will map to thread
   // priorities, on others such as Mac and iOS, GCD queue priorities.
diff --git a/rtc_base/task_queue_libevent.cc b/rtc_base/task_queue_libevent.cc
index 2c60e9e..17da0f0 100644
--- a/rtc_base/task_queue_libevent.cc
+++ b/rtc_base/task_queue_libevent.cc
@@ -150,9 +150,9 @@
   std::unique_ptr<event> wakeup_event_;
   PlatformThread thread_;
   rtc::CriticalSection pending_lock_;
-  std::list<std::unique_ptr<QueuedTask>> pending_ GUARDED_BY(pending_lock_);
+  std::list<std::unique_ptr<QueuedTask>> pending_ RTC_GUARDED_BY(pending_lock_);
   std::list<scoped_refptr<ReplyTaskOwnerRef>> pending_replies_
-      GUARDED_BY(pending_lock_);
+      RTC_GUARDED_BY(pending_lock_);
 };
 
 struct TaskQueue::Impl::QueueContext {
diff --git a/rtc_base/thread.h b/rtc_base/thread.h
index f037d8a..d072fac 100644
--- a/rtc_base/thread.h
+++ b/rtc_base/thread.h
@@ -100,7 +100,7 @@
 
 // WARNING! SUBCLASSES MUST CALL Stop() IN THEIR DESTRUCTORS!  See ~Thread().
 
-class LOCKABLE Thread : public MessageQueue {
+class RTC_LOCKABLE Thread : public MessageQueue {
  public:
   // DEPRECATED.
   // The default constructor should not be used because it hides whether or
diff --git a/rtc_base/thread_checker.h b/rtc_base/thread_checker.h
index 70daf5a..769b7f0 100644
--- a/rtc_base/thread_checker.h
+++ b/rtc_base/thread_checker.h
@@ -71,22 +71,20 @@
 //
 // In Release mode, CalledOnValidThread will always return true.
 #if ENABLE_THREAD_CHECKER
-class LOCKABLE ThreadChecker : public ThreadCheckerImpl {
-};
+class RTC_LOCKABLE ThreadChecker : public ThreadCheckerImpl {};
 #else
-class LOCKABLE ThreadChecker : public ThreadCheckerDoNothing {
-};
+class RTC_LOCKABLE ThreadChecker : public ThreadCheckerDoNothing {};
 #endif  // ENABLE_THREAD_CHECKER
 
 #undef ENABLE_THREAD_CHECKER
 
 namespace internal {
-class SCOPED_LOCKABLE AnnounceOnThread {
+class RTC_SCOPED_LOCKABLE AnnounceOnThread {
  public:
-  template<typename ThreadLikeObject>
+  template <typename ThreadLikeObject>
   explicit AnnounceOnThread(const ThreadLikeObject* thread_like_object)
-      EXCLUSIVE_LOCK_FUNCTION(thread_like_object) {}
-  ~AnnounceOnThread() UNLOCK_FUNCTION() {}
+      RTC_EXCLUSIVE_LOCK_FUNCTION(thread_like_object) {}
+  ~AnnounceOnThread() RTC_UNLOCK_FUNCTION() {}
 
   template<typename ThreadLikeObject>
   static bool IsCurrent(const ThreadLikeObject* thread_like_object) {
diff --git a/rtc_base/thread_unittest.cc b/rtc_base/thread_unittest.cc
index e7701e8..7631f5e 100644
--- a/rtc_base/thread_unittest.cc
+++ b/rtc_base/thread_unittest.cc
@@ -336,7 +336,7 @@
 
    private:
     CriticalSection crit_;
-    bool value_ GUARDED_BY(crit_);
+    bool value_ RTC_GUARDED_BY(crit_);
   };
 
   struct LocalFuncs {