Refactoring WebRTC Java/JNI audio recording in C++ and Java.

This is a big refactoring of the existing C++/JNI/Java support for audio recording in native WebRTC:

- Removes unused code and old WEBRTC_TRACE logging macros
- Now uses the optimal sample rate and buffer size in Java AudioRecord (a hard-coded sample rate was used before; see the first sketch below this list)
- Brings the code more in line with the implementation in Chrome
- Adds helper methods for JNI handling to improve readability
- Changes the threading model (the high-priority audio thread now lives in Java and the C++ side only acts as a proxy; see the second sketch below)
- Adds basic thread checks
- Removes all locks on the C++ side
- Removes all locks on the Java side
- Improves construction/destruction
- Additional cleanup
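
For reference, a minimal sketch of how the optimal recording parameters can be
queried on the Java side (illustrative only; everything except the Android
framework APIs is made up):

  import android.content.Context;
  import android.media.AudioFormat;
  import android.media.AudioManager;
  import android.media.AudioRecord;

  // Returns the smallest buffer size (in bytes) that AudioRecord accepts
  // for 16-bit mono PCM at the device's native sample rate.
  static int minBufferSizeInBytes(Context context) {
    AudioManager audioManager =
        (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
    // PROPERTY_OUTPUT_SAMPLE_RATE requires API level 17; fall back to
    // 44100 Hz (which all Android devices must support) if it is unset.
    String rate =
        audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
    int sampleRateHz = (rate != null) ? Integer.parseInt(rate) : 44100;
    return AudioRecord.getMinBufferSize(
        sampleRateHz, AudioFormat.CHANNEL_IN_MONO,
        AudioFormat.ENCODING_PCM_16BIT);
  }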

Tested using AppRTCDemo and WebRTCDemo APKs on N6, N5, N7, Samsung Galaxy S4 and
Samsung Galaxy S4 mini (which uses 44.1 kHz as its native sample rate).
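
The new threading model can be sketched as follows (hypothetical skeleton; only
the native entry point nativeDataIsRecorded and its "(IJ)V" signature are taken
from the patch below, all other names are illustrative):

  import java.nio.ByteBuffer;

  import android.media.AudioRecord;
  import android.os.Process;

  class AudioRecordThread extends Thread {
    private final AudioRecord audioRecord;   // configured elsewhere
    private final ByteBuffer byteBuffer;     // direct buffer shared with C++
    private final long nativeAudioRecord;    // pointer to AudioRecordJni
    private volatile boolean keepAlive = true;

    AudioRecordThread(AudioRecord audioRecord, ByteBuffer byteBuffer,
                      long nativeAudioRecord) {
      this.audioRecord = audioRecord;
      this.byteBuffer = byteBuffer;
      this.nativeAudioRecord = nativeAudioRecord;
    }

    @Override
    public void run() {
      // The only real-time sensitive thread; C++ never blocks on it.
      Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
      audioRecord.startRecording();
      while (keepAlive) {
        int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity());
        if (bytesRead == byteBuffer.capacity()) {
          // Proxy the captured frame to C++ (AudioRecordJni::DataIsRecorded).
          nativeDataIsRecorded(bytesRead, nativeAudioRecord);
        }
      }
      audioRecord.stop();
    }

    public void joinThread() {
      keepAlive = false;
    }

    // Registered from C++ via RegisterNatives.
    private native void nativeDataIsRecorded(int bytes, long nativeAudioRecord);
  }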

BUG=NONE
R=magjed@webrtc.org, perkj@webrtc.org, pthatcher@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/33969004

Cr-Commit-Position: refs/heads/master@{#8325}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8325 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/talk/app/webrtc/peerconnectionfactory.cc b/talk/app/webrtc/peerconnectionfactory.cc
index 9f7e679..1191e5a 100644
--- a/talk/app/webrtc/peerconnectionfactory.cc
+++ b/talk/app/webrtc/peerconnectionfactory.cc
@@ -147,16 +147,16 @@
 
   cricket::DummyDeviceManager* device_manager(
       new cricket::DummyDeviceManager());
+
   // TODO:  Need to make sure only one VoE is created inside
   // WebRtcMediaEngine.
-  cricket::MediaEngineInterface* media_engine(
-      cricket::WebRtcMediaEngineFactory::Create(default_adm_.get(),
-                                                NULL,  // No secondary adm.
-                                                video_encoder_factory_.get(),
-                                                video_decoder_factory_.get()));
+  cricket::MediaEngineInterface* media_engine =
+      worker_thread_->Invoke<cricket::MediaEngineInterface*>(rtc::Bind(
+      &PeerConnectionFactory::CreateMediaEngine_w, this));
 
   channel_manager_.reset(new cricket::ChannelManager(
       media_engine, device_manager, worker_thread_));
+
   channel_manager_->SetVideoRtxEnabled(true);
   if (!channel_manager_->Init()) {
     return false;
@@ -252,4 +252,11 @@
   return worker_thread_;
 }
 
+cricket::MediaEngineInterface* PeerConnectionFactory::CreateMediaEngine_w() {
+  ASSERT(worker_thread_ == rtc::Thread::Current());
+  return cricket::WebRtcMediaEngineFactory::Create(
+      default_adm_.get(), NULL, video_encoder_factory_.get(),
+      video_decoder_factory_.get());
+}
+
 }  // namespace webrtc
diff --git a/talk/app/webrtc/peerconnectionfactory.h b/talk/app/webrtc/peerconnectionfactory.h
index 4826b97..e35c447 100644
--- a/talk/app/webrtc/peerconnectionfactory.h
+++ b/talk/app/webrtc/peerconnectionfactory.h
@@ -90,6 +90,8 @@
   virtual ~PeerConnectionFactory();
 
  private:
+  cricket::MediaEngineInterface* CreateMediaEngine_w();
+
   bool owns_ptrs_;
   bool wraps_current_thread_;
   rtc::Thread* signaling_thread_;
diff --git a/talk/media/webrtc/webrtcvoiceengine.cc b/talk/media/webrtc/webrtcvoiceengine.cc
index 1de22bc..df07cd5 100644
--- a/talk/media/webrtc/webrtcvoiceengine.cc
+++ b/talk/media/webrtc/webrtcvoiceengine.cc
@@ -589,6 +589,7 @@
 }
 
 bool WebRtcVoiceEngine::Init(rtc::Thread* worker_thread) {
+  ASSERT(worker_thread == rtc::Thread::Current());
   LOG(LS_INFO) << "WebRtcVoiceEngine::Init";
   bool res = InitInternal();
   if (res) {
diff --git a/talk/session/media/channelmanager.cc b/talk/session/media/channelmanager.cc
index ecfe851..e3de339 100644
--- a/talk/session/media/channelmanager.cc
+++ b/talk/session/media/channelmanager.cc
@@ -149,6 +149,10 @@
     // shutdown.
     ShutdownSrtp();
   }
+  // Always delete the media engine on the worker thread to match how it was
+  // created.
+  worker_thread_->Invoke<void>(Bind(
+      &ChannelManager::DeleteMediaEngine_w, this));
 }
 
 bool ChannelManager::SetVideoRtxEnabled(bool enable) {
@@ -215,89 +219,102 @@
   if (initialized_) {
     return false;
   }
-
   ASSERT(worker_thread_ != NULL);
-  if (worker_thread_) {
-    if (worker_thread_ != rtc::Thread::Current()) {
-      // Do not allow invoking calls to other threads on the worker thread.
-      worker_thread_->Invoke<bool>(rtc::Bind(
-          &rtc::Thread::SetAllowBlockingCalls, worker_thread_, false));
-    }
-
-    if (media_engine_->Init(worker_thread_)) {
-      initialized_ = true;
-
-      // Now that we're initialized, apply any stored preferences. A preferred
-      // device might have been unplugged. In this case, we fallback to the
-      // default device but keep the user preferences. The preferences are
-      // changed only when the Javascript FE changes them.
-      const std::string preferred_audio_in_device = audio_in_device_;
-      const std::string preferred_audio_out_device = audio_out_device_;
-      const std::string preferred_camera_device = camera_device_;
-      Device device;
-      if (!device_manager_->GetAudioInputDevice(audio_in_device_, &device)) {
-        LOG(LS_WARNING) << "The preferred microphone '" << audio_in_device_
-                        << "' is unavailable. Fall back to the default.";
-        audio_in_device_ = DeviceManagerInterface::kDefaultDeviceName;
-      }
-      if (!device_manager_->GetAudioOutputDevice(audio_out_device_, &device)) {
-        LOG(LS_WARNING) << "The preferred speaker '" << audio_out_device_
-                        << "' is unavailable. Fall back to the default.";
-        audio_out_device_ = DeviceManagerInterface::kDefaultDeviceName;
-      }
-      if (!device_manager_->GetVideoCaptureDevice(camera_device_, &device)) {
-        if (!camera_device_.empty()) {
-          LOG(LS_WARNING) << "The preferred camera '" << camera_device_
-                          << "' is unavailable. Fall back to the default.";
-        }
-        camera_device_ = DeviceManagerInterface::kDefaultDeviceName;
-      }
-
-      if (!SetAudioOptions(audio_in_device_, audio_out_device_,
-                           audio_options_, audio_delay_offset_)) {
-        LOG(LS_WARNING) << "Failed to SetAudioOptions with"
-                        << " microphone: " << audio_in_device_
-                        << " speaker: " << audio_out_device_
-                        << " options: " << audio_options_.ToString()
-                        << " delay: " << audio_delay_offset_;
-      }
-
-      // If audio_output_volume_ has been set via SetOutputVolume(), set the
-      // audio output volume of the engine.
-      if (kNotSetOutputVolume != audio_output_volume_ &&
-          !SetOutputVolume(audio_output_volume_)) {
-        LOG(LS_WARNING) << "Failed to SetOutputVolume to "
-                        << audio_output_volume_;
-      }
-      if (!SetCaptureDevice(camera_device_) && !camera_device_.empty()) {
-        LOG(LS_WARNING) << "Failed to SetCaptureDevice with camera: "
-                        << camera_device_;
-      }
-
-      // Restore the user preferences.
-      audio_in_device_ = preferred_audio_in_device;
-      audio_out_device_ = preferred_audio_out_device;
-      camera_device_ = preferred_camera_device;
-
-      // Now apply the default video codec that has been set earlier.
-      if (default_video_encoder_config_.max_codec.id != 0) {
-        SetDefaultVideoEncoderConfig(default_video_encoder_config_);
-      }
-    }
+  if (!worker_thread_) {
+    return false;
   }
+  if (worker_thread_ != rtc::Thread::Current()) {
+    // Do not allow invoking calls to other threads on the worker thread.
+    worker_thread_->Invoke<bool>(rtc::Bind(
+        &rtc::Thread::SetAllowBlockingCalls, worker_thread_, false));
+  }
+
+  initialized_ = worker_thread_->Invoke<bool>(Bind(
+      &ChannelManager::InitMediaEngine_w, this));
+  ASSERT(initialized_);
+  if (!initialized_) {
+    return false;
+  }
+
+  // Now that we're initialized, apply any stored preferences. A preferred
+  // device might have been unplugged. In this case, we fall back to the
+  // default device but keep the user preferences. The preferences are
+  // changed only when the Javascript FE changes them.
+  const std::string preferred_audio_in_device = audio_in_device_;
+  const std::string preferred_audio_out_device = audio_out_device_;
+  const std::string preferred_camera_device = camera_device_;
+  Device device;
+  if (!device_manager_->GetAudioInputDevice(audio_in_device_, &device)) {
+    LOG(LS_WARNING) << "The preferred microphone '" << audio_in_device_
+                    << "' is unavailable. Fall back to the default.";
+    audio_in_device_ = DeviceManagerInterface::kDefaultDeviceName;
+  }
+  if (!device_manager_->GetAudioOutputDevice(audio_out_device_, &device)) {
+    LOG(LS_WARNING) << "The preferred speaker '" << audio_out_device_
+                    << "' is unavailable. Fall back to the default.";
+    audio_out_device_ = DeviceManagerInterface::kDefaultDeviceName;
+  }
+  if (!device_manager_->GetVideoCaptureDevice(camera_device_, &device)) {
+    if (!camera_device_.empty()) {
+      LOG(LS_WARNING) << "The preferred camera '" << camera_device_
+                      << "' is unavailable. Fall back to the default.";
+    }
+    camera_device_ = DeviceManagerInterface::kDefaultDeviceName;
+  }
+
+  if (!SetAudioOptions(audio_in_device_, audio_out_device_,
+                       audio_options_, audio_delay_offset_)) {
+    LOG(LS_WARNING) << "Failed to SetAudioOptions with"
+                    << " microphone: " << audio_in_device_
+                    << " speaker: " << audio_out_device_
+                    << " options: " << audio_options_.ToString()
+                    << " delay: " << audio_delay_offset_;
+  }
+
+  // If audio_output_volume_ has been set via SetOutputVolume(), set the
+  // audio output volume of the engine.
+  if (kNotSetOutputVolume != audio_output_volume_ &&
+      !SetOutputVolume(audio_output_volume_)) {
+    LOG(LS_WARNING) << "Failed to SetOutputVolume to "
+                    << audio_output_volume_;
+  }
+  if (!SetCaptureDevice(camera_device_) && !camera_device_.empty()) {
+    LOG(LS_WARNING) << "Failed to SetCaptureDevice with camera: "
+                    << camera_device_;
+  }
+
+  // Restore the user preferences.
+  audio_in_device_ = preferred_audio_in_device;
+  audio_out_device_ = preferred_audio_out_device;
+  camera_device_ = preferred_camera_device;
+
+  // Now apply the default video codec that has been set earlier.
+  if (default_video_encoder_config_.max_codec.id != 0) {
+    SetDefaultVideoEncoderConfig(default_video_encoder_config_);
+  }
+
   return initialized_;
 }
 
+bool ChannelManager::InitMediaEngine_w() {
+  ASSERT(worker_thread_ == rtc::Thread::Current());
+  return media_engine_->Init(worker_thread_);
+}
+
 void ChannelManager::Terminate() {
   ASSERT(initialized_);
   if (!initialized_) {
     return;
   }
   worker_thread_->Invoke<void>(Bind(&ChannelManager::Terminate_w, this));
-  media_engine_->Terminate();
   initialized_ = false;
 }
 
+void ChannelManager::DeleteMediaEngine_w() {
+  ASSERT(worker_thread_ == rtc::Thread::Current());
+  media_engine_.reset(NULL);
+}
+
 void ChannelManager::Terminate_w() {
   ASSERT(worker_thread_ == rtc::Thread::Current());
   // Need to destroy the voice/video channels
@@ -313,6 +330,7 @@
   if (!SetCaptureDevice_w(NULL)) {
     LOG(LS_WARNING) << "failed to delete video capturer";
   }
+  media_engine_->Terminate();
 }
 
 VoiceChannel* ChannelManager::CreateVoiceChannel(
diff --git a/talk/session/media/channelmanager.h b/talk/session/media/channelmanager.h
index 764451d..7e18031 100644
--- a/talk/session/media/channelmanager.h
+++ b/talk/session/media/channelmanager.h
@@ -264,6 +264,8 @@
                  DeviceManagerInterface* dm,
                  CaptureManager* cm,
                  rtc::Thread* worker_thread);
+  bool InitMediaEngine_w();
+  void DeleteMediaEngine_w();
   void Terminate_w();
   VoiceChannel* CreateVoiceChannel_w(
       BaseSession* session, const std::string& content_name, bool rtcp);
diff --git a/webrtc/examples/android/media_demo/jni/jni_helpers.cc b/webrtc/examples/android/media_demo/jni/jni_helpers.cc
index d7e3267..6179678 100644
--- a/webrtc/examples/android/media_demo/jni/jni_helpers.cc
+++ b/webrtc/examples/android/media_demo/jni/jni_helpers.cc
@@ -19,7 +19,7 @@
 jmethodID GetMethodID(JNIEnv* jni, jclass c, const std::string& name,
                       const char* signature) {
   jmethodID m = jni->GetMethodID(c, name.c_str(), signature);
-  CHECK_EXCEPTION(jni, "error during GetMethodID");
+  CHECK_JNI_EXCEPTION(jni, "error during GetMethodID");
   return m;
 }
 
@@ -37,11 +37,11 @@
 // Given a (UTF-16) jstring return a new UTF-8 native string.
 std::string JavaToStdString(JNIEnv* jni, const jstring& j_string) {
   const jchar* jchars = jni->GetStringChars(j_string, NULL);
-  CHECK_EXCEPTION(jni, "Error during GetStringChars");
+  CHECK_JNI_EXCEPTION(jni, "Error during GetStringChars");
   UnicodeString ustr(jchars, jni->GetStringLength(j_string));
-  CHECK_EXCEPTION(jni, "Error during GetStringLength");
+  CHECK_JNI_EXCEPTION(jni, "Error during GetStringLength");
   jni->ReleaseStringChars(j_string, jchars);
-  CHECK_EXCEPTION(jni, "Error during ReleaseStringChars");
+  CHECK_JNI_EXCEPTION(jni, "Error during ReleaseStringChars");
   std::string ret;
   return ustr.toUTF8String(ret);
 }
@@ -72,10 +72,10 @@
 
 void ClassReferenceHolder::LoadClass(JNIEnv* jni, const std::string& name) {
   jclass localRef = jni->FindClass(name.c_str());
-  CHECK_EXCEPTION(jni, "Could not load class");
+  CHECK_JNI_EXCEPTION(jni, "Could not load class");
   CHECK(localRef, name.c_str());
   jclass globalRef = reinterpret_cast<jclass>(jni->NewGlobalRef(localRef));
-  CHECK_EXCEPTION(jni, "error during NewGlobalRef");
+  CHECK_JNI_EXCEPTION(jni, "error during NewGlobalRef");
   CHECK(globalRef, name.c_str());
   bool inserted = classes_.insert(std::make_pair(name, globalRef)).second;
   CHECK(inserted, "Duplicate class name");
diff --git a/webrtc/examples/android/media_demo/jni/jni_helpers.h b/webrtc/examples/android/media_demo/jni/jni_helpers.h
index a4d4d96..ea1a7a6 100644
--- a/webrtc/examples/android/media_demo/jni/jni_helpers.h
+++ b/webrtc/examples/android/media_demo/jni/jni_helpers.h
@@ -34,7 +34,7 @@
 
 // Abort the process if |jni| has a Java exception pending, emitting |msg| to
 // logcat.
-#define CHECK_EXCEPTION(jni, msg) \
+#define CHECK_JNI_EXCEPTION(jni, msg) \
   if (0) {                        \
   } else {                        \
     if (jni->ExceptionCheck()) {  \
diff --git a/webrtc/examples/android/media_demo/jni/video_engine_jni.cc b/webrtc/examples/android/media_demo/jni/video_engine_jni.cc
index d389cda..412a516 100644
--- a/webrtc/examples/android/media_demo/jni/video_engine_jni.cc
+++ b/webrtc/examples/android/media_demo/jni/video_engine_jni.cc
@@ -115,7 +115,7 @@
     jmethodID j_codec_ctor = GetMethodID(jni, j_codec_class, "<init>", "(J)V");
     jobject j_codec =
         jni->NewObject(j_codec_class, j_codec_ctor, jlongFromPointer(codec));
-    CHECK_EXCEPTION(jni, "error during NewObject");
+    CHECK_JNI_EXCEPTION(jni, "error during NewObject");
     jni->CallVoidMethod(j_observer_, incoming_codec_changed_, video_channel,
                         j_codec);
   }
@@ -456,7 +456,7 @@
   jmethodID j_codec_ctor = GetMethodID(jni, j_codec_class, "<init>", "(J)V");
   jobject j_codec =
       jni->NewObject(j_codec_class, j_codec_ctor, jlongFromPointer(codec));
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_JNI_EXCEPTION(jni, "error during NewObject");
   return j_codec;
 }
 
@@ -515,7 +515,7 @@
   jmethodID j_camera_ctor = GetMethodID(jni, j_camera_class, "<init>", "(J)V");
   jobject j_camera = jni->NewObject(j_camera_class, j_camera_ctor,
                                     jlongFromPointer(camera_info));
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_JNI_EXCEPTION(jni, "error during NewObject");
   return j_camera;
 }
 
@@ -610,7 +610,7 @@
       jni->NewObject(j_rtcp_statistics_class, j_rtcp_statistics_ctor,
                      fraction_lost, cumulative_lost, extended_max, jitter,
                      static_cast<int>(rtt_ms));
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_JNI_EXCEPTION(jni, "error during NewObject");
   return j_rtcp_statistics;
 }
 
diff --git a/webrtc/examples/android/media_demo/jni/voice_engine_jni.cc b/webrtc/examples/android/media_demo/jni/voice_engine_jni.cc
index 85ad84f..8159245 100644
--- a/webrtc/examples/android/media_demo/jni/voice_engine_jni.cc
+++ b/webrtc/examples/android/media_demo/jni/voice_engine_jni.cc
@@ -327,7 +327,7 @@
   jmethodID j_codec_ctor = GetMethodID(jni, j_codec_class, "<init>", "(J)V");
   jobject j_codec =
       jni->NewObject(j_codec_class, j_codec_ctor, jlongFromPointer(codec));
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_JNI_EXCEPTION(jni, "error during NewObject");
   return j_codec;
 }
 
diff --git a/webrtc/modules/audio_device/BUILD.gn b/webrtc/modules/audio_device/BUILD.gn
index 1418bdc..784c75c 100644
--- a/webrtc/modules/audio_device/BUILD.gn
+++ b/webrtc/modules/audio_device/BUILD.gn
@@ -178,6 +178,7 @@
   }
 
   deps = [
+    "../../base:rtc_base_approved",
     "../../common_audio",
     "../../system_wrappers",
     "../utility",
diff --git a/webrtc/modules/audio_device/android/audio_common.h b/webrtc/modules/audio_device/android/audio_common.h
index f873b04..4a5303f 100644
--- a/webrtc/modules/audio_device/android/audio_common.h
+++ b/webrtc/modules/audio_device/android/audio_common.h
@@ -15,6 +15,7 @@
 
 enum {
   kDefaultSampleRate = 44100,
+  kBitsPerSample = 16,
   kNumChannels = 1,
   kDefaultBufSizeInSamples = kDefaultSampleRate * 10 / 1000,
 };
diff --git a/webrtc/modules/audio_device/android/audio_device_template.h b/webrtc/modules/audio_device/android/audio_device_template.h
index 10e3191..914a8e8 100644
--- a/webrtc/modules/audio_device/android/audio_device_template.h
+++ b/webrtc/modules/audio_device/android/audio_device_template.h
@@ -11,8 +11,8 @@
 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_
 #define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_
 
+#include "webrtc/base/checks.h"
 #include "webrtc/modules/audio_device/audio_device_generic.h"
-
 #include "webrtc/system_wrappers/interface/trace.h"
 
 namespace webrtc {
@@ -22,13 +22,11 @@
 template <class InputType, class OutputType>
 class AudioDeviceTemplate : public AudioDeviceGeneric {
  public:
-  static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
-                                       void* env,
-                                       void* context) {
-    if (OutputType::SetAndroidAudioDeviceObjects(javaVM, env, context) == -1) {
-      return -1;
-    }
-    return InputType::SetAndroidAudioDeviceObjects(javaVM, env, context);
+  static void SetAndroidAudioDeviceObjects(void* javaVM,
+                                           void* env,
+                                           void* context) {
+    OutputType::SetAndroidAudioDeviceObjects(javaVM, env, context);
+    InputType::SetAndroidAudioDeviceObjects(javaVM, env, context);
   }
 
   static void ClearAndroidAudioDeviceObjects() {
@@ -38,7 +36,8 @@
 
   explicit AudioDeviceTemplate(const int32_t id)
       : output_(id),
-        input_(id, &output_) {
+        // TODO(henrika): provide proper delay estimate using input_(&output_).
+        input_() {
   }
 
   virtual ~AudioDeviceTemplate() {
@@ -59,7 +58,7 @@
   }
 
   bool Initialized() const {
-    return output_.Initialized() && input_.Initialized();
+    return output_.Initialized();
   }
 
   int16_t PlayoutDevices() {
@@ -67,7 +66,7 @@
   }
 
   int16_t RecordingDevices() {
-    return input_.RecordingDevices();
+    return 1;
   }
 
   int32_t PlayoutDeviceName(
@@ -81,7 +80,7 @@
       uint16_t index,
       char name[kAdmMaxDeviceNameSize],
       char guid[kAdmMaxGuidSize]) {
-    return input_.RecordingDeviceName(index, name, guid);
+    return -1;
   }
 
   int32_t SetPlayoutDevice(uint16_t index) {
@@ -94,12 +93,15 @@
   }
 
   int32_t SetRecordingDevice(uint16_t index) {
-    return input_.SetRecordingDevice(index);
+    // OK to use but it has no effect currently since device selection is
+    // done using Android APIs instead.
+    return 0;
   }
 
   int32_t SetRecordingDevice(
       AudioDeviceModule::WindowsDeviceType device) {
-    return input_.SetRecordingDevice(device);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t PlayoutIsAvailable(
@@ -117,7 +119,8 @@
 
   int32_t RecordingIsAvailable(
       bool& available) {  // NOLINT
-    return input_.RecordingIsAvailable(available);
+    available = true;
+    return 0;
   }
 
   int32_t InitRecording() {
@@ -153,17 +156,19 @@
   }
 
   int32_t SetAGC(bool enable) {
-    return input_.SetAGC(enable);
+    if (enable) {
+      FATAL() << "Should never be called";
+    }
+    return -1;
   }
 
   bool AGC() const {
-    return input_.AGC();
+    return false;
   }
 
   int32_t SetWaveOutVolume(uint16_t volumeLeft,
                            uint16_t volumeRight) {
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, 0,
-                 "  API call not supported on this platform");
+    FATAL() << "Should never be called";
     return -1;
   }
 
@@ -184,11 +189,11 @@
   }
 
   int32_t InitMicrophone() {
-    return input_.InitMicrophone();
+    return 0;
   }
 
   bool MicrophoneIsInitialized() const {
-    return input_.MicrophoneIsInitialized();
+    return true;
   }
 
   int32_t SpeakerVolumeIsAvailable(
@@ -222,31 +227,38 @@
 
   int32_t MicrophoneVolumeIsAvailable(
       bool& available) {  // NOLINT
-    return input_.MicrophoneVolumeIsAvailable(available);
+    available = false;
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t SetMicrophoneVolume(uint32_t volume) {
-    return input_.SetMicrophoneVolume(volume);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t MicrophoneVolume(
       uint32_t& volume) const {  // NOLINT
-    return input_.MicrophoneVolume(volume);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t MaxMicrophoneVolume(
       uint32_t& maxVolume) const {  // NOLINT
-    return input_.MaxMicrophoneVolume(maxVolume);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t MinMicrophoneVolume(
       uint32_t& minVolume) const {  // NOLINT
-    return input_.MinMicrophoneVolume(minVolume);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t MicrophoneVolumeStepSize(
       uint16_t& stepSize) const {  // NOLINT
-    return input_.MicrophoneVolumeStepSize(stepSize);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t SpeakerMuteIsAvailable(
@@ -265,30 +277,36 @@
 
   int32_t MicrophoneMuteIsAvailable(
       bool& available) {  // NOLINT
-    return input_.MicrophoneMuteIsAvailable(available);
+    FATAL() << "Not implemented";
+    return -1;
   }
 
   int32_t SetMicrophoneMute(bool enable) {
-    return input_.SetMicrophoneMute(enable);
+    FATAL() << "Not implemented";
+    return -1;
   }
 
   int32_t MicrophoneMute(
       bool& enabled) const {  // NOLINT
-    return input_.MicrophoneMute(enabled);
+    FATAL() << "Not implemented";
+    return -1;
   }
 
   int32_t MicrophoneBoostIsAvailable(
       bool& available) {  // NOLINT
-    return input_.MicrophoneBoostIsAvailable(available);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t SetMicrophoneBoost(bool enable) {
-    return input_.SetMicrophoneBoost(enable);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t MicrophoneBoost(
       bool& enabled) const {  // NOLINT
-    return input_.MicrophoneBoost(enabled);
+    FATAL() << "Should never be called";
+    return -1;
   }
 
   int32_t StereoPlayoutIsAvailable(
@@ -307,16 +325,18 @@
 
   int32_t StereoRecordingIsAvailable(
       bool& available) {  // NOLINT
-    return input_.StereoRecordingIsAvailable(available);
+    available = false;
+    return 0;
   }
 
   int32_t SetStereoRecording(bool enable) {
-    return input_.SetStereoRecording(enable);
+    return -1;
   }
 
   int32_t StereoRecording(
       bool& enabled) const {  // NOLINT
-    return input_.StereoRecording(enabled);
+    enabled = false;
+    return 0;
   }
 
   int32_t SetPlayoutBuffer(
@@ -343,8 +363,7 @@
 
   int32_t CPULoad(
       uint16_t& load) const {  // NOLINT
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, 0,
-                 "  API call not supported on this platform");
+    FATAL() << "Should never be called";
     return -1;
   }
 
@@ -357,11 +376,11 @@
   }
 
   bool RecordingWarning() const {
-    return input_.RecordingWarning();
+    return false;
   }
 
   bool RecordingError() const {
-    return input_.RecordingError();
+    return false;
   }
 
   void ClearPlayoutWarning() {
@@ -372,13 +391,9 @@
     return output_.ClearPlayoutError();
   }
 
-  void ClearRecordingWarning() {
-    return input_.ClearRecordingWarning();
-  }
+  void ClearRecordingWarning() {}
 
-  void ClearRecordingError() {
-    return input_.ClearRecordingError();
-  }
+  void ClearRecordingError() {}
 
   void AttachAudioBuffer(
       AudioDeviceBuffer* audioBuffer) {
@@ -386,11 +401,6 @@
     input_.AttachAudioBuffer(audioBuffer);
   }
 
-  int32_t SetRecordingSampleRate(
-      const uint32_t samplesPerSec) {
-    return input_.SetRecordingSampleRate(samplesPerSec);
-  }
-
   int32_t SetPlayoutSampleRate(
       const uint32_t samplesPerSec) {
     return output_.SetPlayoutSampleRate(samplesPerSec);
diff --git a/webrtc/modules/audio_device/android/audio_device_utility_android.cc b/webrtc/modules/audio_device/android/audio_device_utility_android.cc
index 73c9417..f8c2662 100644
--- a/webrtc/modules/audio_device/android/audio_device_utility_android.cc
+++ b/webrtc/modules/audio_device/android/audio_device_utility_android.cc
@@ -8,43 +8,17 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-/*
- *  Android audio device utility implementation
- */
 
 #include "webrtc/modules/audio_device/android/audio_device_utility_android.h"
 
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/interface/trace.h"
+namespace webrtc {
 
-namespace webrtc
-{
+AudioDeviceUtilityAndroid::AudioDeviceUtilityAndroid(const int32_t id) {}
 
-AudioDeviceUtilityAndroid::AudioDeviceUtilityAndroid(const int32_t id) :
-    _critSect(*CriticalSectionWrapper::CreateCriticalSection()), _id(id)
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
-                 "%s created", __FUNCTION__);
-}
+AudioDeviceUtilityAndroid::~AudioDeviceUtilityAndroid() {}
 
-AudioDeviceUtilityAndroid::~AudioDeviceUtilityAndroid()
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
-                 "%s destroyed", __FUNCTION__);
-    {
-        CriticalSectionScoped lock(&_critSect);
-    }
-
-    delete &_critSect;
-}
-
-int32_t AudioDeviceUtilityAndroid::Init()
-{
-
-    WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
-                 "  OS info: %s", "Android");
-
-    return 0;
+int32_t AudioDeviceUtilityAndroid::Init() {
+  return 0;
 }
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_device/android/audio_device_utility_android.h b/webrtc/modules/audio_device/android/audio_device_utility_android.h
index c677099..1c1ce1c 100644
--- a/webrtc/modules/audio_device/android/audio_device_utility_android.h
+++ b/webrtc/modules/audio_device/android/audio_device_utility_android.h
@@ -15,24 +15,22 @@
 #ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_ANDROID_H
 #define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_ANDROID_H
 
+#include <jni.h>
+
+#include "webrtc/base/checks.h"
 #include "webrtc/modules/audio_device/audio_device_utility.h"
 #include "webrtc/modules/audio_device/include/audio_device.h"
 
-namespace webrtc
-{
-class CriticalSectionWrapper;
+namespace webrtc {
 
-class AudioDeviceUtilityAndroid: public AudioDeviceUtility
-{
-public:
+// TODO(henrika): this utility class is not used, but I would like to keep
+// this file for the other helper methods which are unique to Android.
+class AudioDeviceUtilityAndroid: public AudioDeviceUtility {
+ public:
     AudioDeviceUtilityAndroid(const int32_t id);
     ~AudioDeviceUtilityAndroid();
 
     virtual int32_t Init();
-
-private:
-    CriticalSectionWrapper& _critSect;
-    int32_t _id;
 };
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_device/android/audio_record_jni.cc b/webrtc/modules/audio_device/android/audio_record_jni.cc
index 6aa949f..392703b 100644
--- a/webrtc/modules/audio_device/android/audio_record_jni.cc
+++ b/webrtc/modules/audio_device/android/audio_record_jni.cc
@@ -8,1285 +8,310 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-/*
- *  Android audio device implementation (JNI/AudioRecord usage)
- */
-
-// TODO(xians): Break out attach and detach current thread to JVM to
-// separate functions.
-
 #include "webrtc/modules/audio_device/android/audio_record_jni.h"
 
 #include <android/log.h>
-#include <assert.h>
-#include <stdlib.h>
 
+#include "webrtc/base/arraysize.h"
+#include "webrtc/base/checks.h"
 #include "webrtc/modules/audio_device/android/audio_common.h"
-#include "webrtc/modules/audio_device/audio_device_config.h"
-#include "webrtc/modules/audio_device/audio_device_utility.h"
-#include "webrtc/system_wrappers/interface/event_wrapper.h"
-#include "webrtc/system_wrappers/interface/thread_wrapper.h"
-#include "webrtc/system_wrappers/interface/trace.h"
+
+#define TAG "AudioRecordJni"
+#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
+#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
+#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
+#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
 
 namespace webrtc {
 
-JavaVM* AudioRecordJni::globalJvm = NULL;
-JNIEnv* AudioRecordJni::globalJNIEnv = NULL;
-jobject AudioRecordJni::globalContext = NULL;
-jclass AudioRecordJni::globalScClass = NULL;
+// Number of bytes per audio frame.
+// Example: 16-bit PCM in mono => 1*(16/8)=2 [bytes/frame]
+static const int kBytesPerFrame = kNumChannels * (kBitsPerSample / 8);
 
-int32_t AudioRecordJni::SetAndroidAudioDeviceObjects(void* javaVM, void* env,
-                                                     void* context) {
-  assert(env);
-  globalJvm = reinterpret_cast<JavaVM*>(javaVM);
-  globalJNIEnv = reinterpret_cast<JNIEnv*>(env);
-  // Get java class type (note path to class packet).
-  jclass javaScClassLocal = globalJNIEnv->FindClass(
-      "org/webrtc/voiceengine/WebRtcAudioRecord");
-  if (!javaScClassLocal) {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-                 "%s: could not find java class", __FUNCTION__);
-    return -1; // exception thrown
-  }
+// We are unable to obtain exact measurements of the hardware delay on Android.
+// Instead, a lower bound (based on measurements) is used.
+// TODO(henrika): is it possible to improve this?
+static const int kHardwareDelayInMilliseconds = 100;
 
-  // Create a global reference to the class (to tell JNI that we are
-  // referencing it after this function has returned).
-  globalScClass = reinterpret_cast<jclass> (
-      globalJNIEnv->NewGlobalRef(javaScClassLocal));
-  if (!globalScClass) {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-                 "%s: could not create reference", __FUNCTION__);
-    return -1;
-  }
+static JavaVM* g_jvm = NULL;
+static jobject g_context = NULL;
+static jclass g_audio_record_class = NULL;
 
-  globalContext = globalJNIEnv->NewGlobalRef(
-      reinterpret_cast<jobject>(context));
-  if (!globalContext) {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-                 "%s: could not create context reference", __FUNCTION__);
-    return -1;
-  }
+void AudioRecordJni::SetAndroidAudioDeviceObjects(void* jvm, void* env,
+                                                  void* context) {
+  ALOGD("SetAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
 
-  // Delete local class ref, we only use the global ref
-  globalJNIEnv->DeleteLocalRef(javaScClassLocal);
+  CHECK(jvm);
+  CHECK(env);
+  CHECK(context);
 
-  return 0;
+  g_jvm = reinterpret_cast<JavaVM*>(jvm);
+  JNIEnv* jni = GetEnv(g_jvm);
+  CHECK(jni) << "AttachCurrentThread must be called on this tread";
+
+  // Protect context from being deleted during garbage collection.
+  g_context = NewGlobalRef(jni, reinterpret_cast<jobject>(context));
+
+  // Load the locally-defined WebRtcAudioRecord class and create a new global
+  // reference to it.
+  jclass local_class = FindClass(
+      jni, "org/webrtc/voiceengine/WebRtcAudioRecord");
+  g_audio_record_class = reinterpret_cast<jclass>(
+      NewGlobalRef(jni, local_class));
+
+  // Register native methods with the WebRtcAudioRecord class. These methods
+  // are declared private native in WebRtcAudioRecord.java.
+  JNINativeMethod native_methods[] = {
+      {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
+          reinterpret_cast<void*>(
+       &webrtc::AudioRecordJni::CacheDirectBufferAddress)},
+      {"nativeDataIsRecorded", "(IJ)V",
+          reinterpret_cast<void*>(&webrtc::AudioRecordJni::DataIsRecorded)}};
+  jni->RegisterNatives(g_audio_record_class,
+                       native_methods, arraysize(native_methods));
+  CHECK_EXCEPTION(jni) << "Error during RegisterNatives";
 }
 
 void AudioRecordJni::ClearAndroidAudioDeviceObjects() {
-  WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
-               "%s: env is NULL, assuming deinit", __FUNCTION__);
-
-  globalJvm = NULL;;
-  if (!globalJNIEnv) {
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1,
-                 "%s: saved env already NULL", __FUNCTION__);
-    return;
-  }
-
-  globalJNIEnv->DeleteGlobalRef(globalContext);
-  globalContext = reinterpret_cast<jobject>(NULL);
-
-  globalJNIEnv->DeleteGlobalRef(globalScClass);
-  globalScClass = reinterpret_cast<jclass>(NULL);
-
-  globalJNIEnv = reinterpret_cast<JNIEnv*>(NULL);
+  ALOGD("ClearAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
+  JNIEnv* jni = GetEnv(g_jvm);
+  CHECK(jni) << "AttachCurrentThread must be called on this tread";
+  jni->UnregisterNatives(g_audio_record_class);
+  CHECK_EXCEPTION(jni) << "Error during UnregisterNatives";
+  DeleteGlobalRef(jni, g_audio_record_class);
+  g_audio_record_class = NULL;
+  DeleteGlobalRef(jni, g_context);
+  g_context = NULL;
+  g_jvm = NULL;
 }
 
-AudioRecordJni::AudioRecordJni(
-    const int32_t id, PlayoutDelayProvider* delay_provider)
-    : _javaVM(NULL),
-      _jniEnvRec(NULL),
-      _javaScClass(0),
-      _javaScObj(0),
-      _javaRecBuffer(0),
-      _javaDirectRecBuffer(NULL),
-      _javaMidRecAudio(0),
-      _ptrAudioBuffer(NULL),
-      _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
-      _id(id),
-      _delay_provider(delay_provider),
-      _initialized(false),
-      _timeEventRec(*EventWrapper::Create()),
-      _recStartStopEvent(*EventWrapper::Create()),
-      _ptrThreadRec(NULL),
-      _recThreadID(0),
-      _recThreadIsInitialized(false),
-      _shutdownRecThread(false),
-      _recordingDeviceIsSpecified(false),
-      _recording(false),
-      _recIsInitialized(false),
-      _micIsInitialized(false),
-      _startRec(false),
-      _recWarning(0),
-      _recError(0),
-      _delayRecording(0),
-      _AGC(false),
-      _samplingFreqIn((N_REC_SAMPLES_PER_SEC/1000)),
-      _recAudioSource(1) { // 1 is AudioSource.MIC which is our default
-  memset(_recBuffer, 0, sizeof(_recBuffer));
+AudioRecordJni::AudioRecordJni()
+    : j_audio_record_(NULL),
+      direct_buffer_address_(NULL),
+      direct_buffer_capacity_in_bytes_(0),
+      frames_per_buffer_(0),
+      initialized_(false),
+      recording_(false),
+      audio_device_buffer_(NULL),
+      sample_rate_hz_(0) {
+  ALOGD("ctor%s", GetThreadInfo().c_str());
+  CHECK(HasDeviceObjects());
+  CreateJavaInstance();
+  // Detach from this thread since we want to use the checker to verify calls
+  // from the Java-based audio thread.
+  thread_checker_java_.DetachFromThread();
 }
 
 AudioRecordJni::~AudioRecordJni() {
-  WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
-               "%s destroyed", __FUNCTION__);
-
+  ALOGD("~dtor%s", GetThreadInfo().c_str());
+  DCHECK(thread_checker_.CalledOnValidThread());
   Terminate();
-
-  delete &_recStartStopEvent;
-  delete &_timeEventRec;
-  delete &_critSect;
+  AttachThreadScoped ats(g_jvm);
+  JNIEnv* jni = ats.env();
+  jni->DeleteGlobalRef(j_audio_record_);
+  j_audio_record_ = NULL;
 }
 
 int32_t AudioRecordJni::Init() {
-  CriticalSectionScoped lock(&_critSect);
-
-  if (_initialized)
-  {
-    return 0;
-  }
-
-  _recWarning = 0;
-  _recError = 0;
-
-  // Init Java member variables
-  // and set up JNI interface to
-  // AudioDeviceAndroid java class
-  if (InitJavaResources() != 0)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "%s: Failed to init Java resources", __FUNCTION__);
-    return -1;
-  }
-
-  // Check the sample rate to be used for playback and recording
-  // and the max playout volume
-  if (InitSampleRate() != 0)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "%s: Failed to init samplerate", __FUNCTION__);
-    return -1;
-  }
-
-  const char* threadName = "jni_audio_capture_thread";
-  _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc, this,
-                                              kRealtimePriority, threadName);
-  if (_ptrThreadRec == NULL)
-  {
-    WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
-                 "  failed to create the rec audio thread");
-    return -1;
-  }
-
-  unsigned int threadID(0);
-  if (!_ptrThreadRec->Start(threadID))
-  {
-    WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
-                 "  failed to start the rec audio thread");
-    delete _ptrThreadRec;
-    _ptrThreadRec = NULL;
-    return -1;
-  }
-  _recThreadID = threadID;
-  _initialized = true;
-
+  ALOGD("Init%s", GetThreadInfo().c_str());
+  DCHECK(thread_checker_.CalledOnValidThread());
   return 0;
 }
 
 int32_t AudioRecordJni::Terminate() {
-  CriticalSectionScoped lock(&_critSect);
-
-  if (!_initialized)
-  {
-    return 0;
-  }
-
+  ALOGD("Terminate%s", GetThreadInfo().c_str());
+  DCHECK(thread_checker_.CalledOnValidThread());
   StopRecording();
-  _shutdownRecThread = true;
-  _timeEventRec.Set(); // Release rec thread from waiting state
-  if (_ptrThreadRec)
-  {
-    // First, the thread must detach itself from Java VM
-    _critSect.Leave();
-    if (kEventSignaled != _recStartStopEvent.Wait(5000))
-    {
-      WEBRTC_TRACE(
-          kTraceError,
-          kTraceAudioDevice,
-          _id,
-          "%s: Recording thread shutdown timed out, cannot "
-          "terminate thread",
-          __FUNCTION__);
-      // If we close thread anyway, the app will crash
-      return -1;
-    }
-    _recStartStopEvent.Reset();
-    _critSect.Enter();
-
-    // Close down rec thread
-    ThreadWrapper* tmpThread = _ptrThreadRec;
-    _ptrThreadRec = NULL;
-    _critSect.Leave();
-    // Release again, we might have returned to waiting state
-    _timeEventRec.Set();
-    if (tmpThread->Stop())
-    {
-      delete tmpThread;
-      _jniEnvRec = NULL;
-    }
-    else
-    {
-      WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                   "  failed to close down the rec audio thread");
-    }
-    _critSect.Enter();
-
-    _recThreadIsInitialized = false;
-  }
-  _micIsInitialized = false;
-  _recordingDeviceIsSpecified = false;
-
-  // get the JNI env for this thread
-  JNIEnv *env;
-  bool isAttached = false;
-
-  // get the JNI env for this thread
-  if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK)
-  {
-    // try to attach the thread and get the env
-    // Attach this thread to JVM
-    jint res = _javaVM->AttachCurrentThread(&env, NULL);
-    if ((res < 0) || !env)
-    {
-      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                   "%s: Could not attach thread to JVM (%d, %p)",
-                   __FUNCTION__, res, env);
-      return -1;
-    }
-    isAttached = true;
-  }
-
-  // Make method IDs and buffer pointers unusable
-  _javaMidRecAudio = 0;
-  _javaDirectRecBuffer = NULL;
-
-  // Delete the references to the java buffers, this allows the
-  // garbage collector to delete them
-  env->DeleteGlobalRef(_javaRecBuffer);
-  _javaRecBuffer = 0;
-
-  // Delete the references to the java object and class, this allows the
-  // garbage collector to delete them
-  env->DeleteGlobalRef(_javaScObj);
-  _javaScObj = 0;
-  _javaScClass = 0;
-
-  // Detach this thread if it was attached
-  if (isAttached)
-  {
-    if (_javaVM->DetachCurrentThread() < 0)
-    {
-      WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                   "%s: Could not detach thread from JVM", __FUNCTION__);
-    }
-  }
-
-  _initialized = false;
-
   return 0;
 }
 
-int32_t AudioRecordJni::RecordingDeviceName(uint16_t index,
-                                            char name[kAdmMaxDeviceNameSize],
-                                            char guid[kAdmMaxGuidSize]) {
-  if (0 != index)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "  Device index is out of range [0,0]");
-    return -1;
-  }
-
-  // Return empty string
-  memset(name, 0, kAdmMaxDeviceNameSize);
-
-  if (guid)
-  {
-    memset(guid, 0, kAdmMaxGuidSize);
-  }
-
-  return 0;
-}
-
-int32_t AudioRecordJni::SetRecordingDevice(uint16_t index) {
-  if (_recIsInitialized)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "  Recording already initialized");
-    return -1;
-  }
-
-  // Recording device index will be used for specifying recording
-  // audio source, allow any value
-  _recAudioSource = index;
-  _recordingDeviceIsSpecified = true;
-
-  return 0;
-}
-
-int32_t AudioRecordJni::SetRecordingDevice(
-    AudioDeviceModule::WindowsDeviceType device) {
-  WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-               "  API call not supported on this platform");
-  return -1;
-}
-
-int32_t AudioRecordJni::RecordingIsAvailable(bool& available) {  // NOLINT
-  available = false;
-
-  // Try to initialize the playout side
-  int32_t res = InitRecording();
-
-  // Cancel effect of initialization
-  StopRecording();
-
-  if (res != -1)
-  {
-    available = true;
-  }
-
-  return res;
-}
-
 int32_t AudioRecordJni::InitRecording() {
-  CriticalSectionScoped lock(&_critSect);
-  if (!_initialized)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "  Not initialized");
+  ALOGD("InitRecording%s", GetThreadInfo().c_str());
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK(!initialized_);
+  DCHECK(!recording_);
+  if (initialized_ || recording_) {
     return -1;
   }
-
-  if (_recording)
-  {
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  Recording already started");
+  AttachThreadScoped ats(g_jvm);
+  JNIEnv* jni = ats.env();
+  jmethodID initRecordingID = GetMethodID(
+      jni, g_audio_record_class, "InitRecording", "(I)I");
+  jint frames_per_buffer = jni->CallIntMethod(
+      j_audio_record_, initRecordingID, sample_rate_hz_);
+  CHECK_EXCEPTION(jni);
+  if (frames_per_buffer < 0) {
+    ALOGE("InitRecording failed!");
     return -1;
   }
-
-  if (!_recordingDeviceIsSpecified)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "  Recording device is not specified");
-    return -1;
-  }
-
-  if (_recIsInitialized)
-  {
-    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                 "  Recording already initialized");
-    return 0;
-  }
-
-  // Initialize the microphone
-  if (InitMicrophone() == -1)
-  {
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  InitMicrophone() failed");
-  }
-
-  // get the JNI env for this thread
-  JNIEnv *env;
-  bool isAttached = false;
-
-  // get the JNI env for this thread
-  if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK)
-  {
-    // try to attach the thread and get the env
-    // Attach this thread to JVM
-    jint res = _javaVM->AttachCurrentThread(&env, NULL);
-    if ((res < 0) || !env)
-    {
-      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                   "  Could not attach thread to JVM (%d, %p)", res, env);
-      return -1;
-    }
-    isAttached = true;
-  }
-
-  // get the method ID
-  jmethodID initRecordingID = env->GetMethodID(_javaScClass, "InitRecording",
-                                               "(II)I");
-
-  int samplingFreq = 44100;
-  if (_samplingFreqIn != 44)
-  {
-    samplingFreq = _samplingFreqIn * 1000;
-  }
-
-  int retVal = -1;
-
-  // call java sc object method
-  jint res = env->CallIntMethod(_javaScObj, initRecordingID, _recAudioSource,
-                                samplingFreq);
-  if (res < 0)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "InitRecording failed (%d)", res);
-  }
-  else
-  {
-    // Set the audio device buffer sampling rate
-    _ptrAudioBuffer->SetRecordingSampleRate(_samplingFreqIn * 1000);
-
-    // the init rec function returns a fixed delay
-    _delayRecording = res / _samplingFreqIn;
-
-    _recIsInitialized = true;
-    retVal = 0;
-  }
-
-  // Detach this thread if it was attached
-  if (isAttached)
-  {
-    if (_javaVM->DetachCurrentThread() < 0)
-    {
-      WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                   "  Could not detach thread from JVM");
-    }
-  }
-
-  return retVal;
+  frames_per_buffer_ = frames_per_buffer;
+  ALOGD("frames_per_buffer: %d", frames_per_buffer_);
+  CHECK_EQ(direct_buffer_capacity_in_bytes_,
+           frames_per_buffer_ * kBytesPerFrame);
+  initialized_ = true;
+  return 0;
 }
 
 int32_t AudioRecordJni::StartRecording() {
-  CriticalSectionScoped lock(&_critSect);
-
-  if (!_recIsInitialized)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "  Recording not initialized");
+  ALOGD("StartRecording%s", GetThreadInfo().c_str());
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK(initialized_);
+  DCHECK(!recording_);
+  if (!initialized_ || recording_) {
     return -1;
   }
-
-  if (_recording)
-  {
-    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                 "  Recording already started");
-    return 0;
-  }
-
-  // get the JNI env for this thread
-  JNIEnv *env;
-  bool isAttached = false;
-
-  // get the JNI env for this thread
-  if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK)
-  {
-    // try to attach the thread and get the env
-    // Attach this thread to JVM
-    jint res = _javaVM->AttachCurrentThread(&env, NULL);
-    if ((res < 0) || !env)
-    {
-      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                   "  Could not attach thread to JVM (%d, %p)", res, env);
-      return -1;
-    }
-    isAttached = true;
-  }
-
-  // get the method ID
-  jmethodID startRecordingID = env->GetMethodID(_javaScClass,
-                                                "StartRecording", "()I");
-
-  // Call java sc object method
-  jint res = env->CallIntMethod(_javaScObj, startRecordingID);
-  if (res < 0)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "StartRecording failed (%d)", res);
+  AttachThreadScoped ats(g_jvm);
+  JNIEnv* jni = ats.env();
+  jmethodID startRecordingID = GetMethodID(
+      jni, g_audio_record_class, "StartRecording", "()Z");
+  jboolean res = jni->CallBooleanMethod(j_audio_record_, startRecordingID);
+  CHECK_EXCEPTION(jni);
+  if (!res) {
+    ALOGE("StartRecording failed!");
     return -1;
   }
-
-  _recWarning = 0;
-  _recError = 0;
-
-  // Signal to recording thread that we want to start
-  _startRec = true;
-  _timeEventRec.Set(); // Release thread from waiting state
-  _critSect.Leave();
-  // Wait for thread to init
-  if (kEventSignaled != _recStartStopEvent.Wait(5000))
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "  Timeout or error starting");
-  }
-  _recStartStopEvent.Reset();
-  _critSect.Enter();
-
-  // Detach this thread if it was attached
-  if (isAttached)
-  {
-    if (_javaVM->DetachCurrentThread() < 0)
-    {
-      WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                   "  Could not detach thread from JVM");
-    }
-  }
-
   return 0;
-
 }
 
 int32_t AudioRecordJni::StopRecording() {
-  CriticalSectionScoped lock(&_critSect);
-
-  if (!_recIsInitialized)
-  {
-    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                 "  Recording is not initialized");
+  ALOGD("StopRecording%s", GetThreadInfo().c_str());
+  DCHECK(thread_checker_.CalledOnValidThread());
+  if (!initialized_) {
     return 0;
   }
-
-  // make sure we don't start recording (it's asynchronous),
-  // assuming that we are under lock
-  _startRec = false;
-
-  // get the JNI env for this thread
-  JNIEnv *env;
-  bool isAttached = false;
-
-  // get the JNI env for this thread
-  if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK)
-  {
-    // try to attach the thread and get the env
-    // Attach this thread to JVM
-    jint res = _javaVM->AttachCurrentThread(&env, NULL);
-    if ((res < 0) || !env)
-    {
-      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                   "  Could not attach thread to JVM (%d, %p)", res, env);
-      return -1;
-    }
-    isAttached = true;
-  }
-
-  // get the method ID
-  jmethodID stopRecordingID = env->GetMethodID(_javaScClass, "StopRecording",
-                                               "()I");
-
-  // Call java sc object method
-  jint res = env->CallIntMethod(_javaScObj, stopRecordingID);
-  if (res < 0)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "StopRecording failed (%d)", res);
-  }
-
-  _recIsInitialized = false;
-  _recording = false;
-  _recWarning = 0;
-  _recError = 0;
-
-  // Detach this thread if it was attached
-  if (isAttached)
-  {
-    if (_javaVM->DetachCurrentThread() < 0)
-    {
-      WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                   "  Could not detach thread from JVM");
-    }
-  }
-
-  return 0;
-
-}
-
-int32_t AudioRecordJni::SetAGC(bool enable) {
-  _AGC = enable;
-  return 0;
-}
-
-int32_t AudioRecordJni::InitMicrophone() {
-  CriticalSectionScoped lock(&_critSect);
-
-  if (_recording)
-  {
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  Recording already started");
+  AttachThreadScoped ats(g_jvm);
+  JNIEnv* jni = ats.env();
+  jmethodID stopRecordingID = GetMethodID(
+      jni, g_audio_record_class, "StopRecording", "()Z");
+  jboolean res = jni->CallBooleanMethod(j_audio_record_, stopRecordingID);
+  CHECK_EXCEPTION(jni);
+  if (!res) {
+    ALOGE("StopRecording failed!");
     return -1;
   }
-
-  if (!_recordingDeviceIsSpecified)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "  Recording device is not specified");
-    return -1;
-  }
-
-  // Nothing needs to be done here, we use a flag to have consistent
-  // behavior with other platforms
-  _micIsInitialized = true;
-
+  // If we don't detach here, we will hit a DCHECK in OnDataIsRecorded() next
+  // time StartRecording() is called since it will create a new Java thread.
+  thread_checker_java_.DetachFromThread();
+  initialized_ = false;
+  recording_ = false;
   return 0;
-}
 
-int32_t AudioRecordJni::MicrophoneVolumeIsAvailable(
-    bool& available) {  // NOLINT
-  available = false;  // Mic volume not supported on Android
-  return 0;
-}
-
-int32_t AudioRecordJni::SetMicrophoneVolume( uint32_t /*volume*/) {
-
-  WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-               "  API call not supported on this platform");
-  return -1;
-}
-
-int32_t AudioRecordJni::MicrophoneVolume(uint32_t& volume) const {  // NOLINT
-  WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-               "  API call not supported on this platform");
-  return -1;
-}
-
-int32_t AudioRecordJni::MaxMicrophoneVolume(
-    uint32_t& maxVolume) const {  // NOLINT
-  WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-               "  API call not supported on this platform");
-  return -1;
-}
-
-int32_t AudioRecordJni::MinMicrophoneVolume(
-    uint32_t& minVolume) const {  // NOLINT
-  WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-               "  API call not supported on this platform");
-  return -1;
-}
-
-int32_t AudioRecordJni::MicrophoneVolumeStepSize(
-    uint16_t& stepSize) const {
-  WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-               "  API call not supported on this platform");
-  return -1;
-}
-
-int32_t AudioRecordJni::MicrophoneMuteIsAvailable(bool& available) {  // NOLINT
-  available = false; // Mic mute not supported on Android
-  return 0;
-}
-
-int32_t AudioRecordJni::SetMicrophoneMute(bool enable) {
-  WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-               "  API call not supported on this platform");
-  return -1;
-}
-
-int32_t AudioRecordJni::MicrophoneMute(bool& enabled) const {  // NOLINT
-  WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-               "  API call not supported on this platform");
-  return -1;
-}
-
-int32_t AudioRecordJni::MicrophoneBoostIsAvailable(bool& available) {  // NOLINT
-  available = false; // Mic boost not supported on Android
-  return 0;
-}
-
-int32_t AudioRecordJni::SetMicrophoneBoost(bool enable) {
-  if (!_micIsInitialized)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "  Microphone not initialized");
-    return -1;
-  }
-
-  if (enable)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "  Enabling not available");
-    return -1;
-  }
-
-  return 0;
-}
-
-int32_t AudioRecordJni::MicrophoneBoost(bool& enabled) const {  // NOLINT
-  if (!_micIsInitialized)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "  Microphone not initialized");
-    return -1;
-  }
-
-  enabled = false;
-
-  return 0;
-}
-
-int32_t AudioRecordJni::StereoRecordingIsAvailable(bool& available) {  // NOLINT
-  available = false; // Stereo recording not supported on Android
-  return 0;
-}
-
-int32_t AudioRecordJni::SetStereoRecording(bool enable) {
-  if (enable)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "  Enabling not available");
-    return -1;
-  }
-
-  return 0;
-}
-
-int32_t AudioRecordJni::StereoRecording(bool& enabled) const {  // NOLINT
-  enabled = false;
-  return 0;
 }
 
 int32_t AudioRecordJni::RecordingDelay(uint16_t& delayMS) const {  // NOLINT
-  delayMS = _delayRecording;
+  // TODO(henrika): is it possible to improve this estimate?
+  delayMS = kHardwareDelayInMilliseconds;
   return 0;
 }
 
-bool AudioRecordJni::RecordingWarning() const {
-  return (_recWarning > 0);
-}
-
-bool AudioRecordJni::RecordingError() const {
-  return (_recError > 0);
-}
-
-void AudioRecordJni::ClearRecordingWarning() {
-  _recWarning = 0;
-}
-
-void AudioRecordJni::ClearRecordingError() {
-  _recError = 0;
-}
-
 void AudioRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
-  CriticalSectionScoped lock(&_critSect);
-  _ptrAudioBuffer = audioBuffer;
-  // inform the AudioBuffer about default settings for this implementation
-  _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
-  _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
-}
-
-int32_t AudioRecordJni::SetRecordingSampleRate(const uint32_t samplesPerSec) {
-  if (samplesPerSec > 48000 || samplesPerSec < 8000)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "  Invalid sample rate");
-    return -1;
-  }
-
-  // set the recording sample rate to use
-  if (samplesPerSec == 44100)
-  {
-    _samplingFreqIn = 44;
-  }
-  else
-  {
-    _samplingFreqIn = samplesPerSec / 1000;
-  }
-
-  // Update the AudioDeviceBuffer
-  _ptrAudioBuffer->SetRecordingSampleRate(samplesPerSec);
-
-  return 0;
+  ALOGD("AttachAudioBuffer");
+  DCHECK(thread_checker_.CalledOnValidThread());
+  audio_device_buffer_ = audioBuffer;
+  sample_rate_hz_ = GetNativeSampleRate();
+  ALOGD("SetRecordingSampleRate(%d)", sample_rate_hz_);
+  audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz_);
+  audio_device_buffer_->SetRecordingChannels(kNumChannels);
 }
 
 bool AudioRecordJni::BuiltInAECIsAvailable() const {
-  assert(_javaVM);
-
-  JNIEnv* env = NULL;
-  bool isAttached = false;
-
-  // Get the JNI env for this thread
-  if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
-    jint res = _javaVM->AttachCurrentThread(&env, NULL);
-    if ((res < 0) || !env) {
-      return false;
-    }
-    isAttached = true;
-  }
-
-  // Get method ID for BuiltInAECIsAvailable
-  jmethodID builtInAECIsAvailable = env->GetStaticMethodID(
-      _javaScClass, "BuiltInAECIsAvailable", "()Z");
-  if (builtInAECIsAvailable == NULL) {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "%s: Unable to get BuiltInAECIsAvailable ID", __FUNCTION__);
-    return false;
-  }
-
-  // Call the static BuiltInAECIsAvailable method
-  jboolean hw_aec = env->CallStaticBooleanMethod(_javaScClass,
+  ALOGD("BuiltInAECIsAvailable%s", GetThreadInfo().c_str());
+  AttachThreadScoped ats(g_jvm);
+  JNIEnv* jni = ats.env();
+  jmethodID builtInAECIsAvailable = jni->GetStaticMethodID(
+      g_audio_record_class, "BuiltInAECIsAvailable", "()Z");
+  CHECK_EXCEPTION(jni);
+  CHECK(builtInAECIsAvailable);
+  jboolean hw_aec = jni->CallStaticBooleanMethod(g_audio_record_class,
                                                  builtInAECIsAvailable);
-
-  // Detach this thread if it was attached
-  if (isAttached) {
-    if (_javaVM->DetachCurrentThread() < 0) {
-      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "%s: Could not detach thread from JVM", __FUNCTION__);
-    }
-  }
-
+  CHECK_EXCEPTION(jni);
   return hw_aec;
 }
 
 int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) {
-  assert(_javaVM);
-
-  jint res = 0;
-  JNIEnv* env = NULL;
-  bool isAttached = false;
-
-  // Get the JNI env for this thread
-  if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
-    res = _javaVM->AttachCurrentThread(&env, NULL);
-    if ((res < 0) || !env) {
-      return false;
-    }
-    isAttached = true;
-  }
-
-  // Get method ID for EnableBuiltInAEC "(argument-types)return-type"
-  jmethodID enableBuiltInAEC = env->GetMethodID(_javaScClass,
-                                                "EnableBuiltInAEC",
-                                                "(Z)I");
-
-  // Call the EnableBuiltInAEC method
-  res = env->CallIntMethod(_javaScObj, enableBuiltInAEC, enable);
-  if (res < 0) {
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "EnableBuiltInAEC failed (%d)", res);
-  }
-
-  // Detach this thread if it was attached
-  if (isAttached) {
-    if (_javaVM->DetachCurrentThread() < 0) {
-      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                   "%s: Could not detach thread from JVM", __FUNCTION__);
-    }
-  }
-
-  return res;
-}
-
-int32_t AudioRecordJni::InitJavaResources() {
-  // todo: Check if we already have created the java object
-  _javaVM = globalJvm;
-  _javaScClass = globalScClass;
-
-  // use the jvm that has been set
-  if (!_javaVM)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "%s: Not a valid Java VM pointer", __FUNCTION__);
+  ALOGD("EnableBuiltInAEC%s", GetThreadInfo().c_str());
+  DCHECK(thread_checker_.CalledOnValidThread());
+  AttachThreadScoped ats(g_jvm);
+  JNIEnv* jni = ats.env();
+  jmethodID enableBuiltInAEC = GetMethodID(
+      jni, g_audio_record_class, "EnableBuiltInAEC", "(Z)Z");
+  jboolean res = jni->CallBooleanMethod(
+      j_audio_record_, enableBuiltInAEC, enable);
+  CHECK_EXCEPTION(jni);
+  if (!res) {
+    ALOGE("EnableBuiltInAEC failed!");
     return -1;
   }
-
-  // get the JNI env for this thread
-  JNIEnv *env;
-  bool isAttached = false;
-
-  // get the JNI env for this thread
-  if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK)
-  {
-    // try to attach the thread and get the env
-    // Attach this thread to JVM
-    jint res = _javaVM->AttachCurrentThread(&env, NULL);
-    if ((res < 0) || !env)
-    {
-      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                   "%s: Could not attach thread to JVM (%d, %p)",
-                   __FUNCTION__, res, env);
-      return -1;
-    }
-    isAttached = true;
-  }
-
-  WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
-               "get method id");
-
-  // get the method ID for the void(void) constructor
-  jmethodID cid = env->GetMethodID(_javaScClass, "<init>", "()V");
-  if (cid == NULL)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "%s: could not get constructor ID", __FUNCTION__);
-    return -1; /* exception thrown */
-  }
-
-  WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
-               "construct object", __FUNCTION__);
-
-  // construct the object
-  jobject javaScObjLocal = env->NewObject(_javaScClass, cid);
-  if (!javaScObjLocal)
-  {
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "%s: could not create Java sc object", __FUNCTION__);
-    return -1;
-  }
-
-  // Create a reference to the object (to tell JNI that we are referencing it
-  // after this function has returned).
-  _javaScObj = env->NewGlobalRef(javaScObjLocal);
-  if (!_javaScObj)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "%s: could not create Java sc object reference",
-                 __FUNCTION__);
-    return -1;
-  }
-
-  // Delete local object ref, we only use the global ref.
-  env->DeleteLocalRef(javaScObjLocal);
-
-  //////////////////////
-  // AUDIO MANAGEMENT
-
-  // This is not mandatory functionality
-  if (globalContext) {
-    jfieldID context_id = env->GetFieldID(globalScClass,
-                                          "_context",
-                                          "Landroid/content/Context;");
-    if (!context_id) {
-      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                   "%s: could not get _context id", __FUNCTION__);
-      return -1;
-    }
-
-    env->SetObjectField(_javaScObj, context_id, globalContext);
-    jobject javaContext = env->GetObjectField(_javaScObj, context_id);
-    if (!javaContext) {
-      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                   "%s: could not set or get _context", __FUNCTION__);
-      return -1;
-    }
-  }
-  else {
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "%s: did not set Context - some functionality is not "
-                 "supported",
-                 __FUNCTION__);
-  }
-
-  // Get rec buffer field ID.
-  jfieldID fidRecBuffer = env->GetFieldID(_javaScClass, "_recBuffer",
-                                          "Ljava/nio/ByteBuffer;");
-  if (!fidRecBuffer)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "%s: could not get rec buffer fid", __FUNCTION__);
-    return -1;
-  }
-
-  // Get rec buffer object.
-  jobject javaRecBufferLocal = env->GetObjectField(_javaScObj, fidRecBuffer);
-  if (!javaRecBufferLocal)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "%s: could not get rec buffer", __FUNCTION__);
-    return -1;
-  }
-
-  // Create a global reference to the object (to tell JNI that we are
-  // referencing it after this function has returned)
-  // NOTE: we are referencing it only through the direct buffer (see below).
-  _javaRecBuffer = env->NewGlobalRef(javaRecBufferLocal);
-  if (!_javaRecBuffer)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "%s: could not get rec buffer reference", __FUNCTION__);
-    return -1;
-  }
-
-  // Delete local object ref, we only use the global ref.
-  env->DeleteLocalRef(javaRecBufferLocal);
-
-  // Get direct buffer.
-  _javaDirectRecBuffer = env->GetDirectBufferAddress(_javaRecBuffer);
-  if (!_javaDirectRecBuffer)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "%s: could not get direct rec buffer", __FUNCTION__);
-    return -1;
-  }
-
-  // Get the rec audio method ID.
-  _javaMidRecAudio = env->GetMethodID(_javaScClass, "RecordAudio", "(I)I");
-  if (!_javaMidRecAudio)
-  {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "%s: could not get rec audio mid", __FUNCTION__);
-    return -1;
-  }
-
-  // Detach this thread if it was attached.
-  if (isAttached)
-  {
-    if (_javaVM->DetachCurrentThread() < 0)
-    {
-      WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                   "%s: Could not detach thread from JVM", __FUNCTION__);
-    }
-  }
-
-  return 0;
-
-}
-
-int32_t AudioRecordJni::InitSampleRate() {
-  int samplingFreq = 44100;
-  jint res = 0;
-
-  // get the JNI env for this thread
-  JNIEnv *env;
-  bool isAttached = false;
-
-  // get the JNI env for this thread
-  if (_javaVM->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK)
-  {
-    // try to attach the thread and get the env
-    // Attach this thread to JVM
-    jint res = _javaVM->AttachCurrentThread(&env, NULL);
-    if ((res < 0) || !env)
-    {
-      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                   "%s: Could not attach thread to JVM (%d, %p)",
-                   __FUNCTION__, res, env);
-      return -1;
-    }
-    isAttached = true;
-  }
-
-  if (_samplingFreqIn > 0)
-  {
-    // read the configured sampling rate
-    samplingFreq = 44100;
-    if (_samplingFreqIn != 44)
-    {
-      samplingFreq = _samplingFreqIn * 1000;
-    }
-    WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
-                 "  Trying configured recording sampling rate %d",
-                 samplingFreq);
-  }
-
-  // get the method ID
-  jmethodID initRecordingID = env->GetMethodID(_javaScClass, "InitRecording",
-                                               "(II)I");
-
-  bool keepTrying = true;
-  while (keepTrying)
-  {
-    // call java sc object method
-    res = env->CallIntMethod(_javaScObj, initRecordingID, _recAudioSource,
-                             samplingFreq);
-    if (res < 0)
-    {
-      switch (samplingFreq)
-      {
-        case 44100:
-          samplingFreq = 16000;
-          break;
-        case 16000:
-          samplingFreq = 8000;
-          break;
-        default: // error
-          WEBRTC_TRACE(kTraceError,
-                       kTraceAudioDevice, _id,
-                       "%s: InitRecording failed (%d)", __FUNCTION__,
-                       res);
-          return -1;
-      }
-    }
-    else
-    {
-      keepTrying = false;
-    }
-  }
-
-  // set the recording sample rate to use
-  if (samplingFreq == 44100)
-  {
-    _samplingFreqIn = 44;
-  }
-  else
-  {
-    _samplingFreqIn = samplingFreq / 1000;
-  }
-
-  WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
-               "Recording sample rate set to (%d)", _samplingFreqIn);
-
-  // get the method ID
-  jmethodID stopRecordingID = env->GetMethodID(_javaScClass, "StopRecording",
-                                               "()I");
-
-  // Call java sc object method
-  res = env->CallIntMethod(_javaScObj, stopRecordingID);
-  if (res < 0)
-  {
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "StopRecording failed (%d)", res);
-  }
-
-  // Detach this thread if it was attached
-  if (isAttached)
-  {
-    if (_javaVM->DetachCurrentThread() < 0)
-    {
-      WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                   "%s: Could not detach thread from JVM", __FUNCTION__);
-    }
-  }
-
   return 0;
 }
 
-bool AudioRecordJni::RecThreadFunc(void* pThis)
-{
-  return (static_cast<AudioRecordJni*> (pThis)->RecThreadProcess());
+void JNICALL AudioRecordJni::CacheDirectBufferAddress(
+    JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioRecord) {
+  webrtc::AudioRecordJni* this_object =
+      reinterpret_cast<webrtc::AudioRecordJni*>(nativeAudioRecord);
+  this_object->OnCacheDirectBufferAddress(env, byte_buffer);
 }
 
-bool AudioRecordJni::RecThreadProcess()
-{
-  if (!_recThreadIsInitialized)
-  {
-    // Do once when thread is started
+void AudioRecordJni::OnCacheDirectBufferAddress(
+    JNIEnv* env, jobject byte_buffer) {
+  ALOGD("OnCacheDirectBufferAddress");
+  DCHECK(thread_checker_.CalledOnValidThread());
+  direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer);
+  jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
+  ALOGD("direct buffer capacity: %lld", capacity);
+  direct_buffer_capacity_in_bytes_ = static_cast<int>(capacity);
+}
 
-    // Attach this thread to JVM
-    jint res = _javaVM->AttachCurrentThread(&_jniEnvRec, NULL);
+void JNICALL AudioRecordJni::DataIsRecorded(
+    JNIEnv* env, jobject obj, jint length, jlong nativeAudioRecord) {
+  webrtc::AudioRecordJni* this_object =
+      reinterpret_cast<webrtc::AudioRecordJni*>(nativeAudioRecord);
+  this_object->OnDataIsRecorded(length);
+}
 
-    // Get the JNI env for this thread
-    if ((res < 0) || !_jniEnvRec)
-    {
-      WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
-                   _id, "Could not attach rec thread to JVM (%d, %p)",
-                   res, _jniEnvRec);
-      return false; // Close down thread
-    }
+// This method is called on a high-priority thread from Java. The name of
+// the thread is 'AudioRecordJavaThread'.
+void AudioRecordJni::OnDataIsRecorded(int length) {
+  DCHECK(thread_checker_java_.CalledOnValidThread());
+  audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
+                                          frames_per_buffer_);
+  // TODO(henrika): improve playout delay estimate.
+  audio_device_buffer_->SetVQEData(0, kHardwareDelayInMilliseconds, 0);
+  audio_device_buffer_->DeliverRecordedData();
+}
 
-    _recThreadIsInitialized = true;
-  }
+bool AudioRecordJni::HasDeviceObjects() {
+  return (g_jvm && g_context && g_audio_record_class);
+}
 
-  // just sleep if rec has not started
-  if (!_recording)
-  {
-    switch (_timeEventRec.Wait(1000))
-    {
-      case kEventSignaled:
-        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
-                     _id, "Recording thread event signal");
-        _timeEventRec.Reset();
-        break;
-      case kEventError:
-        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
-                     _id, "Recording thread event error");
-        return true;
-      case kEventTimeout:
-        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
-                     _id, "Recording thread event timeout");
-        return true;
-    }
-  }
+void AudioRecordJni::CreateJavaInstance() {
+  ALOGD("CreateJavaInstance");
+  AttachThreadScoped ats(g_jvm);
+  JNIEnv* jni = ats.env();
+  jmethodID constructorID = GetMethodID(
+      jni, g_audio_record_class, "<init>", "(Landroid/content/Context;J)V");
+  j_audio_record_ = jni->NewObject(g_audio_record_class,
+                                   constructorID,
+                                   g_context,
+                                   reinterpret_cast<intptr_t>(this));
+  CHECK_EXCEPTION(jni) << "Error during NewObject";
+  CHECK(j_audio_record_);
+  j_audio_record_ = jni->NewGlobalRef(j_audio_record_);
+  CHECK_EXCEPTION(jni) << "Error during NewGlobalRef";
+  CHECK(j_audio_record_);
+}
 
-  Lock();
-
-  if (_startRec)
-  {
-    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                 "_startRec true, performing initial actions");
-    _startRec = false;
-    _recording = true;
-    _recWarning = 0;
-    _recError = 0;
-    _recStartStopEvent.Set();
-  }
-
-  if (_recording)
-  {
-    uint32_t samplesToRec = _samplingFreqIn * 10;
-
-    // Call java sc object method to record data to direct buffer
-    // Will block until data has been recorded (see java sc class),
-    // therefore we must release the lock
-    UnLock();
-    jint recDelayInSamples = _jniEnvRec->CallIntMethod(_javaScObj,
-                                                        _javaMidRecAudio,
-                                                        2 * samplesToRec);
-    if (recDelayInSamples < 0)
-    {
-      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                   "RecordAudio failed");
-      _recWarning = 1;
-    }
-    else
-    {
-      _delayRecording = recDelayInSamples / _samplingFreqIn;
-    }
-    Lock();
-
-    // Check again since recording may have stopped during Java call
-    if (_recording)
-    {
-      //            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
-      //                         "total delay is %d", msPlayDelay + _delayRecording);
-
-      // Copy data to our direct buffer (held by java sc object)
-      // todo: Give _javaDirectRecBuffer directly to VoE?
-      // todo: Check count <= 480 ?
-      memcpy(_recBuffer, _javaDirectRecBuffer, 2 * samplesToRec);
-
-      // store the recorded buffer (no action will be taken if the
-      // #recorded samples is not a full buffer)
-      _ptrAudioBuffer->SetRecordedBuffer(_recBuffer, samplesToRec);
-
-      // store vqe delay values
-      _ptrAudioBuffer->SetVQEData(_delay_provider->PlayoutDelayMs(),
-                                  _delayRecording, 0);
-
-      // deliver recorded samples at specified sample rate, mic level
-      // etc. to the observer using callback
-      UnLock();
-      _ptrAudioBuffer->DeliverRecordedData();
-      Lock();
-    }
-
-  }  // _recording
-
-  if (_shutdownRecThread)
-  {
-    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
-                 "Detaching rec thread from Java VM");
-
-    // Detach thread from Java VM
-    if (_javaVM->DetachCurrentThread() < 0)
-    {
-      WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
-                   _id, "Could not detach recording thread from JVM");
-      _shutdownRecThread = false;
-      // If we say OK (i.e. set event) and close thread anyway,
-      // app will crash
-    }
-    else
-    {
-      _jniEnvRec = NULL;
-      _shutdownRecThread = false;
-      _recStartStopEvent.Set(); // Signal to Terminate() that we are done
-
-      WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
-                   "Sent signal rec");
-    }
-  }
-
-  UnLock();
-  return true;
+int AudioRecordJni::GetNativeSampleRate() {
+  AttachThreadScoped ats(g_jvm);
+  JNIEnv* jni = ats.env();
+  jmethodID getNativeSampleRate = GetMethodID(
+      jni, g_audio_record_class, "GetNativeSampleRate", "()I");
+  jint sample_rate_hz = jni->CallIntMethod(
+      j_audio_record_, getNativeSampleRate);
+  CHECK_EXCEPTION(jni);
+  return sample_rate_hz;
 }
 
 }  // namespace webrtc
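
Note on the native callback binding: WebRtcAudioRecord.java declares
nativeCacheDirectBufferAddress() and nativeDataIsRecorded(), and the static
JNICALL trampolines above recover the C++ instance from the jlong handle that
the Java constructor received. The registration of these natives is not part
of this diff; a minimal sketch of how it could look with RegisterNatives
(hypothetical helper, assuming an attached JNIEnv, the cached
|g_audio_record_class|, and access to the private trampolines) is:

    static const JNINativeMethod kNativeMethods[] = {
        {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
         reinterpret_cast<void*>(&AudioRecordJni::CacheDirectBufferAddress)},
        {"nativeDataIsRecorded", "(IJ)V",
         reinterpret_cast<void*>(&AudioRecordJni::DataIsRecorded)}};

    // Sketch only: bind the Java native methods to the trampolines above.
    void RegisterAudioRecordNatives(JNIEnv* jni) {
      jint res = jni->RegisterNatives(
          g_audio_record_class, kNativeMethods,
          sizeof(kNativeMethods) / sizeof(kNativeMethods[0]));
      CHECK(res == JNI_OK) << "RegisterNatives failed";
    }
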
diff --git a/webrtc/modules/audio_device/android/audio_record_jni.h b/webrtc/modules/audio_device/android/audio_record_jni.h
index 363d544..3260104 100644
--- a/webrtc/modules/audio_device/android/audio_record_jni.h
+++ b/webrtc/modules/audio_device/android/audio_record_jni.h
@@ -13,166 +13,143 @@
 
 #include <jni.h>
 
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+#include "webrtc/base/thread_checker.h"
 #include "webrtc/modules/audio_device/include/audio_device_defines.h"
 #include "webrtc/modules/audio_device/audio_device_generic.h"
+#include "webrtc/modules/utility/interface/helpers_android.h"
 
 namespace webrtc {
 
-class EventWrapper;
-class ThreadWrapper;
 class PlayoutDelayProvider;
 
-const uint32_t N_REC_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
-const uint32_t N_REC_CHANNELS = 1; // default is mono recording
-const uint32_t REC_BUF_SIZE_IN_SAMPLES = 480; // Handle max 10 ms @ 48 kHz
-
+// Implements 16-bit mono PCM audio input support for Android using the Java
+// AudioRecord interface. Most of the work is done by its Java counterpart in
+// WebRtcAudioRecord.java. This class is created and lives on a thread in
+// C++-land, but recorded audio buffers are delivered on a high-priority
+// thread managed by the Java class.
+//
+// The Java class makes use of AudioEffect features (mainly AEC) which are
+// first available in Jelly Bean. If it is instantiated running against earlier
+// SDKs, the AEC provided by the APM in WebRTC must be used and enabled
+// separately instead.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will DCHECK if any method is called on an invalid thread.
+// It is possible to call the two static methods (SetAndroidAudioDeviceObjects
+// and ClearAndroidAudioDeviceObjects) from a different thread but both will
+// CHECK that the calling thread is attached to a Java VM.
+//
+// All methods use AttachThreadScoped to attach to a Java VM if needed and then
+// detach when the method goes out of scope. We do so because this class does
+// not own the thread it is created and called on, and other objects on the
+// same thread might put us in a detached state at any time.
 class AudioRecordJni {
  public:
-  static int32_t SetAndroidAudioDeviceObjects(void* javaVM, void* env,
-                                              void* context);
+  // Use the invocation API to allow the native application to use the JNI
+  // interface pointer to access VM features.
+  // |jvm| denotes the Java VM, |env| is a pointer to the JNI interface pointer
+  // and |context| corresponds to android.content.Context in Java.
+  // This method also sets a global jclass object, |g_audio_record_class| for
+  // the "org/webrtc/voiceengine/WebRtcAudioRecord"-class.
+  static void SetAndroidAudioDeviceObjects(void* jvm, void* env, void* context);
+  // Always call this method after the object has been destructed. It deletes
+  // existing global references and enables garbage collection.
   static void ClearAndroidAudioDeviceObjects();
 
-  AudioRecordJni(const int32_t id, PlayoutDelayProvider* delay_provider);
+  AudioRecordJni();
   ~AudioRecordJni();
 
-  // Main initializaton and termination
   int32_t Init();
   int32_t Terminate();
-  bool Initialized() const { return _initialized; }
 
-  // Device enumeration
-  int16_t RecordingDevices() { return 1; }  // There is one device only
-  int32_t RecordingDeviceName(uint16_t index,
-                              char name[kAdmMaxDeviceNameSize],
-                              char guid[kAdmMaxGuidSize]);
-
-  // Device selection
-  int32_t SetRecordingDevice(uint16_t index);
-  int32_t SetRecordingDevice(
-      AudioDeviceModule::WindowsDeviceType device);
-
-  // Audio transport initialization
-  int32_t RecordingIsAvailable(bool& available);  // NOLINT
   int32_t InitRecording();
-  bool RecordingIsInitialized() const { return _recIsInitialized; }
+  bool RecordingIsInitialized() const { return initialized_; }
 
-  // Audio transport control
   int32_t StartRecording();
-  int32_t StopRecording();
-  bool Recording() const { return _recording; }
+  int32_t StopRecording();
+  bool Recording() const { return recording_; }
 
-  // Microphone Automatic Gain Control (AGC)
-  int32_t SetAGC(bool enable);
-  bool AGC() const { return _AGC; }
+  int32_t RecordingDelay(uint16_t& delayMS) const;
 
-  // Audio mixer initialization
-  int32_t InitMicrophone();
-  bool MicrophoneIsInitialized() const { return _micIsInitialized; }
-
-  // Microphone volume controls
-  int32_t MicrophoneVolumeIsAvailable(bool& available);  // NOLINT
-  // TODO(leozwang): Add microphone volume control when OpenSL APIs
-  // are available.
-  int32_t SetMicrophoneVolume(uint32_t volume);
-  int32_t MicrophoneVolume(uint32_t& volume) const;  // NOLINT
-  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;  // NOLINT
-  int32_t MinMicrophoneVolume(uint32_t& minVolume) const;  // NOLINT
-  int32_t MicrophoneVolumeStepSize(
-      uint16_t& stepSize) const;  // NOLINT
-
-  // Microphone mute control
-  int32_t MicrophoneMuteIsAvailable(bool& available);  // NOLINT
-  int32_t SetMicrophoneMute(bool enable);
-  int32_t MicrophoneMute(bool& enabled) const;  // NOLINT
-
-  // Microphone boost control
-  int32_t MicrophoneBoostIsAvailable(bool& available);  // NOLINT
-  int32_t SetMicrophoneBoost(bool enable);
-  int32_t MicrophoneBoost(bool& enabled) const;  // NOLINT
-
-  // Stereo support
-  int32_t StereoRecordingIsAvailable(bool& available);  // NOLINT
-  int32_t SetStereoRecording(bool enable);
-  int32_t StereoRecording(bool& enabled) const;  // NOLINT
-
-  // Delay information and control
-  int32_t RecordingDelay(uint16_t& delayMS) const;  // NOLINT
-
-  bool RecordingWarning() const;
-  bool RecordingError() const;
-  void ClearRecordingWarning();
-  void ClearRecordingError();
-
-  // Attach audio buffer
   void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
 
-  int32_t SetRecordingSampleRate(const uint32_t samplesPerSec);
-
   bool BuiltInAECIsAvailable() const;
   int32_t EnableBuiltInAEC(bool enable);
 
  private:
-  void Lock() EXCLUSIVE_LOCK_FUNCTION(_critSect) {
-    _critSect.Enter();
-  }
-  void UnLock() UNLOCK_FUNCTION(_critSect) {
-    _critSect.Leave();
-  }
+  // Called from Java side so we can cache the address of the Java-managed
+  // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
+  // is also stored in |direct_buffer_capacity_in_bytes_|.
+  // This method will be called by the WebRtcAudioRecord constructor, i.e.,
+  // on the same thread that this object is created on.
+  static void JNICALL CacheDirectBufferAddress(
+      JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioRecord);
+  void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer);
 
-  int32_t InitJavaResources();
-  int32_t InitSampleRate();
+  // Called periodically by the Java based WebRtcAudioRecord object when
+  // recording has started. Each call indicates that there are |length| new
+  // bytes recorded in the memory area |direct_buffer_address_| and it is
+  // now time to send these to the consumer.
+  // This method is called on a high-priority thread from Java. The name of
+  // the thread is 'AudioRecordJavaThread'.
+  static void JNICALL DataIsRecorded(
+      JNIEnv* env, jobject obj, jint length, jlong nativeAudioRecord);
+  void OnDataIsRecorded(int length);
 
-  static bool RecThreadFunc(void*);
-  bool RecThreadProcess();
+  // Returns true if SetAndroidAudioDeviceObjects() has been called
+  // successfully.
+  bool HasDeviceObjects();
 
-  // TODO(leozwang): Android holds only one JVM, all these jni handling
-  // will be consolidated into a single place to make it consistant and
-  // reliable. Chromium has a good example at base/android.
-  static JavaVM* globalJvm;
-  static JNIEnv* globalJNIEnv;
-  static jobject globalContext;
-  static jclass globalScClass;
+  // Called from the constructor. Defines the |j_audio_record_| member.
+  void CreateJavaInstance();
 
-  JavaVM* _javaVM; // denotes a Java VM
-  JNIEnv* _jniEnvRec; // The JNI env for recording thread
-  jclass _javaScClass; // AudioDeviceAndroid class
-  jobject _javaScObj; // AudioDeviceAndroid object
-  jobject _javaRecBuffer;
-  void* _javaDirectRecBuffer; // Direct buffer pointer to rec buffer
-  jmethodID _javaMidRecAudio; // Method ID of rec in AudioDeviceAndroid
+  // Returns the native, or optimal, sample rate reported by the audio input
+  // device.
+  int GetNativeSampleRate();
 
-  AudioDeviceBuffer* _ptrAudioBuffer;
-  CriticalSectionWrapper& _critSect;
-  int32_t _id;
-  PlayoutDelayProvider* _delay_provider;
-  bool _initialized;
+  // Stores thread ID in constructor.
+  // We can then use ThreadChecker::CalledOnValidThread() to ensure that
+  // other methods are called from the same thread.
+  // Currently only does DCHECK(thread_checker_.CalledOnValidThread()).
+  rtc::ThreadChecker thread_checker_;
 
-  EventWrapper& _timeEventRec;
-  EventWrapper& _recStartStopEvent;
-  ThreadWrapper* _ptrThreadRec;
-  uint32_t _recThreadID;
-  bool _recThreadIsInitialized;
-  bool _shutdownRecThread;
+  // Stores thread ID in the first call to OnDataIsRecorded() from the
+  // high-priority thread in Java. Detached during construction of this
+  // object.
+  rtc::ThreadChecker thread_checker_java_;
 
-  int8_t _recBuffer[2 * REC_BUF_SIZE_IN_SAMPLES];
-  bool _recordingDeviceIsSpecified;
 
-  bool _recording;
-  bool _recIsInitialized;
-  bool _micIsInitialized;
+  // Should return the current playout delay.
+  // TODO(henrika): fix on Android. Reports zero today.
+  // PlayoutDelayProvider* delay_provider_;
 
-  bool _startRec;
+  // The Java WebRtcAudioRecord instance.
+  jobject j_audio_record_;
 
-  uint16_t _recWarning;
-  uint16_t _recError;
+  // Cached copy of address to direct audio buffer owned by |j_audio_record_|.
+  void* direct_buffer_address_;
 
-  uint16_t _delayRecording;
+  // Number of bytes in the direct audio buffer owned by |j_audio_record_|.
+  int direct_buffer_capacity_in_bytes_;
 
-  bool _AGC;
+  // Number of audio frames per audio buffer. Each audio frame corresponds to
+  // one sample of PCM mono data at 16 bits per sample. Hence, each audio
+  // frame contains 2 bytes (given that the Java layer only supports mono).
+  // Example: 480 for 48000 Hz or 441 for 44100 Hz.
+  int frames_per_buffer_;
 
-  uint16_t _samplingFreqIn; // Sampling frequency for Mic
-  int _recAudioSource;
+  bool initialized_;
+
+  bool recording_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create().
+  AudioDeviceBuffer* audio_device_buffer_;
+
+  // Native sample rate set in AttachAudioBuffer() which uses JNI to ask the
+  // Java layer for the best possible sample rate for this particular device
+  // and audio configuration.
+  int sample_rate_hz_;
 
 };
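
Taken together, the comments above imply a strict, single-threaded lifecycle
for this class. A hypothetical usage sketch (|jvm|, |env|, |context|,
|audio_device_buffer| and SleepMs() are illustrative assumptions, not part of
this patch; all calls must stay on the thread that created the object):

    AudioRecordJni::SetAndroidAudioDeviceObjects(jvm, env, context);
    {
      AudioRecordJni recorder;                 // creates the Java counterpart
      recorder.AttachAudioBuffer(audio_device_buffer);  // sets sample rate
      recorder.Init();
      recorder.InitRecording();                // sets up the Java AudioRecord
      recorder.StartRecording();               // Java starts its audio thread
      SleepMs(10000);  // OnDataIsRecorded() now fires roughly every 10 ms.
      recorder.StopRecording();
      recorder.Terminate();
    }
    AudioRecordJni::ClearAndroidAudioDeviceObjects();
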
 
diff --git a/webrtc/modules/audio_device/android/audio_track_jni.h b/webrtc/modules/audio_device/android/audio_track_jni.h
index 9c12cbd..1871c78 100644
--- a/webrtc/modules/audio_device/android/audio_track_jni.h
+++ b/webrtc/modules/audio_device/android/audio_track_jni.h
@@ -107,6 +107,7 @@
   int32_t GetLoudspeakerStatus(bool& enable) const;  // NOLINT
 
  protected:
+  // TODO(henrika): improve this estimate.
   virtual int PlayoutDelayMs() { return 0; }
 
  private:
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
index 6014e71..e730384 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
@@ -10,8 +10,13 @@
 
 package org.webrtc.voiceengine;
 
+import java.lang.System;
+import java.lang.Thread;
 import java.nio.ByteBuffer;
-import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.TimeUnit;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
 
 import android.content.Context;
 import android.media.AudioFormat;
@@ -22,258 +27,330 @@
 import android.media.AudioRecord;
 import android.media.MediaRecorder.AudioSource;
 import android.os.Build;
+import android.os.Process;
+import android.os.SystemClock;
 import android.util.Log;
 
-class WebRtcAudioRecord {
-    private AudioRecord _audioRecord = null;
+class WebRtcAudioRecord {
+  private static final boolean DEBUG = false;
 
-    private Context _context;
+  private static final String TAG = "WebRtcAudioRecord";
 
-    private ByteBuffer _recBuffer;
-    private byte[] _tempBufRec;
+  // Use 44.1kHz as the default sampling rate.
+  private static final int SAMPLE_RATE_HZ = 44100;
 
-    private final ReentrantLock _recLock = new ReentrantLock();
+  // Mono recording is default.
+  private static final int CHANNELS = 1;
 
-    private boolean _doRecInit = true;
-    private boolean _isRecording = false;
+  // Default audio data format is PCM 16 bit per sample.
+  // Guaranteed to be supported by all devices.
+  private static final int BITS_PER_SAMPLE = 16;
 
-    private int _bufferedRecSamples = 0;
+  // Number of bytes per audio frame.
+  // Example: 16-bit PCM in stereo => 2*(16/8)=4 [bytes/frame]
+  private static final int BYTES_PER_FRAME = CHANNELS * (BITS_PER_SAMPLE / 8);
 
-    private AcousticEchoCanceler _aec = null;
-    private boolean _useBuiltInAEC = false;
+  // Requested size of each recorded buffer provided to the client.
+  private static final int CALLBACK_BUFFER_SIZE_MS = 10;
 
-    private static boolean runningOnJellyBeanOrHigher() {
-      return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN;
+  // Average number of callbacks per second.
+  private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
+
+  private ByteBuffer byteBuffer;
+  private final int bytesPerBuffer;
+  private final int framesPerBuffer;
+  private final int sampleRate;
+
+  private final long nativeAudioRecord;
+  private final AudioManager audioManager;
+  private final Context context;
+
+  private AudioRecord audioRecord = null;
+  private AudioRecordThread audioThread = null;
+
+  private AcousticEchoCanceler aec = null;
+  private boolean useBuiltInAEC = false;
+
+  private final Set<Long> threadIds = new HashSet<Long>();
+
+  private static boolean runningOnJellyBeanOrHigher() {
+    return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN;
+  }
+
+  private static boolean runningOnJellyBeanMR1OrHigher() {
+    return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1;
+  }
+
+  /**
+   * Audio thread which keeps calling AudioRecord.read() waiting for audio
+   * to be recorded. Feeds recorded data to the native counterpart as a
+   * periodic sequence of callbacks using DataIsRecorded().
+   * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
+   */
+  private class AudioRecordThread extends Thread {
+    private volatile boolean keepAlive = true;
+
+    public AudioRecordThread(String name) {
+      super(name);
     }
 
-    WebRtcAudioRecord() {
-        try {
-            _recBuffer = ByteBuffer.allocateDirect(2 * 480); // Max 10 ms @ 48
-                                                             // kHz
-        } catch (Exception e) {
-            DoLog(e.getMessage());
+    @Override
+    public void run() {
+      Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
+      DoLog("AudioRecordThread" + getThreadInfo());
+      AddThreadId();
+
+      try {
+        audioRecord.startRecording();
+      } catch (IllegalStateException e) {
+        DoLogErr("AudioRecord.startRecording failed: " + e.getMessage());
+        return;
+      }
+      assertIsTrue(audioRecord.getRecordingState()
+          == AudioRecord.RECORDSTATE_RECORDING);
+
+      long lastTime = System.nanoTime();
+      while (keepAlive) {
+        int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity());
+        if (bytesRead == byteBuffer.capacity()) {
+          nativeDataIsRecorded(bytesRead, nativeAudioRecord);
+        } else {
+          DoLogErr("AudioRecord.read failed: " + bytesRead);
+          if (bytesRead == AudioRecord.ERROR_INVALID_OPERATION) {
+            keepAlive = false;
+          }
         }
+        if (DEBUG) {
+          long nowTime = System.nanoTime();
+          long durationInMs =
+              TimeUnit.NANOSECONDS.toMillis((nowTime - lastTime));
+          lastTime = nowTime;
+          DoLog("bytesRead[" + durationInMs + "] " + bytesRead);
+        }
+      }
 
-        _tempBufRec = new byte[2 * 480];
+      try {
+        audioRecord.stop();
+      } catch (IllegalStateException e) {
+        DoLogErr("AudioRecord.stop failed: " + e.getMessage());
+      }
+      RemoveThreadId();
     }
 
-    public static boolean BuiltInAECIsAvailable() {
-      // AcousticEchoCanceler was added in API level 16 (Jelly Bean).
-      if (!runningOnJellyBeanOrHigher()) {
+    public void joinThread() {
+      keepAlive = false;
+      while (isAlive()) {
+        try {
+          join();
+        } catch (InterruptedException e) {
+          // Ignore.
+        }
+      }
+    }
+  }
+
+  WebRtcAudioRecord(Context context, long nativeAudioRecord) {
+    DoLog("ctor" + getThreadInfo());
+    this.context = context;
+    this.nativeAudioRecord = nativeAudioRecord;
+    audioManager = ((AudioManager) context.getSystemService(
+        Context.AUDIO_SERVICE));
+    sampleRate = GetNativeSampleRate();
+    bytesPerBuffer = BYTES_PER_FRAME * (sampleRate / BUFFERS_PER_SECOND);
+    framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
+    byteBuffer = ByteBuffer.allocateDirect(bytesPerBuffer);
+    DoLog("byteBuffer.capacity: " + byteBuffer.capacity());
+
+    // Rather than passing the ByteBuffer with every callback (requiring
+    // the potentially expensive GetDirectBufferAddress) we simply have the
+    // native class cache the address to the memory once.
+    nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);
+    AddThreadId();
+  }
+
+  /**
+   * Returns the native or optimal input sample rate for this device's
+   * primary input stream, in Hz.
+   * Note that we actually query the output device but the same result is
+   * also valid for input.
+   */
+  private int GetNativeSampleRate() {
+    if (!runningOnJellyBeanMR1OrHigher()) {
+      return SAMPLE_RATE_HZ;
+    }
+    String sampleRateString = audioManager.getProperty(
+        AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
+    return (sampleRateString == null) ?
+        SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString);
+  }
+
+  public static boolean BuiltInAECIsAvailable() {
+    // AcousticEchoCanceler was added in API level 16 (Jelly Bean).
+    if (!runningOnJellyBeanOrHigher()) {
+      return false;
+    }
+    // TODO(henrika): add black-list based on device name. We could also
+    // use uuid to exclude devices but that would require a session ID from
+    // an existing AudioRecord object.
+    return AcousticEchoCanceler.isAvailable();
+  }
+
+  private boolean EnableBuiltInAEC(boolean enable) {
+    DoLog("EnableBuiltInAEC(" + enable + ')');
+    AddThreadId();
+    // AcousticEchoCanceler was added in API level 16 (Jelly Bean).
+    if (!runningOnJellyBeanOrHigher()) {
+      return false;
+    }
+    // Store the AEC state.
+    useBuiltInAEC = enable;
+    // Set AEC state if AEC has already been created.
+    if (aec != null) {
+      int ret = aec.setEnabled(enable);
+      if (ret != AudioEffect.SUCCESS) {
+        DoLogErr("AcousticEchoCanceler.setEnabled failed");
         return false;
       }
-      // TODO(henrika): add black-list based on device name. We could also
-      // use uuid to exclude devices but that would require a session ID from
-      // an existing AudioRecord object.
-      return AcousticEchoCanceler.isAvailable();
+      DoLog("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
+    }
+    return true;
+  }
+
+  private int InitRecording(int sampleRate) {
+    DoLog("InitRecording(sampleRate=" + sampleRate + ")");
+    AddThreadId();
+    // Get the minimum buffer size required for the successful creation of
+    // an AudioRecord object, in byte units.
+    // Note that this size doesn't guarantee a smooth recording under load.
+    // TODO(henrika): Do we need to make this larger to avoid underruns?
+    int minBufferSize = AudioRecord.getMinBufferSize(
+        sampleRate,
+        AudioFormat.CHANNEL_IN_MONO,
+        AudioFormat.ENCODING_PCM_16BIT);
+    DoLog("AudioRecord.getMinBufferSize: " + minBufferSize);
+
+    if (aec != null) {
+      aec.release();
+      aec = null;
+    }
+    if (audioRecord != null) {
+      audioRecord.release();
+      audioRecord = null;
     }
 
-    private int EnableBuiltInAEC(boolean enable) {
-      DoLog("EnableBuiltInAEC(" + enable + ')');
-      // AcousticEchoCanceler was added in API level 16 (Jelly Bean).
-      if (!runningOnJellyBeanOrHigher()) {
-        return -1;
-      }
+    int bufferSizeInBytes = Math.max(byteBuffer.capacity(), minBufferSize);
+    DoLog("bufferSizeInBytes: " + bufferSizeInBytes);
+    try {
+      audioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION,
+                                    sampleRate,
+                                    AudioFormat.CHANNEL_IN_MONO,
+                                    AudioFormat.ENCODING_PCM_16BIT,
+                                    bufferSizeInBytes);
 
-      _useBuiltInAEC = enable;
+    } catch (IllegalArgumentException e) {
+      DoLog(e.getMessage());
+      return -1;
+    }
+    assertIsTrue(audioRecord.getState() == AudioRecord.STATE_INITIALIZED);
 
-      // Set AEC state if AEC has already been created.
-      if (_aec != null) {
-        int ret = _aec.setEnabled(enable);
-        if (ret != AudioEffect.SUCCESS) {
-          DoLogErr("AcousticEchoCanceler.setEnabled failed");
-          return -1;
-        }
-        DoLog("AcousticEchoCanceler.getEnabled: " + _aec.getEnabled());
-      }
-
-      return 0;
+    DoLog("AudioRecord " +
+          "session ID: " + audioRecord.getAudioSessionId() + ", " +
+          "audio format: " + audioRecord.getAudioFormat() + ", " +
+          "channels: " + audioRecord.getChannelCount() + ", " +
+          "sample rate: " + audioRecord.getSampleRate());
+    DoLog("AcousticEchoCanceler.isAvailable: " + BuiltInAECIsAvailable());
+    if (!BuiltInAECIsAvailable()) {
+      return framesPerBuffer;
     }
 
-    @SuppressWarnings("unused")
-    private int InitRecording(int audioSource, int sampleRate) {
-        DoLog("InitRecording");
-        audioSource = AudioSource.VOICE_COMMUNICATION;
-        // get the minimum buffer size that can be used
-        int minRecBufSize = AudioRecord.getMinBufferSize(
-            sampleRate,
-            AudioFormat.CHANNEL_IN_MONO,
-            AudioFormat.ENCODING_PCM_16BIT);
-
-        // DoLog("min rec buf size is " + minRecBufSize);
-
-        // double size to be more safe
-        int recBufSize = minRecBufSize * 2;
-        // On average half of the samples have been recorded/buffered and the
-        // recording interval is 1/100s.
-        _bufferedRecSamples = sampleRate / 200;
-        // DoLog("rough rec delay set to " + _bufferedRecSamples);
-
-        if (_aec != null) {
-            _aec.release();
-            _aec = null;
-        }
-
-        // release the object
-        if (_audioRecord != null) {
-            _audioRecord.release();
-            _audioRecord = null;
-        }
-
-        try {
-            _audioRecord = new AudioRecord(
-                            audioSource,
-                            sampleRate,
-                            AudioFormat.CHANNEL_IN_MONO,
-                            AudioFormat.ENCODING_PCM_16BIT,
-                            recBufSize);
-
-        } catch (Exception e) {
-            DoLog(e.getMessage());
-            return -1;
-        }
-
-        // check that the audioRecord is ready to be used
-        if (_audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
-            // DoLog("rec not initialized " + sampleRate);
-            return -1;
-        }
-
-        // DoLog("rec sample rate set to " + sampleRate);
-
-        DoLog("AcousticEchoCanceler.isAvailable: " + BuiltInAECIsAvailable());
-        if (!BuiltInAECIsAvailable()) {
-            return _bufferedRecSamples;
-        }
-
-        _aec = AcousticEchoCanceler.create(_audioRecord.getAudioSessionId());
-        if (_aec == null) {
-            DoLogErr("AcousticEchoCanceler.create failed");
-            return -1;
-        }
-
-        int ret = _aec.setEnabled(_useBuiltInAEC);
-        if (ret != AudioEffect.SUCCESS) {
-            DoLogErr("AcousticEchoCanceler.setEnabled failed");
-            return -1;
-        }
-
-        Descriptor descriptor = _aec.getDescriptor();
-        DoLog("AcousticEchoCanceler " +
-              "name: " + descriptor.name + ", " +
-              "implementor: " + descriptor.implementor + ", " +
-              "uuid: " + descriptor.uuid);
-        DoLog("AcousticEchoCanceler.getEnabled: " + _aec.getEnabled());
-
-        return _bufferedRecSamples;
+    aec = AcousticEchoCanceler.create(audioRecord.getAudioSessionId());
+    if (aec == null) {
+      DoLogErr("AcousticEchoCanceler.create failed");
+      return -1;
     }
-
-    @SuppressWarnings("unused")
-    private int StartRecording() {
-        DoLog("StartRecording");
-        // start recording
-        try {
-            _audioRecord.startRecording();
-
-        } catch (IllegalStateException e) {
-            e.printStackTrace();
-            return -1;
-        }
-
-        _isRecording = true;
-        return 0;
+    int ret = aec.setEnabled(useBuiltInAEC);
+    if (ret != AudioEffect.SUCCESS) {
+      DoLogErr("AcousticEchoCanceler.setEnabled failed");
+      return -1;
     }
+    Descriptor descriptor = aec.getDescriptor();
+    DoLog("AcousticEchoCanceler " +
+          "name: " + descriptor.name + ", " +
+          "implementor: " + descriptor.implementor + ", " +
+          "uuid: " + descriptor.uuid);
+    DoLog("AcousticEchoCanceler.getEnabled: " + aec.getEnabled());
+    return framesPerBuffer;
+  }
 
-    @SuppressWarnings("unused")
-    private int StopRecording() {
-        DoLog("StopRecording");
-        _recLock.lock();
-        try {
-            // only stop if we are recording
-            if (_audioRecord.getRecordingState() ==
-              AudioRecord.RECORDSTATE_RECORDING) {
-                // stop recording
-                try {
-                    _audioRecord.stop();
-                } catch (IllegalStateException e) {
-                    e.printStackTrace();
-                    return -1;
-                }
-            }
-
-            // Release the AEC object.
-            if (_aec != null) {
-                _aec.release();
-                _aec = null;
-            }
-
-            // Release the AudioRecord object.
-            _audioRecord.release();
-            _audioRecord = null;
-
-        } finally {
-            // Ensure we always unlock, both for success, exception or error
-            // return.
-            _doRecInit = true;
-            _recLock.unlock();
-        }
-
-        _isRecording = false;
-        return 0;
+  private boolean StartRecording() {
+    DoLog("StartRecording");
+    AddThreadId();
+    if (audioRecord == null) {
+      DoLogErr("start() called before init()");
+      return false;
     }
-
-    @SuppressWarnings("unused")
-    private int RecordAudio(int lengthInBytes) {
-        _recLock.lock();
-
-        try {
-            if (_audioRecord == null) {
-                return -2; // We have probably closed down while waiting for rec
-                           // lock
-            }
-
-            // Set priority, only do once
-            if (_doRecInit == true) {
-                try {
-                    android.os.Process.setThreadPriority(
-                        android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
-                } catch (Exception e) {
-                    DoLog("Set rec thread priority failed: " + e.getMessage());
-                }
-                _doRecInit = false;
-            }
-
-            int readBytes = 0;
-            _recBuffer.rewind(); // Reset the position to start of buffer
-            readBytes = _audioRecord.read(_tempBufRec, 0, lengthInBytes);
-            // DoLog("read " + readBytes + "from SC");
-            _recBuffer.put(_tempBufRec);
-
-            if (readBytes != lengthInBytes) {
-                // DoLog("Could not read all data from sc (read = " + readBytes
-                // + ", length = " + lengthInBytes + ")");
-                return -1;
-            }
-
-        } catch (Exception e) {
-            DoLogErr("RecordAudio try failed: " + e.getMessage());
-
-        } finally {
-            // Ensure we always unlock, both for success, exception or error
-            // return.
-            _recLock.unlock();
-        }
-
-        return _bufferedRecSamples;
+    if (audioThread != null) {
+      DoLogErr("start() was already called");
+      return false;
     }
+    audioThread = new AudioRecordThread("AudioRecordJavaThread");
+    audioThread.start();
+    return true;
+  }
 
-    final String logTag = "WebRtcAudioRecord-Java";
-
-    private void DoLog(String msg) {
-        Log.d(logTag, msg);
+  private boolean StopRecording() {
+    DoLog("StopRecording");
+    AddThreadId();
+    if (audioThread == null) {
+      DoLogErr("start() was never called, or stop() was already called");
+      return false;
     }
-
-    private void DoLogErr(String msg) {
-        Log.e(logTag, msg);
+    audioThread.joinThread();
+    audioThread = null;
+    if (aec != null) {
+      aec.release();
+      aec = null;
     }
+    if (audioRecord != null) {
+      audioRecord.release();
+      audioRecord = null;
+    }
+    return true;
+  }
+
+  private void DoLog(String msg) {
+    Log.d(TAG, msg);
+  }
+
+  private void DoLogErr(String msg) {
+    Log.e(TAG, msg);
+  }
+
+  /** Helper method for building a string of thread information. */
+  private static String getThreadInfo() {
+    return "@[name=" + Thread.currentThread().getName()
+        + ", id=" + Thread.currentThread().getId() + "]";
+  }
+
+  /** Helper method which throws an exception when an assertion has failed. */
+  private static void assertIsTrue(boolean condition) {
+    if (!condition) {
+      throw new AssertionError("Expected condition to be true");
+    }
+  }
+
+  private void AddThreadId() {
+    threadIds.add(Thread.currentThread().getId());
+    DoLog("threadIds: " + threadIds + " (#threads=" + threadIds.size() + ")");
+  }
+
+  private void RemoveThreadId() {
+    threadIds.remove(Thread.currentThread().getId());
+    DoLog("threadIds: " + threadIds + " (#threads=" + threadIds.size() + ")");
+  }
+
+  private native void nativeCacheDirectBufferAddress(
+      ByteBuffer byteBuffer, long nativeAudioRecord);
+
+  private native void nativeDataIsRecorded(int bytes, long nativeAudioRecord);
 }
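
A worked example of the buffer sizing above: with BITS_PER_SAMPLE = 16 and
CHANNELS = 1, BYTES_PER_FRAME is 2, and CALLBACK_BUFFER_SIZE_MS = 10 gives 100
buffers per second. A 48 kHz device therefore gets framesPerBuffer = 480 and
bytesPerBuffer = 960, while a 44.1 kHz device such as the Galaxy S4 mini gets
441 frames and 882 bytes per 10 ms callback. The same arithmetic as a small
self-contained C++ sketch:

    #include <cstdio>
    #include <initializer_list>

    int main() {
      const int kBitsPerSample = 16;      // mirrors BITS_PER_SAMPLE
      const int kChannels = 1;            // mirrors CHANNELS (mono)
      const int kBytesPerFrame = kChannels * (kBitsPerSample / 8);
      const int kBuffersPerSecond = 100;  // 1000 / CALLBACK_BUFFER_SIZE_MS
      for (int rate : {44100, 48000}) {
        int frames = rate / kBuffersPerSecond;
        printf("%d Hz: %d frames/buffer, %d bytes/buffer\n",
               rate, frames, kBytesPerFrame * frames);
      }
      return 0;
    }
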
diff --git a/webrtc/modules/audio_device/audio_device.gypi b/webrtc/modules/audio_device/audio_device.gypi
index 1681fca..816a26a 100644
--- a/webrtc/modules/audio_device/audio_device.gypi
+++ b/webrtc/modules/audio_device/audio_device.gypi
@@ -13,6 +13,7 @@
       'type': 'static_library',
       'dependencies': [
         'webrtc_utility',
+        '<(webrtc_root)/base/base.gyp:rtc_base_approved',
         '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
         '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
       ],
diff --git a/webrtc/modules/utility/interface/helpers_android.h b/webrtc/modules/utility/interface/helpers_android.h
index d0796ec..3424e28 100644
--- a/webrtc/modules/utility/interface/helpers_android.h
+++ b/webrtc/modules/utility/interface/helpers_android.h
@@ -12,9 +12,37 @@
 #define WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_ANDROID_H_
 
 #include <jni.h>
+#include <string>
+
+// Abort the process if |jni| has a Java exception pending.
+// TODO(henrika): merge with CHECK_JNI_EXCEPTION() in jni_helpers.h.
+#define CHECK_EXCEPTION(jni)    \
+  CHECK(!jni->ExceptionCheck()) \
+      << (jni->ExceptionDescribe(), jni->ExceptionClear(), "")
 
 namespace webrtc {
 
+// Return a |JNIEnv*| usable on this thread or NULL if this thread is detached.
+JNIEnv* GetEnv(JavaVM* jvm);
+
+// JNIEnv-helper methods that wrap the API which uses the JNI interface
+// pointer (JNIEnv*). They allow us to CHECK success and that no Java
+// exception is thrown while calling the method.
+jmethodID GetMethodID(
+    JNIEnv* jni, jclass c, const std::string& name, const char* signature);
+
+jclass FindClass(JNIEnv* jni, const std::string& name);
+
+jobject NewGlobalRef(JNIEnv* jni, jobject o);
+
+void DeleteGlobalRef(JNIEnv* jni, jobject o);
+
+// Return thread ID as a string.
+std::string GetThreadId();
+
+// Return thread ID as string suitable for debug logging.
+std::string GetThreadInfo();
+
 // Attach thread to JVM if necessary and detach at scope end if originally
 // attached.
 class AttachThreadScoped {
@@ -29,6 +57,23 @@
   JNIEnv* env_;
 };
 
+// Scoped holder for global Java refs.
+template<class T>  // T is jclass, jobject, jintArray, etc.
+class ScopedGlobalRef {
+ public:
+  ScopedGlobalRef(JNIEnv* jni, T obj)
+      : jni_(jni), obj_(static_cast<T>(NewGlobalRef(jni, obj))) {}
+  ~ScopedGlobalRef() {
+    DeleteGlobalRef(jni_, obj_);
+  }
+  T operator*() const {
+    return obj_;
+  }
+ private:
+  JNIEnv* jni_;
+  T obj_;
+};
+
 }  // namespace webrtc
 
 #endif  // WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_ANDROID_H_
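
The helpers above compose naturally. A hypothetical usage sketch (|jvm| is
assumed to be a JavaVM* cached in JNI_OnLoad(); the class and method looked up
here exist in this patch, but this function itself does not):

    #include "webrtc/base/checks.h"
    #include "webrtc/modules/utility/interface/helpers_android.h"

    bool QueryBuiltInAEC(JavaVM* jvm) {
      // Attaches if needed; detaches again when |ats| goes out of scope.
      webrtc::AttachThreadScoped ats(jvm);
      JNIEnv* jni = ats.env();
      // Holds a global ref for the duration of this scope only.
      webrtc::ScopedGlobalRef<jclass> cls(jni, webrtc::FindClass(
          jni, "org/webrtc/voiceengine/WebRtcAudioRecord"));
      jmethodID mid =
          jni->GetStaticMethodID(*cls, "BuiltInAECIsAvailable", "()Z");
      CHECK_EXCEPTION(jni);
      jboolean available = jni->CallStaticBooleanMethod(*cls, mid);
      CHECK_EXCEPTION(jni);
      return available != JNI_FALSE;
    }
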
diff --git a/webrtc/modules/utility/source/helpers_android.cc b/webrtc/modules/utility/source/helpers_android.cc
index 6acc77e..f429db1 100644
--- a/webrtc/modules/utility/source/helpers_android.cc
+++ b/webrtc/modules/utility/source/helpers_android.cc
@@ -8,27 +8,89 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include "webrtc/base/checks.h"
 #include "webrtc/modules/utility/interface/helpers_android.h"
 
+#include <android/log.h>
 #include <assert.h>
+#include <pthread.h>
 #include <stddef.h>
+#include <unistd.h>
+
+#define TAG "HelpersAndroid"
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
 
 namespace webrtc {
 
+JNIEnv* GetEnv(JavaVM* jvm) {
+  void* env = NULL;
+  jint status = jvm->GetEnv(&env, JNI_VERSION_1_6);
+  CHECK(((env != NULL) && (status == JNI_OK)) ||
+        ((env == NULL) && (status == JNI_EDETACHED)))
+      << "Unexpected GetEnv return: " << status << ":" << env;
+  return reinterpret_cast<JNIEnv*>(env);
+}
+
+jmethodID GetMethodID(
+    JNIEnv* jni, jclass c, const std::string& name, const char* signature) {
+  jmethodID m = jni->GetMethodID(c, name.c_str(), signature);
+  CHECK_EXCEPTION(jni) << "Error during GetMethodID: " << name << ", "
+                       << signature;
+  CHECK(m) << name << ", " << signature;
+  return m;
+}
+
+jclass FindClass(JNIEnv* jni, const std::string& name) {
+  jclass c = jni->FindClass(name.c_str());
+  CHECK_EXCEPTION(jni) << "Error during FindClass: " << name;
+  CHECK(c) << name;
+  return c;
+}
+
+jobject NewGlobalRef(JNIEnv* jni, jobject o) {
+  jobject ret = jni->NewGlobalRef(o);
+  CHECK_EXCEPTION(jni) << "Error during NewGlobalRef";
+  CHECK(ret);
+  return ret;
+}
+
+void DeleteGlobalRef(JNIEnv* jni, jobject o) {
+  jni->DeleteGlobalRef(o);
+  CHECK_EXCEPTION(jni) << "Error during DeleteGlobalRef";
+}
+
+std::string GetThreadId() {
+  char buf[21];  // Big enough to hold a kuint64max plus terminating NULL.
+  int thread_id = gettid();
+  CHECK_LT(snprintf(buf, sizeof(buf), "%i", thread_id),
+      static_cast<int>(sizeof(buf))) << "Thread id is bigger than uint64??";
+  return std::string(buf);
+}
+
+std::string GetThreadInfo() {
+  return "@[tid=" + GetThreadId() + "]";
+}
+
 AttachThreadScoped::AttachThreadScoped(JavaVM* jvm)
     : attached_(false), jvm_(jvm), env_(NULL) {
-  jint ret_val = jvm->GetEnv(reinterpret_cast<void**>(&env_), JNI_VERSION_1_4);
-  if (ret_val == JNI_EDETACHED) {
-    // Attach the thread to the Java VM.
-    ret_val = jvm_->AttachCurrentThread(&env_, NULL);
-    attached_ = ret_val == JNI_OK;
-    assert(attached_);
+  env_ = GetEnv(jvm);
+  if (!env_) {
+    // Adding debug log here so we can track down potential leaks and figure
+    // out why we sometimes see "Native thread exiting without having called
+    // DetachCurrentThread" in logcat outputs.
+    ALOGD("Attaching thread to JVM%s", GetThreadInfo().c_str());
+    jint res = jvm->AttachCurrentThread(&env_, NULL);
+    attached_ = (res == JNI_OK);
+    CHECK(attached_) << "AttachCurrentThread failed: " << res;
   }
 }
 
 AttachThreadScoped::~AttachThreadScoped() {
-  if (attached_ && (jvm_->DetachCurrentThread() < 0)) {
-    assert(false);
+  if (attached_) {
+    ALOGD("Detaching thread from JVM%s", GetThreadInfo().c_str());
+    jint res = jvm_->DetachCurrentThread();
+    CHECK(res == JNI_OK) << "DetachCurrentThread failed: " << res;
+    CHECK(!GetEnv(jvm_));
   }
 }