Adds support for OpenSL ES based audio capture on Android.

NOTE: The new code is disabled by default in the WebRtcAudioManager to ensure that
OpenSL ES is not accidentally activated in existing clients. There are still some
unresolved issues to sort out before it can be used.

Enables the possibility of using OpenSL ES based audio in both directions in WebRTC.
All unit tests and demo clients have been verified with the new implementation, but
the new support is behind a flag (see above).

More testing is needed before it can be used in the field, and additional support
for hardware effects is still missing.

BUG=webrtc:5925
R=tommi@webrtc.org

Review URL: https://codereview.webrtc.org/2119633004 .

Cr-Commit-Position: refs/heads/master@{#14290}
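
For context, the resulting audio-layer selection can be summarized by the rough
sketch below. It mirrors the updated VerifyDefaultAudioLayer test in this CL; the
two booleans are assumed to come from AudioManager::IsLowLatencyPlayoutSupported()
and the new IsLowLatencyRecordSupported().

// Sketch only: how the ADM picks its audio layer after this change.
AudioDeviceModule::AudioLayer layer;
if (low_latency_output && low_latency_input) {
  layer = AudioDeviceModule::kAndroidOpenSLESAudio;  // OpenSL ES both ways.
} else if (low_latency_output) {
  layer = AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
} else {
  layer = AudioDeviceModule::kAndroidJavaAudio;  // Java-based audio both ways.
}
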
diff --git a/webrtc/modules/audio_device/BUILD.gn b/webrtc/modules/audio_device/BUILD.gn
index 06b3129a..2771876 100644
--- a/webrtc/modules/audio_device/BUILD.gn
+++ b/webrtc/modules/audio_device/BUILD.gn
@@ -114,6 +114,8 @@
         "android/opensles_common.h",
         "android/opensles_player.cc",
         "android/opensles_player.h",
+        "android/opensles_recorder.cc",
+        "android/opensles_recorder.h",
       ]
       libs = [
         "log",
diff --git a/webrtc/modules/audio_device/android/audio_device_template.h b/webrtc/modules/audio_device/android/audio_device_template.h
index 40e15ca..9f12e18 100644
--- a/webrtc/modules/audio_device/android/audio_device_template.h
+++ b/webrtc/modules/audio_device/android/audio_device_template.h
@@ -485,11 +485,22 @@
 
   // Returns true if the device both supports built in AEC and the device
   // is not blacklisted.
+  // Currently, even if OpenSL ES is used in both directions, this method
+  // still reports the correct value and has the intended effect. Example:
+  // a device supports built-in AEC and this method returns true. Libjingle
+  // will then disable the WebRTC-based AEC, which works for all devices
+  // (mainly Nexus) even when OpenSL ES is used for input, since our current
+  // implementation enables built-in AEC by default also for OpenSL ES.
+  // The only drawback today is that when Libjingle calls
+  // OpenSLESRecorder::EnableBuiltInAEC(), it has no real effect and a
+  // "Not Implemented" message is logged. This imperfect state will remain
+  // until full support for audio effects based on OpenSL ES APIs is added.
   bool BuiltInAECIsAvailable() const override {
     LOG(INFO) << __FUNCTION__;
     return audio_manager_->IsAcousticEchoCancelerSupported();
   }
 
+  // TODO(henrika): add implementation for OpenSL ES based audio as well.
   int32_t EnableBuiltInAEC(bool enable) override {
     LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
     RTC_CHECK(BuiltInAECIsAvailable()) << "HW AEC is not available";
@@ -498,11 +509,14 @@
 
   // Returns true if the device both supports built in AGC and the device
   // is not blacklisted.
+  // TODO(henrika): add implementation for OpenSL ES based audio as well.
+  // In addition, see comments for BuiltInAECIsAvailable().
   bool BuiltInAGCIsAvailable() const override {
     LOG(INFO) << __FUNCTION__;
     return audio_manager_->IsAutomaticGainControlSupported();
   }
 
+  // TODO(henrika): add implementation for OpenSL ES based audio as well.
   int32_t EnableBuiltInAGC(bool enable) override {
     LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
     RTC_CHECK(BuiltInAGCIsAvailable()) << "HW AGC is not available";
@@ -511,11 +525,14 @@
 
   // Returns true if the device both supports built in NS and the device
   // is not blacklisted.
+  // TODO(henrika): add implementation for OpenSL ES based audio as well.
+  // In addition, see comments for BuiltInAECIsAvailable().
   bool BuiltInNSIsAvailable() const override {
     LOG(INFO) << __FUNCTION__;
     return audio_manager_->IsNoiseSuppressorSupported();
   }
 
+  // TODO(henrika): add implementation for OpenSL ES based audio as well.
   int32_t EnableBuiltInNS(bool enable) override {
     LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
     RTC_CHECK(BuiltInNSIsAvailable()) << "HW NS is not available";
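
To illustrate the comments above, here is a minimal sketch of how a client would
typically probe and toggle the built-in AEC through this template; audio_device is
assumed to be a pointer to the ADM.

// Sketch only: probing and enabling the hardware AEC via the ADM template.
if (audio_device->BuiltInAECIsAvailable()) {
  // The software AEC can be disabled in favor of the hardware effect. With
  // OpenSL ES input, EnableBuiltInAEC() is currently a no-op that only logs
  // "Not Implemented" (see comment above).
  audio_device->EnableBuiltInAEC(true);
}
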
diff --git a/webrtc/modules/audio_device/android/audio_device_unittest.cc b/webrtc/modules/audio_device/android/audio_device_unittest.cc
index 78c83e9..e3a4920 100644
--- a/webrtc/modules/audio_device/android/audio_device_unittest.cc
+++ b/webrtc/modules/audio_device/android/audio_device_unittest.cc
@@ -42,7 +42,6 @@
 using ::testing::NiceMock;
 using ::testing::NotNull;
 using ::testing::Return;
-using ::testing::TestWithParam;
 
 // #define ENABLE_DEBUG_PRINTF
 #ifdef ENABLE_DEBUG_PRINTF
@@ -719,15 +718,22 @@
 
 // We always ask for a default audio layer when the ADM is constructed. But the
 // ADM will then internally set the best suitable combination of audio layers,
-// for input and output based on if low-latency output audio in combination
-// with OpenSL ES is supported or not. This test ensures that the correct
-// selection is done.
+// for input and output based on whether low-latency output and/or input
+// audio in combination with OpenSL ES is supported. This test ensures that
+// the correct selection is done.
 TEST_F(AudioDeviceTest, VerifyDefaultAudioLayer) {
   const AudioDeviceModule::AudioLayer audio_layer = GetActiveAudioLayer();
   bool low_latency_output = audio_manager()->IsLowLatencyPlayoutSupported();
-  AudioDeviceModule::AudioLayer expected_audio_layer = low_latency_output ?
-      AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio :
-      AudioDeviceModule::kAndroidJavaAudio;
+  bool low_latency_input = audio_manager()->IsLowLatencyRecordSupported();
+  AudioDeviceModule::AudioLayer expected_audio_layer;
+  if (low_latency_output && low_latency_input) {
+    expected_audio_layer = AudioDeviceModule::kAndroidOpenSLESAudio;
+  } else if (low_latency_output && !low_latency_input) {
+    expected_audio_layer =
+        AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
+  } else {
+    expected_audio_layer = AudioDeviceModule::kAndroidJavaAudio;
+  }
   EXPECT_EQ(expected_audio_layer, audio_layer);
 }
 
@@ -750,6 +756,14 @@
   EXPECT_EQ(expected_layer, active_layer);
 }
 
+TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForOpenSLInBothDirections) {
+  AudioDeviceModule::AudioLayer expected_layer =
+      AudioDeviceModule::kAndroidOpenSLESAudio;
+  AudioDeviceModule::AudioLayer active_layer =
+      TestActiveAudioLayer(expected_layer);
+  EXPECT_EQ(expected_layer, active_layer);
+}
+
 // The Android ADM supports two different delay reporting modes. One for the
 // low-latency output path (in combination with OpenSL ES), and one for the
 // high-latency output path (Java backends in both directions). These two tests
@@ -863,7 +877,7 @@
 // Verify that calling StopPlayout() will leave us in an uninitialized state
 // which will require a new call to InitPlayout(). This test does not call
 // StartPlayout() while being uninitialized since doing so will hit a
-// RTC_DCHECK.
+// RTC_DCHECK and death tests are not supported on Android.
 TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
   EXPECT_EQ(0, audio_device()->InitPlayout());
   EXPECT_EQ(0, audio_device()->StartPlayout());
@@ -871,6 +885,17 @@
   EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
 }
 
+// Verify that calling StopRecording() will leave us in an uninitialized state
+// which will require a new call to InitRecording(). This test does not call
+// StartRecording() while being uninitialized since doing so will hit a
+// RTC_DCHECK and death tests are not supported on Android.
+TEST_F(AudioDeviceTest, StopRecordingRequiresInitToRestart) {
+  EXPECT_EQ(0, audio_device()->InitRecording());
+  EXPECT_EQ(0, audio_device()->StartRecording());
+  EXPECT_EQ(0, audio_device()->StopRecording());
+  EXPECT_FALSE(audio_device()->RecordingIsInitialized());
+}
+
 // Start playout and verify that the native audio layer starts asking for real
 // audio samples to play out using the NeedMorePlayData callback.
 TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
diff --git a/webrtc/modules/audio_device/android/audio_manager.cc b/webrtc/modules/audio_device/android/audio_manager.cc
index 9b1ee0a..d8b7640 100644
--- a/webrtc/modules/audio_device/android/audio_manager.cc
+++ b/webrtc/modules/audio_device/android/audio_manager.cc
@@ -73,12 +73,12 @@
       hardware_agc_(false),
       hardware_ns_(false),
       low_latency_playout_(false),
+      low_latency_record_(false),
       delay_estimate_in_milliseconds_(0) {
   ALOGD("ctor%s", GetThreadInfo().c_str());
   RTC_CHECK(j_environment_);
   JNINativeMethod native_methods[] = {
-      {"nativeCacheAudioParameters",
-       "(IIZZZZZIIJ)V",
+      {"nativeCacheAudioParameters", "(IIZZZZZZIIJ)V",
        reinterpret_cast<void*>(&webrtc::AudioManager::CacheAudioParameters)}};
   j_native_registration_ = j_environment_->RegisterNatives(
       "org/webrtc/voiceengine/WebRtcAudioManager", native_methods,
@@ -206,6 +206,12 @@
       false : low_latency_playout_;
 }
 
+bool AudioManager::IsLowLatencyRecordSupported() const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  ALOGD("IsLowLatencyRecordSupported()");
+  return low_latency_record_;
+}
+
 bool AudioManager::IsProAudioSupported() const {
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   ALOGD("IsProAudioSupported()");
@@ -227,6 +233,7 @@
                                                 jboolean hardware_agc,
                                                 jboolean hardware_ns,
                                                 jboolean low_latency_output,
+                                                jboolean low_latency_input,
                                                 jboolean pro_audio,
                                                 jint output_buffer_size,
                                                 jint input_buffer_size,
@@ -235,7 +242,8 @@
       reinterpret_cast<webrtc::AudioManager*>(native_audio_manager);
   this_object->OnCacheAudioParameters(
       env, sample_rate, channels, hardware_aec, hardware_agc, hardware_ns,
-      low_latency_output, pro_audio, output_buffer_size, input_buffer_size);
+      low_latency_output, low_latency_input, pro_audio, output_buffer_size,
+      input_buffer_size);
 }
 
 void AudioManager::OnCacheAudioParameters(JNIEnv* env,
@@ -245,6 +253,7 @@
                                           jboolean hardware_agc,
                                           jboolean hardware_ns,
                                           jboolean low_latency_output,
+                                          jboolean low_latency_input,
                                           jboolean pro_audio,
                                           jint output_buffer_size,
                                           jint input_buffer_size) {
@@ -253,6 +262,7 @@
   ALOGD("hardware_agc: %d", hardware_agc);
   ALOGD("hardware_ns: %d", hardware_ns);
   ALOGD("low_latency_output: %d", low_latency_output);
+  ALOGD("low_latency_input: %d", low_latency_input);
   ALOGD("pro_audio: %d", pro_audio);
   ALOGD("sample_rate: %d", sample_rate);
   ALOGD("channels: %d", channels);
@@ -263,6 +273,7 @@
   hardware_agc_ = hardware_agc;
   hardware_ns_ = hardware_ns;
   low_latency_playout_ = low_latency_output;
+  low_latency_record_ = low_latency_input;
   pro_audio_ = pro_audio;
   // TODO(henrika): add support for stereo output.
   playout_parameters_.reset(sample_rate, static_cast<size_t>(channels),
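
For readers unfamiliar with JNI method descriptors, the updated signature above
decodes as follows (a comment-only sketch; one extra Z was added for the new
lowLatencyInput flag):

// (IIZZZZZZIIJ)V decoded, matching nativeCacheAudioParameters():
//   II     -> int sampleRate, int channels
//   ZZZZZZ -> boolean hardwareAEC, hardwareAGC, hardwareNS,
//             lowLatencyOutput, lowLatencyInput, proAudio
//   II     -> int outputBufferSize, int inputBufferSize
//   J      -> long nativeAudioManager
//   V      -> void return type
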
diff --git a/webrtc/modules/audio_device/android/audio_manager.h b/webrtc/modules/audio_device/android/audio_manager.h
index 808417c..341d426 100644
--- a/webrtc/modules/audio_device/android/audio_manager.h
+++ b/webrtc/modules/audio_device/android/audio_manager.h
@@ -101,6 +101,7 @@
   // Returns true if the device supports the low-latency audio paths in
   // combination with OpenSL ES.
   bool IsLowLatencyPlayoutSupported() const;
+  bool IsLowLatencyRecordSupported() const;
 
   // Returns true if the device supports pro-audio features in combination with
   // OpenSL ES.
@@ -124,6 +125,7 @@
                                            jboolean hardware_agc,
                                            jboolean hardware_ns,
                                            jboolean low_latency_output,
+                                           jboolean low_latency_input,
                                            jboolean pro_audio,
                                            jint output_buffer_size,
                                            jint input_buffer_size,
@@ -135,6 +137,7 @@
                               jboolean hardware_agc,
                               jboolean hardware_ns,
                               jboolean low_latency_output,
+                              jboolean low_latency_input,
                               jboolean pro_audio,
                               jint output_buffer_size,
                               jint input_buffer_size);
@@ -178,9 +181,12 @@
   // True if device supports hardware (or built-in) NS.
   bool hardware_ns_;
 
-  // True if device supports the low-latency OpenSL ES audio path.
+  // True if device supports the low-latency OpenSL ES audio path for output.
   bool low_latency_playout_;
 
+  // True if device supports the low-latency OpenSL ES audio path for input.
+  bool low_latency_record_;
+
   // True if device supports the low-latency OpenSL ES pro-audio path.
   bool pro_audio_;
 
diff --git a/webrtc/modules/audio_device/android/audio_manager_unittest.cc b/webrtc/modules/audio_device/android/audio_manager_unittest.cc
index 0249ab9..a655c1b 100644
--- a/webrtc/modules/audio_device/android/audio_manager_unittest.cc
+++ b/webrtc/modules/audio_device/android/audio_manager_unittest.cc
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include <memory>
 #include <SLES/OpenSLES_Android.h>
 
 #include "testing/gtest/include/gtest/gtest.h"
@@ -128,6 +127,11 @@
         audio_manager()->IsLowLatencyPlayoutSupported() ? "Yes" : "No");
 }
 
+TEST_F(AudioManagerTest, IsLowLatencyRecordSupported) {
+  PRINT("%sLow latency input support: %s\n", kTag,
+        audio_manager()->IsLowLatencyRecordSupported() ? "Yes" : "No");
+}
+
 TEST_F(AudioManagerTest, IsProAudioSupported) {
   PRINT("%sPro audio support: %s\n", kTag,
         audio_manager()->IsProAudioSupported() ? "Yes" : "No");
@@ -135,6 +139,7 @@
 
 TEST_F(AudioManagerTest, ShowAudioParameterInfo) {
   const bool low_latency_out = audio_manager()->IsLowLatencyPlayoutSupported();
+  const bool low_latency_in = audio_manager()->IsLowLatencyRecordSupported();
   PRINT("PLAYOUT:\n");
   PRINT("%saudio layer: %s\n", kTag,
         low_latency_out ? "Low latency OpenSL" : "Java/JNI based AudioTrack");
@@ -144,7 +149,8 @@
         playout_parameters_.frames_per_buffer(),
         playout_parameters_.GetBufferSizeInMilliseconds());
   PRINT("RECORD: \n");
-  PRINT("%saudio layer: %s\n", kTag, "Java/JNI based AudioRecord");
+  PRINT("%saudio layer: %s\n", kTag,
+        low_latency_in ? "Low latency OpenSL" : "Java/JNI based AudioRecord");
   PRINT("%ssample rate: %d Hz\n", kTag, record_parameters_.sample_rate());
   PRINT("%schannels: %" PRIuS "\n", kTag, record_parameters_.channels());
   PRINT("%sframes per buffer: %" PRIuS " <=> %.2f ms\n", kTag,
@@ -152,6 +158,21 @@
         record_parameters_.GetBufferSizeInMilliseconds());
 }
 
+// The audio device module only supports the same sample rate in both directions.
+// In addition, in full-duplex low-latency mode (OpenSL ES), both input and
+// output must use the same native buffer size to allow for usage of the fast
+// audio track in Android.
+TEST_F(AudioManagerTest, VerifyAudioParameters) {
+  const bool low_latency_out = audio_manager()->IsLowLatencyPlayoutSupported();
+  const bool low_latency_in = audio_manager()->IsLowLatencyRecordSupported();
+  EXPECT_EQ(playout_parameters_.sample_rate(),
+            record_parameters_.sample_rate());
+  if (low_latency_out && low_latency_in) {
+    EXPECT_EQ(playout_parameters_.frames_per_buffer(),
+              record_parameters_.frames_per_buffer());
+  }
+}
+
 // Add device-specific information to the test for logging purposes.
 TEST_F(AudioManagerTest, ShowDeviceInfo) {
   BuildInfo build_info;
@@ -167,7 +188,7 @@
   PRINT("%sbuild release: %s\n", kTag, build_info.GetBuildRelease().c_str());
   PRINT("%sbuild id: %s\n", kTag, build_info.GetAndroidBuildId().c_str());
   PRINT("%sbuild type: %s\n", kTag, build_info.GetBuildType().c_str());
-  PRINT("%sSDK version: %s\n", kTag, build_info.GetSdkVersion().c_str());
+  PRINT("%sSDK version: %d\n", kTag, build_info.GetSdkVersion());
 }
 
 // Basic test of the AudioParameters class using default construction where
diff --git a/webrtc/modules/audio_device/android/build_info.cc b/webrtc/modules/audio_device/android/build_info.cc
index 455c12f..3628e40 100644
--- a/webrtc/modules/audio_device/android/build_info.cc
+++ b/webrtc/modules/audio_device/android/build_info.cc
@@ -50,8 +50,10 @@
   return GetStringFromJava("getBuildRelease");
 }
 
-std::string BuildInfo::GetSdkVersion() {
-  return GetStringFromJava("getSdkVersion");
+SdkCode BuildInfo::GetSdkVersion() {
+  jmethodID id = j_build_info_.GetStaticMethodId("getSdkVersion", "()I");
+  jint j_version = j_build_info_.CallStaticIntMethod(id);
+  return static_cast<SdkCode>(j_version);
 }
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_device/android/build_info.h b/webrtc/modules/audio_device/android/build_info.h
index 4a4c30e..d9640dd 100644
--- a/webrtc/modules/audio_device/android/build_info.h
+++ b/webrtc/modules/audio_device/android/build_info.h
@@ -19,6 +19,22 @@
 
 namespace webrtc {
 
+// This enumeration maps to the values returned by BuildInfo::GetSdkVersion(),
+// indicating the Android release associated with a given SDK version.
+// See https://developer.android.com/guide/topics/manifest/uses-sdk-element.html
+// for details.
+enum SdkCode {
+  SDK_CODE_JELLY_BEAN = 16,      // Android 4.1
+  SDK_CODE_JELLY_BEAN_MR1 = 17,  // Android 4.2
+  SDK_CODE_JELLY_BEAN_MR2 = 18,  // Android 4.3
+  SDK_CODE_KITKAT = 19,          // Android 4.4
+  SDK_CODE_WATCH = 20,           // Android 4.4W
+  SDK_CODE_LOLLIPOP = 21,        // Android 5.0
+  SDK_CODE_LOLLIPOP_MR1 = 22,    // Android 5.1
+  SDK_CODE_MARSHMALLOW = 23,     // Android 6.0
+  SDK_CODE_N = 24,
+};
+
 // Utility class used to query the Java class (org/webrtc/voiceengine/BuildInfo)
 // for device and Android build information.
 // The calling thread is attached to the JVM at construction if needed and a
@@ -42,8 +58,9 @@
   std::string GetBuildType();
   // The user-visible version string (e.g. "5.1").
   std::string GetBuildRelease();
-  // The user-visible SDK version of the framework (e.g. 21).
-  std::string GetSdkVersion();
+  // The user-visible SDK version of the framework (e.g. 21). See SdkCode enum
+  // for translation.
+  SdkCode GetSdkVersion();
 
  private:
   // Helper method which calls a static getter method with |name| and returns
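
Since GetSdkVersion() now returns an integer-based SdkCode, callers can compare it
directly against the enum. A hypothetical usage sketch (not part of this CL):

// Sketch only: gating a feature on the Android release via SdkCode.
BuildInfo build_info;
if (build_info.GetSdkVersion() >= SDK_CODE_LOLLIPOP) {
  // API level 21 or higher: lower latency audio input may be supported.
}
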
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java
index 95ff585..aaf07f1 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java
@@ -45,7 +45,7 @@
     return Build.VERSION.RELEASE;
   }
 
-  public static String getSdkVersion() {
-    return Integer.toString(Build.VERSION.SDK_INT);
+  public static int getSdkVersion() {
+    return Build.VERSION.SDK_INT;
   }
 }
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
index f24e470..c1a9fa0 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
@@ -134,6 +134,7 @@
   private boolean hardwareAGC;
   private boolean hardwareNS;
   private boolean lowLatencyOutput;
+  private boolean lowLatencyInput;
   private boolean proAudio;
   private int sampleRate;
   private int channels;
@@ -153,10 +154,9 @@
     }
     volumeLogger = new VolumeLogger(audioManager);
     storeAudioParameters();
-    nativeCacheAudioParameters(
-        sampleRate, channels, hardwareAEC, hardwareAGC, hardwareNS,
-        lowLatencyOutput, proAudio, outputBufferSize, inputBufferSize,
-        nativeAudioManager);
+    nativeCacheAudioParameters(sampleRate, channels, hardwareAEC, hardwareAGC, hardwareNS,
+            lowLatencyOutput, lowLatencyInput, proAudio, outputBufferSize, inputBufferSize,
+            nativeAudioManager);
   }
 
   private boolean init() {
@@ -201,12 +201,13 @@
     hardwareAGC = isAutomaticGainControlSupported();
     hardwareNS = isNoiseSuppressorSupported();
     lowLatencyOutput = isLowLatencyOutputSupported();
+    lowLatencyInput = isLowLatencyInputSupported();
     proAudio = isProAudioSupported();
     outputBufferSize = lowLatencyOutput ?
         getLowLatencyOutputFramesPerBuffer() :
         getMinOutputFrameSize(sampleRate, channels);
-    // TODO(henrika): add support for low-latency input.
-    inputBufferSize = getMinInputFrameSize(sampleRate, channels);
+    inputBufferSize = lowLatencyInput ? getLowLatencyInputFramesPerBuffer()
+                                      : getMinInputFrameSize(sampleRate, channels);
   }
 
   // Gets the current earpiece state.
@@ -223,13 +224,15 @@
   }
 
   // Returns true if low-latency audio input is supported.
+  // TODO(henrika): remove the hardcoded false return value when OpenSL ES
+  // input performance has been evaluated and tested more.
   public boolean isLowLatencyInputSupported() {
     // TODO(henrika): investigate if some sort of device list is needed here
     // as well. The NDK doc states that: "As of API level 21, lower latency
     // audio input is supported on select devices. To take advantage of this
     // feature, first confirm that lower latency output is available".
-    return WebRtcAudioUtils.runningOnLollipopOrHigher()
-        && isLowLatencyOutputSupported();
+    return false;
+    // return WebRtcAudioUtils.runningOnLollipopOrHigher() && isLowLatencyOutputSupported();
   }
 
   // Returns true if the device has professional audio level of functionality
@@ -353,8 +356,8 @@
     }
   }
 
-  private native void nativeCacheAudioParameters(
-    int sampleRate, int channels, boolean hardwareAEC, boolean hardwareAGC,
-    boolean hardwareNS, boolean lowLatencyOutput, boolean proAudio,
-    int outputBufferSize, int inputBufferSize, long nativeAudioManager);
+  private native void nativeCacheAudioParameters(int sampleRate, int channels, boolean hardwareAEC,
+          boolean hardwareAGC, boolean hardwareNS, boolean lowLatencyOutput,
+          boolean lowLatencyInput, boolean proAudio, int outputBufferSize, int inputBufferSize,
+          long nativeAudioManager);
 }
diff --git a/webrtc/modules/audio_device/android/opensles_common.cc b/webrtc/modules/audio_device/android/opensles_common.cc
index 9e3cbf7..e25f863 100644
--- a/webrtc/modules/audio_device/android/opensles_common.cc
+++ b/webrtc/modules/audio_device/android/opensles_common.cc
@@ -10,13 +10,10 @@
 
 #include "webrtc/modules/audio_device/android/opensles_common.h"
 
-#include <assert.h>
 #include <SLES/OpenSLES.h>
 
 #include "webrtc/base/arraysize.h"
-#include "webrtc/modules/audio_device/android/audio_common.h"
-
-using webrtc::kNumChannels;
+#include "webrtc/base/checks.h"
 
 namespace webrtc {
 
@@ -49,24 +46,58 @@
   return sl_error_strings[code];
 }
 
-SLDataFormat_PCM CreatePcmConfiguration(int sample_rate) {
-  SLDataFormat_PCM configuration;
-  configuration.formatType = SL_DATAFORMAT_PCM;
-  configuration.numChannels = kNumChannels;
-  // According to the opensles documentation in the ndk:
-  // samplesPerSec is actually in units of milliHz, despite the misleading name.
-  // It further recommends using constants. However, this would lead to a lot
-  // of boilerplate code so it is not done here.
-  configuration.samplesPerSec = sample_rate * 1000;
-  configuration.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
-  configuration.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
-  configuration.channelMask = SL_SPEAKER_FRONT_CENTER;
-  if (2 == configuration.numChannels) {
-    configuration.channelMask =
-        SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
+SLDataFormat_PCM CreatePCMConfiguration(size_t channels,
+                                        int sample_rate,
+                                        size_t bits_per_sample) {
+  RTC_CHECK_EQ(bits_per_sample, SL_PCMSAMPLEFORMAT_FIXED_16);
+  SLDataFormat_PCM format;
+  format.formatType = SL_DATAFORMAT_PCM;
+  format.numChannels = static_cast<SLuint32>(channels);
+  // Note that the unit of the sample rate is actually milliHertz, not Hertz.
+  switch (sample_rate) {
+    case 8000:
+      format.samplesPerSec = SL_SAMPLINGRATE_8;
+      break;
+    case 16000:
+      format.samplesPerSec = SL_SAMPLINGRATE_16;
+      break;
+    case 22050:
+      format.samplesPerSec = SL_SAMPLINGRATE_22_05;
+      break;
+    case 32000:
+      format.samplesPerSec = SL_SAMPLINGRATE_32;
+      break;
+    case 44100:
+      format.samplesPerSec = SL_SAMPLINGRATE_44_1;
+      break;
+    case 48000:
+      format.samplesPerSec = SL_SAMPLINGRATE_48;
+      break;
+    case 64000:
+      format.samplesPerSec = SL_SAMPLINGRATE_64;
+      break;
+    case 88200:
+      format.samplesPerSec = SL_SAMPLINGRATE_88_2;
+      break;
+    case 96000:
+      format.samplesPerSec = SL_SAMPLINGRATE_96;
+      break;
+    default:
+      RTC_CHECK(false) << "Unsupported sample rate: " << sample_rate;
+      break;
   }
-  configuration.endianness = SL_BYTEORDER_LITTLEENDIAN;
-  return configuration;
+  format.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
+  format.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
+  format.endianness = SL_BYTEORDER_LITTLEENDIAN;
+  if (format.numChannels == 1) {
+    format.channelMask = SL_SPEAKER_FRONT_CENTER;
+  } else if (format.numChannels == 2) {
+    format.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
+  } else {
+    RTC_CHECK(false) << "Unsupported number of channels: "
+                     << format.numChannels;
+  }
+  return format;
 }
 
-}  // namespace webrtc_opensl
+}  // namespace webrtc
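
A minimal usage sketch of the refactored helper, assuming 48 kHz mono 16-bit audio
as in the recorder added below (the values are illustrative):

// Sketch only: building a PCM format and an OpenSL ES data sink from it.
SLDataFormat_PCM format = webrtc::CreatePCMConfiguration(
    1 /* channels */, 48000 /* sample_rate */, 16 /* bits_per_sample */);
SLDataLocator_AndroidSimpleBufferQueue buffer_queue = {
    SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2 /* number of buffers */};
SLDataSink audio_sink = {&buffer_queue, &format};
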
diff --git a/webrtc/modules/audio_device/android/opensles_common.h b/webrtc/modules/audio_device/android/opensles_common.h
index 5ff295b..b0859c2 100644
--- a/webrtc/modules/audio_device/android/opensles_common.h
+++ b/webrtc/modules/audio_device/android/opensles_common.h
@@ -11,17 +11,19 @@
 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
 #define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
 
+#include <stddef.h>
 #include <SLES/OpenSLES.h>
 
-#include "webrtc/base/checks.h"
-
 namespace webrtc {
 
 // Returns a string representation given an integer SL_RESULT_XXX code.
 // The mapping can be found in <SLES/OpenSLES.h>.
 const char* GetSLErrorString(size_t code);
 
-SLDataFormat_PCM CreatePcmConfiguration(int sample_rate);
+// Configures an SL_DATAFORMAT_PCM structure based on native audio parameters.
+SLDataFormat_PCM CreatePCMConfiguration(size_t channels,
+                                        int sample_rate,
+                                        size_t bits_per_sample);
 
 // Helper class for using SLObjectItf interfaces.
 template <typename SLType, typename SLDerefType>
@@ -53,6 +55,6 @@
 
 typedef ScopedSLObject<SLObjectItf, const SLObjectItf_*> ScopedSLObjectItf;
 
-}  // namespace webrtc_opensl
+}  // namespace webrtc
 
 #endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
diff --git a/webrtc/modules/audio_device/android/opensles_player.cc b/webrtc/modules/audio_device/android/opensles_player.cc
index b3ad64e..a63b8c1 100644
--- a/webrtc/modules/audio_device/android/opensles_player.cc
+++ b/webrtc/modules/audio_device/android/opensles_player.cc
@@ -44,7 +44,6 @@
       audio_device_buffer_(nullptr),
       initialized_(false),
       playing_(false),
-      bytes_per_buffer_(0),
       buffer_index_(0),
       engine_(nullptr),
       player_(nullptr),
@@ -94,11 +93,13 @@
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   RTC_DCHECK(!initialized_);
   RTC_DCHECK(!playing_);
-  ObtainEngineInterface();
+  if (!ObtainEngineInterface()) {
+    ALOGE("Failed to obtain SL Engine interface");
+    return -1;
+  }
   CreateMix();
   initialized_ = true;
   buffer_index_ = 0;
-  last_play_time_ = rtc::Time();
   return 0;
 }
 
@@ -107,6 +108,9 @@
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   RTC_DCHECK(initialized_);
   RTC_DCHECK(!playing_);
+  if (fine_audio_buffer_) {
+    fine_audio_buffer_->ResetPlayout();
+  }
   // The number of lower latency audio players is limited, hence we create the
   // audio player in Start() and destroy it in Stop().
   CreateAudioPlayer();
@@ -114,6 +118,7 @@
   // starts when mode is later changed to SL_PLAYSTATE_PLAYING.
   // TODO(henrika): we can save some delay by only making one call to
   // EnqueuePlayoutData. Most likely not worth the risk of adding a glitch.
+  last_play_time_ = rtc::Time();
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
     EnqueuePlayoutData();
   }
@@ -187,77 +192,29 @@
   AllocateDataBuffers();
 }
 
-SLDataFormat_PCM OpenSLESPlayer::CreatePCMConfiguration(
-    size_t channels,
-    int sample_rate,
-    size_t bits_per_sample) {
-  ALOGD("CreatePCMConfiguration");
-  RTC_CHECK_EQ(bits_per_sample, SL_PCMSAMPLEFORMAT_FIXED_16);
-  SLDataFormat_PCM format;
-  format.formatType = SL_DATAFORMAT_PCM;
-  format.numChannels = static_cast<SLuint32>(channels);
-  // Note that, the unit of sample rate is actually in milliHertz and not Hertz.
-  switch (sample_rate) {
-    case 8000:
-      format.samplesPerSec = SL_SAMPLINGRATE_8;
-      break;
-    case 16000:
-      format.samplesPerSec = SL_SAMPLINGRATE_16;
-      break;
-    case 22050:
-      format.samplesPerSec = SL_SAMPLINGRATE_22_05;
-      break;
-    case 32000:
-      format.samplesPerSec = SL_SAMPLINGRATE_32;
-      break;
-    case 44100:
-      format.samplesPerSec = SL_SAMPLINGRATE_44_1;
-      break;
-    case 48000:
-      format.samplesPerSec = SL_SAMPLINGRATE_48;
-      break;
-    default:
-      RTC_CHECK(false) << "Unsupported sample rate: " << sample_rate;
-  }
-  format.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
-  format.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
-  format.endianness = SL_BYTEORDER_LITTLEENDIAN;
-  if (format.numChannels == 1)
-    format.channelMask = SL_SPEAKER_FRONT_CENTER;
-  else if (format.numChannels == 2)
-    format.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
-  else
-    RTC_CHECK(false) << "Unsupported number of channels: "
-                     << format.numChannels;
-  return format;
-}
-
 void OpenSLESPlayer::AllocateDataBuffers() {
   ALOGD("AllocateDataBuffers");
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   RTC_DCHECK(!simple_buffer_queue_);
   RTC_CHECK(audio_device_buffer_);
-  // Don't use the lowest possible size as native buffer size. Instead,
-  // use 10ms to better match the frame size that WebRTC uses. It will result
-  // in a reduced risk for audio glitches and also in a more "clean" sequence
-  // of callbacks from the OpenSL ES thread in to WebRTC when asking for audio
-  // to render.
-  ALOGD("lowest possible buffer size: %" PRIuS,
-      audio_parameters_.GetBytesPerBuffer());
-  bytes_per_buffer_ = audio_parameters_.GetBytesPerFrame() *
-      audio_parameters_.frames_per_10ms_buffer();
-  RTC_DCHECK_GE(bytes_per_buffer_, audio_parameters_.GetBytesPerBuffer());
-  ALOGD("native buffer size: %" PRIuS, bytes_per_buffer_);
   // Create a modified audio buffer class which allows us to ask for any number
   // of samples (and not only multiple of 10ms) to match the native OpenSL ES
-  // buffer size.
-  fine_buffer_.reset(new FineAudioBuffer(audio_device_buffer_,
-                                         bytes_per_buffer_,
-                                         audio_parameters_.sample_rate()));
+  // buffer size. The native buffer size corresponds to the
+  // PROPERTY_OUTPUT_FRAMES_PER_BUFFER property which is the number of audio
+  // frames that the HAL (Hardware Abstraction Layer) buffer can hold. It is
+  // recommended to construct audio buffers so that they contain an exact
+  // multiple of this number. If so, callbacks will occur at regular intervals,
+  // which reduces jitter.
+  ALOGD("native buffer size: %" PRIuS, audio_parameters_.GetBytesPerBuffer());
+  ALOGD("native buffer size in ms: %.2f",
+        audio_parameters_.GetBufferSizeInMilliseconds());
+  fine_audio_buffer_.reset(new FineAudioBuffer(
+      audio_device_buffer_, audio_parameters_.GetBytesPerBuffer(),
+      audio_parameters_.sample_rate()));
   // Each buffer must be of this size to avoid unnecessary memcpy while caching
   // data between successive callbacks.
   const size_t required_buffer_size =
-      fine_buffer_->RequiredPlayoutBufferSizeBytes();
+      fine_audio_buffer_->RequiredPlayoutBufferSizeBytes();
   ALOGD("required buffer size: %" PRIuS, required_buffer_size);
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
     audio_buffers_[i].reset(new SLint8[required_buffer_size]);
@@ -267,7 +224,8 @@
 bool OpenSLESPlayer::ObtainEngineInterface() {
   ALOGD("ObtainEngineInterface");
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  RTC_DCHECK(!engine_);
+  if (engine_)
+    return true;
   // Get access to (or create if not already existing) the global OpenSL Engine
   // object.
   SLObjectItf engine_object = audio_manager_->GetOpenSLEngine();
@@ -395,6 +353,8 @@
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!player_object_.Get())
     return;
+  (*simple_buffer_queue_)
+      ->RegisterCallback(simple_buffer_queue_, nullptr, nullptr);
   player_object_.Reset();
   player_ = nullptr;
   simple_buffer_queue_ = nullptr;
@@ -422,10 +382,10 @@
 void OpenSLESPlayer::EnqueuePlayoutData() {
   // Check delta time between two successive callbacks and provide a warning
   // if it becomes very large.
-  // TODO(henrika): using 100ms as upper limit but this value is rather random.
+  // TODO(henrika): using 150ms as upper limit but this value is rather random.
   const uint32_t current_time = rtc::Time();
   const uint32_t diff = current_time - last_play_time_;
-  if (diff > 100) {
+  if (diff > 150) {
     ALOGW("Bad OpenSL ES playout timing, dT=%u [ms]", diff);
   }
   last_play_time_ = current_time;
@@ -433,11 +393,11 @@
   // to adjust for differences in buffer size between WebRTC (10ms) and native
   // OpenSL ES.
   SLint8* audio_ptr = audio_buffers_[buffer_index_].get();
-  fine_buffer_->GetPlayoutData(audio_ptr);
+  fine_audio_buffer_->GetPlayoutData(audio_ptr);
   // Enqueue the decoded audio buffer for playback.
-  SLresult err =
-      (*simple_buffer_queue_)
-          ->Enqueue(simple_buffer_queue_, audio_ptr, bytes_per_buffer_);
+  SLresult err = (*simple_buffer_queue_)
+                     ->Enqueue(simple_buffer_queue_, audio_ptr,
+                               audio_parameters_.GetBytesPerBuffer());
   if (SL_RESULT_SUCCESS != err) {
     ALOGE("Enqueue failed: %d", err);
   }
diff --git a/webrtc/modules/audio_device/android/opensles_player.h b/webrtc/modules/audio_device/android/opensles_player.h
index e3978a1..1b2b1d7 100644
--- a/webrtc/modules/audio_device/android/opensles_player.h
+++ b/webrtc/modules/audio_device/android/opensles_player.h
@@ -11,8 +11,6 @@
 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_
 #define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_
 
-#include <memory>
-
 #include <SLES/OpenSLES.h>
 #include <SLES/OpenSLES_Android.h>
 #include <SLES/OpenSLES_AndroidConfiguration.h>
@@ -49,11 +47,13 @@
 // the output latency may be higher.
 class OpenSLESPlayer {
  public:
-  // The lower output latency path is used only if the application requests a
-  // buffer count of 2 or more, and a buffer size and sample rate that are
-  // compatible with the device's native output configuration provided via the
-  // audio manager at construction.
-  static const int kNumOfOpenSLESBuffers = 4;
+  // Beginning with API level 17 (Android 4.2), a buffer count of 2 or more is
+  // required for lower latency. Beginning with API level 18 (Android 4.3), a
+  // buffer count of 1 is sufficient for lower latency. In addition, the buffer
+  // size and sample rate must be compatible with the device's native output
+  // configuration provided via the audio manager at construction.
+  // TODO(henrika): perhaps set this value dynamically based on OS version.
+  static const int kNumOfOpenSLESBuffers = 2;
 
   explicit OpenSLESPlayer(AudioManager* audio_manager);
   ~OpenSLESPlayer();
@@ -88,11 +88,6 @@
   // internal audio thread while output streaming is active.
   void EnqueuePlayoutData();
 
-  // Configures the SL_DATAFORMAT_PCM structure.
-  SLDataFormat_PCM CreatePCMConfiguration(size_t channels,
-                                          int sample_rate,
-                                          size_t bits_per_sample);
-
   // Allocate memory for audio buffers which will be used to render audio
   // via the SLAndroidSimpleBufferQueueItf interface.
   void AllocateDataBuffers();
@@ -144,11 +139,6 @@
   // 32-bit float representation is needed.
   SLDataFormat_PCM pcm_format_;
 
-  // Number of bytes per audio buffer in each |audio_buffers_[i]|.
-  // Typical sizes are 480 or 512 bytes corresponding to native output buffer
-  // sizes of 240 or 256 audio frames respectively.
-  size_t bytes_per_buffer_;
-
   // Queue of audio buffers to be used by the player object for rendering
   // audio. They will be used in a Round-robin way and the size of each buffer
   // is given by FineAudioBuffer::RequiredBufferSizeBytes().
@@ -159,12 +149,12 @@
   // a finer or coarser granularity. I.e. interacting with this class instead
   // of directly with the AudioDeviceBuffer one can ask for any number of
   // audio data samples.
-  // Example: native buffer size is 240 audio frames at 48kHz sample rate.
-  // WebRTC will provide 480 audio frames per 10ms but OpenSL ES asks for 240
-  // in each callback (one every 5ms). This class can then ask for 240 and the
-  // FineAudioBuffer will ask WebRTC for new data only every second callback
-  // and also cach non-utilized audio.
-  std::unique_ptr<FineAudioBuffer> fine_buffer_;
+  // Example: the native buffer size can be 192 audio frames at a 48kHz
+  // sample rate. WebRTC will provide 480 audio frames per 10ms but OpenSL ES
+  // asks for 192 in each callback (one every 4ms). This class can then ask
+  // for 192 and the FineAudioBuffer will ask WebRTC for new data only
+  // approximately every second callback and also cache non-utilized audio.
+  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
 
   // Keeps track of active audio buffer 'n' in the audio_buffers_[n] queue.
   // Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...
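
As a sketch of the TODO above, the buffer count could be derived from the OS
release using the new SdkCode enum. This helper is hypothetical and not part of
this CL:

// Sketch only: pick the OpenSL ES buffer count based on the SDK level.
static int NumOfOpenSLESBuffers(SdkCode sdk) {
  // API level 18+ (Android 4.3) needs only one buffer for lower latency;
  // API level 17 (Android 4.2) requires two or more.
  return (sdk >= SDK_CODE_JELLY_BEAN_MR2) ? 1 : 2;
}
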
diff --git a/webrtc/modules/audio_device/android/opensles_recorder.cc b/webrtc/modules/audio_device/android/opensles_recorder.cc
new file mode 100644
index 0000000..5178d2c
--- /dev/null
+++ b/webrtc/modules/audio_device/android/opensles_recorder.cc
@@ -0,0 +1,426 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/android/opensles_recorder.h"
+
+#include <android/log.h>
+
+#include "webrtc/base/arraysize.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/base/format_macros.h"
+#include "webrtc/base/timeutils.h"
+#include "webrtc/modules/audio_device/android/audio_common.h"
+#include "webrtc/modules/audio_device/android/audio_manager.h"
+#include "webrtc/modules/audio_device/fine_audio_buffer.h"
+
+#define TAG "OpenSLESRecorder"
+#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
+#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
+#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
+#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
+
+#define LOG_ON_ERROR(op)                                    \
+  [](SLresult err) {                                        \
+    if (err != SL_RESULT_SUCCESS) {                         \
+      ALOGE("%s:%d %s failed: %s", __FILE__, __LINE__, #op, \
+            GetSLErrorString(err));                         \
+      return true;                                          \
+    }                                                       \
+    return false;                                           \
+  }(op)
+
+namespace webrtc {
+
+OpenSLESRecorder::OpenSLESRecorder(AudioManager* audio_manager)
+    : audio_manager_(audio_manager),
+      audio_parameters_(audio_manager->GetRecordAudioParameters()),
+      audio_device_buffer_(nullptr),
+      initialized_(false),
+      recording_(false),
+      engine_(nullptr),
+      recorder_(nullptr),
+      simple_buffer_queue_(nullptr),
+      buffer_index_(0),
+      last_rec_time_(0) {
+  ALOGD("ctor%s", GetThreadInfo().c_str());
+  // Detach from this thread since we want to use the checker to verify calls
+  // from the internal audio thread.
+  thread_checker_opensles_.DetachFromThread();
+  // Use native audio output parameters provided by the audio manager and
+  // define the PCM format structure.
+  pcm_format_ = CreatePCMConfiguration(audio_parameters_.channels(),
+                                       audio_parameters_.sample_rate(),
+                                       audio_parameters_.bits_per_sample());
+}
+
+OpenSLESRecorder::~OpenSLESRecorder() {
+  ALOGD("dtor%s", GetThreadInfo().c_str());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  Terminate();
+  DestroyAudioRecorder();
+  engine_ = nullptr;
+  RTC_DCHECK(!engine_);
+  RTC_DCHECK(!recorder_);
+  RTC_DCHECK(!simple_buffer_queue_);
+}
+
+int OpenSLESRecorder::Init() {
+  ALOGD("Init%s", GetThreadInfo().c_str());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  return 0;
+}
+
+int OpenSLESRecorder::Terminate() {
+  ALOGD("Terminate%s", GetThreadInfo().c_str());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  StopRecording();
+  return 0;
+}
+
+int OpenSLESRecorder::InitRecording() {
+  ALOGD("InitRecording%s", GetThreadInfo().c_str());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK(!recording_);
+  if (!ObtainEngineInterface()) {
+    ALOGE("Failed to obtain SL Engine interface");
+    return -1;
+  }
+  CreateAudioRecorder();
+  initialized_ = true;
+  buffer_index_ = 0;
+  return 0;
+}
+
+int OpenSLESRecorder::StartRecording() {
+  ALOGD("StartRecording%s", GetThreadInfo().c_str());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(initialized_);
+  RTC_DCHECK(!recording_);
+  if (fine_audio_buffer_) {
+    fine_audio_buffer_->ResetRecord();
+  }
+  // Add buffers to the queue before changing state to SL_RECORDSTATE_RECORDING
+  // to ensure that recording starts as soon as the state is modified. On some
+  // devices, SLAndroidSimpleBufferQueue::Clear() used in Stop() does not flush
+  // the buffers as intended and we therefore check the number of buffers
+  // already queued first. Enqueue() can return SL_RESULT_BUFFER_INSUFFICIENT
+  // otherwise.
+  int num_buffers_in_queue = GetBufferCount();
+  for (int i = 0; i < kNumOfOpenSLESBuffers - num_buffers_in_queue; ++i) {
+    if (!EnqueueAudioBuffer()) {
+      recording_ = false;
+      return -1;
+    }
+  }
+  num_buffers_in_queue = GetBufferCount();
+  RTC_DCHECK_EQ(num_buffers_in_queue, kNumOfOpenSLESBuffers);
+  LogBufferState();
+  // Start audio recording by changing the state to SL_RECORDSTATE_RECORDING.
+  // Given that buffers are already enqueued, recording should start at once.
+  // The macro returns -1 if recording fails to start.
+  last_rec_time_ = rtc::Time();
+  if (LOG_ON_ERROR(
+          (*recorder_)->SetRecordState(recorder_, SL_RECORDSTATE_RECORDING))) {
+    return -1;
+  }
+  recording_ = (GetRecordState() == SL_RECORDSTATE_RECORDING);
+  RTC_DCHECK(recording_);
+  return 0;
+}
+
+int OpenSLESRecorder::StopRecording() {
+  ALOGD("StopRecording%s", GetThreadInfo().c_str());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (!initialized_ || !recording_) {
+    return 0;
+  }
+  // Stop recording by setting the record state to SL_RECORDSTATE_STOPPED.
+  if (LOG_ON_ERROR(
+          (*recorder_)->SetRecordState(recorder_, SL_RECORDSTATE_STOPPED))) {
+    return -1;
+  }
+  // Clear the buffer queue to get rid of old data when resuming recording.
+  if (LOG_ON_ERROR((*simple_buffer_queue_)->Clear(simple_buffer_queue_))) {
+    return -1;
+  }
+  thread_checker_opensles_.DetachFromThread();
+  initialized_ = false;
+  recording_ = false;
+  return 0;
+}
+
+void OpenSLESRecorder::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) {
+  ALOGD("AttachAudioBuffer");
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_CHECK(audio_buffer);
+  audio_device_buffer_ = audio_buffer;
+  // Ensure that the audio device buffer is informed about the native sample
+  // rate used on the recording side.
+  const int sample_rate_hz = audio_parameters_.sample_rate();
+  ALOGD("SetRecordingSampleRate(%d)", sample_rate_hz);
+  audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz);
+  // Ensure that the audio device buffer is informed about the number of
+  // channels preferred by the OS on the recording side.
+  const size_t channels = audio_parameters_.channels();
+  ALOGD("SetRecordingChannels(%" PRIuS ")", channels);
+  audio_device_buffer_->SetRecordingChannels(channels);
+  // Allocate memory for internal data buffers given existing audio parameters.
+  AllocateDataBuffers();
+}
+
+int OpenSLESRecorder::EnableBuiltInAEC(bool enable) {
+  ALOGD("EnableBuiltInAEC(%d)", enable);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  ALOGE("Not implemented");
+  return 0;
+}
+
+int OpenSLESRecorder::EnableBuiltInAGC(bool enable) {
+  ALOGD("EnableBuiltInAGC(%d)", enable);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  ALOGE("Not implemented");
+  return 0;
+}
+
+int OpenSLESRecorder::EnableBuiltInNS(bool enable) {
+  ALOGD("EnableBuiltInNS(%d)", enable);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  ALOGE("Not implemented");
+  return 0;
+}
+
+bool OpenSLESRecorder::ObtainEngineInterface() {
+  ALOGD("ObtainEngineInterface");
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (engine_)
+    return true;
+  // Get access to (or create if not already existing) the global OpenSL Engine
+  // object.
+  SLObjectItf engine_object = audio_manager_->GetOpenSLEngine();
+  if (engine_object == nullptr) {
+    ALOGE("Failed to access the global OpenSL engine");
+    return false;
+  }
+  // Get the SL Engine Interface which is implicit.
+  if (LOG_ON_ERROR(
+          (*engine_object)
+              ->GetInterface(engine_object, SL_IID_ENGINE, &engine_))) {
+    return false;
+  }
+  return true;
+}
+
+bool OpenSLESRecorder::CreateAudioRecorder() {
+  ALOGD("CreateAudioRecorder");
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (recorder_object_.Get())
+    return true;
+  RTC_DCHECK(!recorder_);
+  RTC_DCHECK(!simple_buffer_queue_);
+
+  // Audio source configuration.
+  SLDataLocator_IODevice mic_locator = {SL_DATALOCATOR_IODEVICE,
+                                        SL_IODEVICE_AUDIOINPUT,
+                                        SL_DEFAULTDEVICEID_AUDIOINPUT, NULL};
+  SLDataSource audio_source = {&mic_locator, NULL};
+
+  // Audio sink configuration.
+  SLDataLocator_AndroidSimpleBufferQueue buffer_queue = {
+      SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
+      static_cast<SLuint32>(kNumOfOpenSLESBuffers)};
+  SLDataSink audio_sink = {&buffer_queue, &pcm_format_};
+
+  // Create the audio recorder object (requires the RECORD_AUDIO permission).
+  // Do not realize the recorder yet. Set the configuration first.
+  const SLInterfaceID interface_id[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+                                        SL_IID_ANDROIDCONFIGURATION};
+  const SLboolean interface_required[] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
+  if (LOG_ON_ERROR((*engine_)->CreateAudioRecorder(
+          engine_, recorder_object_.Receive(), &audio_source, &audio_sink,
+          arraysize(interface_id), interface_id, interface_required))) {
+    return false;
+  }
+
+  // Configure the audio recorder (before it is realized).
+  SLAndroidConfigurationItf recorder_config;
+  if (LOG_ON_ERROR((recorder_object_->GetInterface(recorder_object_.Get(),
+                                                   SL_IID_ANDROIDCONFIGURATION,
+                                                   &recorder_config)))) {
+    return false;
+  }
+
+  // Use the default microphone tuned for audio communication by selecting
+  // the SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION preset. Note that
+  // SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION leads to a fast track but
+  // also excludes usage of required effects like AEC, AGC and NS.
+  SLint32 stream_type = SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION;
+  if (LOG_ON_ERROR(((*recorder_config)
+                        ->SetConfiguration(recorder_config,
+                                           SL_ANDROID_KEY_RECORDING_PRESET,
+                                           &stream_type, sizeof(SLint32))))) {
+    return false;
+  }
+
+  // The audio recorder can now be realized (in synchronous mode).
+  if (LOG_ON_ERROR((recorder_object_->Realize(recorder_object_.Get(),
+                                              SL_BOOLEAN_FALSE)))) {
+    return false;
+  }
+
+  // Get the implicit recorder interface (SL_IID_RECORD).
+  if (LOG_ON_ERROR((recorder_object_->GetInterface(
+          recorder_object_.Get(), SL_IID_RECORD, &recorder_)))) {
+    return false;
+  }
+
+  // Get the simple buffer queue interface (SL_IID_ANDROIDSIMPLEBUFFERQUEUE).
+  // It was explicitly requested.
+  if (LOG_ON_ERROR((recorder_object_->GetInterface(
+          recorder_object_.Get(), SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+          &simple_buffer_queue_)))) {
+    return false;
+  }
+
+  // Register the input callback for the simple buffer queue.
+  // This callback will be called when receiving new data from the device.
+  if (LOG_ON_ERROR(((*simple_buffer_queue_)
+                        ->RegisterCallback(simple_buffer_queue_,
+                                           SimpleBufferQueueCallback, this)))) {
+    return false;
+  }
+  return true;
+}
+
+void OpenSLESRecorder::DestroyAudioRecorder() {
+  ALOGD("DestroyAudioRecorder");
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (!recorder_object_.Get())
+    return;
+  (*simple_buffer_queue_)
+      ->RegisterCallback(simple_buffer_queue_, nullptr, nullptr);
+  recorder_object_.Reset();
+  recorder_ = nullptr;
+  simple_buffer_queue_ = nullptr;
+}
+
+void OpenSLESRecorder::SimpleBufferQueueCallback(
+    SLAndroidSimpleBufferQueueItf buffer_queue,
+    void* context) {
+  OpenSLESRecorder* stream = static_cast<OpenSLESRecorder*>(context);
+  stream->ReadBufferQueue();
+}
+
+void OpenSLESRecorder::AllocateDataBuffers() {
+  ALOGD("AllocateDataBuffers");
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(!simple_buffer_queue_);
+  RTC_CHECK(audio_device_buffer_);
+  // Create a modified audio buffer class which allows us to deliver any number
+  // of samples (and not only multiples of 10ms) to match the native
+  // OpenSL ES buffer size.
+  ALOGD("frames per native buffer: %" PRIuS,
+        audio_parameters_.frames_per_buffer());
+  ALOGD("frames per 10ms buffer: %" PRIuS,
+        audio_parameters_.frames_per_10ms_buffer());
+  ALOGD("bytes per native buffer: %" PRIuS,
+        audio_parameters_.GetBytesPerBuffer());
+  ALOGD("native sample rate: %d", audio_parameters_.sample_rate());
+  RTC_DCHECK(audio_device_buffer_);
+  fine_audio_buffer_.reset(new FineAudioBuffer(
+      audio_device_buffer_, audio_parameters_.GetBytesPerBuffer(),
+      audio_parameters_.sample_rate()));
+  // Allocate queue of audio buffers that stores recorded audio samples.
+  const int data_size_bytes = audio_parameters_.GetBytesPerBuffer();
+  audio_buffers_.reset(new std::unique_ptr<SLint8[]>[kNumOfOpenSLESBuffers]);
+  for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
+    audio_buffers_[i].reset(new SLint8[data_size_bytes]);
+  }
+}
+
+void OpenSLESRecorder::ReadBufferQueue() {
+  RTC_DCHECK(thread_checker_opensles_.CalledOnValidThread());
+  SLuint32 state = GetRecordState();
+  if (state != SL_RECORDSTATE_RECORDING) {
+    ALOGW("Buffer callback in non-recording state!");
+    return;
+  }
+  // Check delta time between two successive callbacks and provide a warning
+  // if it becomes very large.
+  // TODO(henrika): using 150ms as upper limit but this value is rather random.
+  const uint32_t current_time = rtc::Time();
+  const uint32_t diff = current_time - last_rec_time_;
+  if (diff > 150) {
+    ALOGW("Bad OpenSL ES record timing, dT=%u [ms]", diff);
+  }
+  last_rec_time_ = current_time;
+  // Send recorded audio data to the WebRTC sink.
+  // TODO(henrika): fix delay estimates. It is OK to use fixed values for now
+  // since there is no support to turn off built-in EC in combination with
+  // OpenSL ES anyhow. Hence, as is, the WebRTC based AEC (which would use
+  // these estimates) will never be active.
+  const size_t size_in_bytes =
+      static_cast<size_t>(audio_parameters_.GetBytesPerBuffer());
+  const int8_t* data =
+      static_cast<const int8_t*>(audio_buffers_[buffer_index_].get());
+  fine_audio_buffer_->DeliverRecordedData(data, size_in_bytes, 25, 25);
+  // Enqueue the utilized audio buffer and use it for recording again.
+  EnqueueAudioBuffer();
+}
+
+bool OpenSLESRecorder::EnqueueAudioBuffer() {
+  SLresult err =
+      (*simple_buffer_queue_)
+          ->Enqueue(simple_buffer_queue_, audio_buffers_[buffer_index_].get(),
+                    audio_parameters_.GetBytesPerBuffer());
+  if (SL_RESULT_SUCCESS != err) {
+    ALOGE("Enqueue failed: %s", GetSLErrorString(err));
+    return false;
+  }
+  buffer_index_ = (buffer_index_ + 1) % kNumOfOpenSLESBuffers;
+  return true;
+}
+
+SLuint32 OpenSLESRecorder::GetRecordState() const {
+  RTC_DCHECK(recorder_);
+  SLuint32 state;
+  SLresult err = (*recorder_)->GetRecordState(recorder_, &state);
+  if (SL_RESULT_SUCCESS != err) {
+    ALOGE("GetRecordState failed: %s", GetSLErrorString(err));
+  }
+  return state;
+}
+
+SLAndroidSimpleBufferQueueState OpenSLESRecorder::GetBufferQueueState() const {
+  RTC_DCHECK(simple_buffer_queue_);
+  // state.count: Number of buffers currently in the queue.
+  // state.index: Index of the currently filling buffer. This is a linear index
+  // that keeps a cumulative count of the number of buffers recorded.
+  SLAndroidSimpleBufferQueueState state;
+  SLresult err =
+      (*simple_buffer_queue_)->GetState(simple_buffer_queue_, &state);
+  if (SL_RESULT_SUCCESS != err) {
+    ALOGE("GetState failed: %s", GetSLErrorString(err));
+  }
+  return state;
+}
+
+void OpenSLESRecorder::LogBufferState() const {
+  SLAndroidSimpleBufferQueueState state = GetBufferQueueState();
+  ALOGD("state.count:%d state.index:%d", state.count, state.index);
+}
+
+SLuint32 OpenSLESRecorder::GetBufferCount() {
+  SLAndroidSimpleBufferQueueState state = GetBufferQueueState();
+  return state.count;
+}
+
+}  // namespace webrtc
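
A minimal lifecycle sketch for the new recorder, mirroring how the unit tests
drive it; it assumes an initialized AudioManager (with a created OpenSL engine)
and a valid AudioDeviceBuffer.

// Sketch only: typical call sequence for OpenSLESRecorder.
OpenSLESRecorder recorder(&audio_manager);
recorder.AttachAudioBuffer(&audio_device_buffer);
recorder.Init();
recorder.InitRecording();
recorder.StartRecording();
// Recorded audio is now delivered to the AudioDeviceBuffer on an internal
// OpenSL ES thread via SimpleBufferQueueCallback().
recorder.StopRecording();  // A new InitRecording() is required to restart.
recorder.Terminate();
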
diff --git a/webrtc/modules/audio_device/android/opensles_recorder.h b/webrtc/modules/audio_device/android/opensles_recorder.h
new file mode 100644
index 0000000..952371a
--- /dev/null
+++ b/webrtc/modules/audio_device/android/opensles_recorder.h
@@ -0,0 +1,193 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_RECORDER_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_RECORDER_H_
+
+#include <SLES/OpenSLES.h>
+#include <SLES/OpenSLES_Android.h>
+#include <SLES/OpenSLES_AndroidConfiguration.h>
+
+#include <memory>
+
+#include "webrtc/base/thread_checker.h"
+#include "webrtc/modules/audio_device/android/audio_common.h"
+#include "webrtc/modules/audio_device/android/audio_manager.h"
+#include "webrtc/modules/audio_device/android/opensles_common.h"
+#include "webrtc/modules/audio_device/include/audio_device_defines.h"
+#include "webrtc/modules/audio_device/audio_device_generic.h"
+#include "webrtc/modules/utility/include/helpers_android.h"
+
+namespace webrtc {
+
+class FineAudioBuffer;
+
+// Implements 16-bit mono PCM audio input support for Android using the
+// C based OpenSL ES API. No calls from C/C++ to Java using JNI are made.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will RTC_DCHECK if any method is called on an invalid thread. Recorded audio
+// buffers are provided on a dedicated internal thread managed by the OpenSL
+// ES layer.
+//
+// The existing design forces the user to call InitRecording() after
+// StopRecording() to be able to call StartRecording() again. This is in
+// line with how the Java-based implementation works.
+//
+// As of API level 21, lower latency audio input is supported on select devices.
+// To take advantage of this feature, first confirm that lower latency output is
+// available. The capability for lower latency output is a prerequisite for the
+// lower latency input feature. Then, create an AudioRecorder with the same
+// sample rate and buffer size as would be used for output. OpenSL ES interfaces
+// for input effects preclude the lower latency path.
+// See https://developer.android.com/ndk/guides/audio/opensl-prog-notes.html
+// for more details.
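+//
+// A minimal usage sketch (for illustration only; |audio_manager| is assumed
+// to be an existing AudioManager and |adb| an existing AudioDeviceBuffer):
+//
+//   OpenSLESRecorder recorder(audio_manager);
+//   recorder.Init();
+//   recorder.AttachAudioBuffer(adb);
+//   recorder.InitRecording();
+//   recorder.StartRecording();
+//   // Recorded buffers are now delivered on the internal OpenSL ES thread.
+//   recorder.StopRecording();
+//   // InitRecording() must be called again before the next StartRecording().
+//   recorder.Terminate();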
+class OpenSLESRecorder {
+ public:
+  // Beginning with API level 17 (Android 4.2), a buffer count of 2 or more is
+  // required for lower latency. Beginning with API level 18 (Android 4.3), a
+  // buffer count of 1 is sufficient for lower latency. In addition, the buffer
+  // size and sample rate must be compatible with the device's native input
+  // configuration provided via the audio manager at construction.
+  // TODO(henrika): perhaps set this value dynamically based on OS version.
+  static const int kNumOfOpenSLESBuffers = 2;
+
+  explicit OpenSLESRecorder(AudioManager* audio_manager);
+  ~OpenSLESRecorder();
+
+  int Init();
+  int Terminate();
+
+  int InitRecording();
+  bool RecordingIsInitialized() const { return initialized_; }
+
+  int StartRecording();
+  int StopRecording();
+  bool Recording() const { return recording_; }
+
+  void AttachAudioBuffer(AudioDeviceBuffer* audio_buffer);
+
+  // TODO(henrika): add support using OpenSL ES APIs when available.
+  int EnableBuiltInAEC(bool enable);
+  int EnableBuiltInAGC(bool enable);
+  int EnableBuiltInNS(bool enable);
+
+ private:
+  // Obtains the SL Engine Interface from the existing global Engine object.
+  // The interface exposes creation methods of all the OpenSL ES object types.
+  // This method defines the |engine_| member variable.
+  bool ObtainEngineInterface();
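+  // As a sketch, the lookup is expected to follow the standard OpenSL ES
+  // pattern (|engine_object| is an assumed handle to the global engine):
+  //   (*engine_object)->GetInterface(engine_object, SL_IID_ENGINE, &engine_);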
+
+  // Creates/destroys the audio recorder and the simple-buffer queue object.
+  bool CreateAudioRecorder();
+  void DestroyAudioRecorder();
+
+  // Allocates memory for audio buffers which will be used to capture audio
+  // via the SLAndroidSimpleBufferQueueItf interface.
+  void AllocateDataBuffers();
+
+  // These callback methods are called when data has been written to the input
+  // buffer queue. They are both called from an internal "OpenSL ES thread"
+  // which is not attached to the Dalvik VM.
+  static void SimpleBufferQueueCallback(SLAndroidSimpleBufferQueueItf caller,
+                                        void* context);
+  void ReadBufferQueue();
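+  // The static method is the usual C-to-C++ trampoline; as a sketch of the
+  // pattern (the actual definition lives in opensles_recorder.cc):
+  //   static void SimpleBufferQueueCallback(
+  //       SLAndroidSimpleBufferQueueItf caller, void* context) {
+  //     reinterpret_cast<OpenSLESRecorder*>(context)->ReadBufferQueue();
+  //   }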
+
+  // Wraps calls to SLAndroidSimpleBufferQueueItf::Enqueue(). It can be called
+  // both on the main thread (but before recording has started) and from the
+  // internal audio thread while input streaming is active. It uses
+  // |simple_buffer_queue_| but no lock is needed since the initial calls from
+  // the main thread and the native callback thread are mutually exclusive.
+  bool EnqueueAudioBuffer();
+
+  // Returns the current recorder state.
+  SLuint32 GetRecordState() const;
+
+  // Returns the current buffer queue state.
+  SLAndroidSimpleBufferQueueState GetBufferQueueState() const;
+
+  // Number of buffers currently in the queue.
+  SLuint32 GetBufferCount();
+
+  // Prints a log message of the current queue state. Can be used for debugging
+  // purposes.
+  void LogBufferState() const;
+
+  // Ensures that methods are called from the same thread as this object is
+  // created on.
+  rtc::ThreadChecker thread_checker_;
+
+  // Stores the thread ID in the first call to SimpleBufferQueueCallback()
+  // from the internal non-application thread, which is not attached to the
+  // Dalvik JVM. The checker is detached during construction of this object.
+  rtc::ThreadChecker thread_checker_opensles_;
+
+  // Raw pointer to the audio manager injected at construction. Used to cache
+  // audio parameters and to access the global SL engine object needed by the
+  // ObtainEngineInterface() method. The audio manager outlives any instance of
+  // this class.
+  AudioManager* const audio_manager_;
+
+  // Contains audio parameters provided to this class at construction by the
+  // AudioManager.
+  const AudioParameters audio_parameters_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class and created in AudioDeviceModule::Create().
+  AudioDeviceBuffer* audio_device_buffer_;
+
+  // PCM-type format definition.
+  // TODO(henrika): add support for SLAndroidDataFormat_PCM_EX (android-21) if
+  // 32-bit float representation is needed.
+  SLDataFormat_PCM pcm_format_;
+
+  bool initialized_;
+  bool recording_;
+
+  // This interface exposes creation methods for all the OpenSL ES object types.
+  // It is the OpenSL ES API entry point.
+  SLEngineItf engine_;
+
+  // The audio recorder media object records audio to the destination specified
+  // by the data sink, capturing it from the input specified by the data source.
+  webrtc::ScopedSLObjectItf recorder_object_;
+
+  // This interface is supported on the audio recorder object and it controls
+  // the state of the audio recorder.
+  SLRecordItf recorder_;
+
+  // The Android Simple Buffer Queue interface is supported on the audio
+  // recorder. For recording, an app should enqueue empty buffers. When a
+  // registered callback sends notification that the system has finished writing
+  // data to the buffer, the app can read the buffer.
+  SLAndroidSimpleBufferQueueItf simple_buffer_queue_;
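+  // Callback registration is expected to follow the standard OpenSL ES
+  // pattern (a sketch; done once after the buffer queue has been realized):
+  //   (*simple_buffer_queue_)->RegisterCallback(
+  //       simple_buffer_queue_, SimpleBufferQueueCallback, this);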
+
+  // Consumes audio of native buffer size and feeds the WebRTC layer with 10ms
+  // chunks of audio.
+  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+  // Queue of audio buffers to be used by the recorder object for capturing
+  // audio. They will be used in a round-robin way and the size of each buffer
+  // is given by AudioParameters::GetBytesPerBuffer(), i.e., it corresponds to
+  // the native OpenSL ES buffer size.
+  std::unique_ptr<std::unique_ptr<SLint8[]>[]> audio_buffers_;
+
+  // Keeps track of active audio buffer 'n' in the audio_buffers_[n] queue.
+  // Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...
+  int buffer_index_;
+
+  // Last time the OpenSL ES layer delivered recorded audio data.
+  uint32_t last_rec_time_;
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_RECORDER_H_
diff --git a/webrtc/modules/audio_device/audio_device.gypi b/webrtc/modules/audio_device/audio_device.gypi
index 61975d0..ab67e4c 100644
--- a/webrtc/modules/audio_device/audio_device.gypi
+++ b/webrtc/modules/audio_device/audio_device.gypi
@@ -111,6 +111,8 @@
                     'android/opensles_common.h',
                     'android/opensles_player.cc',
                     'android/opensles_player.h',
+                    'android/opensles_recorder.cc',
+                    'android/opensles_recorder.h',
                   ],
                   'link_settings': {
                     'libraries': [
diff --git a/webrtc/modules/audio_device/audio_device_buffer.cc b/webrtc/modules/audio_device/audio_device_buffer.cc
index b6c5df2..0c85fda 100644
--- a/webrtc/modules/audio_device/audio_device_buffer.cc
+++ b/webrtc/modules/audio_device/audio_device_buffer.cc
@@ -288,7 +288,6 @@
 }
 
 int32_t AudioDeviceBuffer::DeliverRecordedData() {
-  RTC_DCHECK(audio_transport_cb_);
   rtc::CritScope lock(&_critSectCb);
 
   if (!audio_transport_cb_) {
diff --git a/webrtc/modules/audio_device/audio_device_impl.cc b/webrtc/modules/audio_device/audio_device_impl.cc
index 850e9f3..cf69275 100644
--- a/webrtc/modules/audio_device/audio_device_impl.cc
+++ b/webrtc/modules/audio_device/audio_device_impl.cc
@@ -33,6 +33,7 @@
 #include "webrtc/modules/audio_device/android/audio_record_jni.h"
 #include "webrtc/modules/audio_device/android/audio_track_jni.h"
 #include "webrtc/modules/audio_device/android/opensles_player.h"
+#include "webrtc/modules/audio_device/android/opensles_recorder.h"
 #elif defined(WEBRTC_LINUX)
 #if defined(LINUX_ALSA)
 #include "audio_device_alsa_linux.h"
@@ -238,13 +239,18 @@
   _audioManagerAndroid.reset(new AudioManager());
   // Select best possible combination of audio layers.
   if (audioLayer == kPlatformDefaultAudio) {
-    if (_audioManagerAndroid->IsLowLatencyPlayoutSupported()) {
-      // Always use OpenSL ES for output on devices that supports the
+    if (_audioManagerAndroid->IsLowLatencyPlayoutSupported() &&
+        _audioManagerAndroid->IsLowLatencyRecordSupported()) {
+      // Use OpenSL ES for both playout and recording.
+      audioLayer = kAndroidOpenSLESAudio;
+    } else if (_audioManagerAndroid->IsLowLatencyPlayoutSupported() &&
+               !_audioManagerAndroid->IsLowLatencyRecordSupported()) {
+      // Use OpenSL ES for output on devices that only support the
       // low-latency output audio path.
       audioLayer = kAndroidJavaInputAndOpenSLESOutputAudio;
     } else {
-      // Use Java-based audio in both directions when low-latency output
-      // is not supported.
+      // Use Java-based audio in both directions when low-latency output is
+      // not supported.
       audioLayer = kAndroidJavaAudio;
     }
   }
@@ -253,6 +259,10 @@
     // Java audio for both input and output audio.
     ptrAudioDevice = new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(
         audioLayer, audio_manager);
+  } else if (audioLayer == kAndroidOpenSLESAudio) {
+    // OpenSL ES based audio for both input and output audio.
+    ptrAudioDevice = new AudioDeviceTemplate<OpenSLESRecorder, OpenSLESPlayer>(
+        audioLayer, audio_manager);
   } else if (audioLayer == kAndroidJavaInputAndOpenSLESOutputAudio) {
     // Java audio for input and OpenSL ES for output audio (i.e. mixed APIs).
     // This combination provides low-latency output audio and at the same
@@ -261,7 +271,7 @@
         audioLayer, audio_manager);
   } else {
     // Invalid audio layer.
-    ptrAudioDevice = NULL;
+    ptrAudioDevice = nullptr;
   }
 // END #if defined(WEBRTC_ANDROID)
 
diff --git a/webrtc/modules/utility/include/jvm_android.h b/webrtc/modules/utility/include/jvm_android.h
index 574c977..51a0bd8 100644
--- a/webrtc/modules/utility/include/jvm_android.h
+++ b/webrtc/modules/utility/include/jvm_android.h
@@ -64,6 +64,11 @@
   jmethodID GetMethodId(const char* name, const char* signature);
   jmethodID GetStaticMethodId(const char* name, const char* signature);
   jobject CallStaticObjectMethod(jmethodID methodID, ...);
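+  // Usage sketch (the Java method name and signature below are illustrative
+  // only):
+  //   jmethodID id = cls.GetStaticMethodId("getMinBufferSize", "(III)I");
+  //   jint res = cls.CallStaticIntMethod(id, 48000, 16, 2);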
+  jint CallStaticIntMethod(jmethodID methodID, ...);
 
  protected:
   JNIEnv* const jni_;
diff --git a/webrtc/modules/utility/source/jvm_android.cc b/webrtc/modules/utility/source/jvm_android.cc
index d53d1b5..9d08688 100644
--- a/webrtc/modules/utility/source/jvm_android.cc
+++ b/webrtc/modules/utility/source/jvm_android.cc
@@ -40,8 +40,10 @@
 // stack.  Consequently, we only look up all classes once in native WebRTC.
 // http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
 void LoadClasses(JNIEnv* jni) {
+  ALOGD("LoadClasses");
   for (auto& c : loaded_classes) {
     jclass localRef = FindClass(jni, c.name);
+    ALOGD("name: %s", c.name);
     CHECK_EXCEPTION(jni) << "Error during FindClass: " << c.name;
     RTC_CHECK(localRef) << c.name;
     jclass globalRef = reinterpret_cast<jclass>(jni->NewGlobalRef(localRef));
@@ -173,6 +175,17 @@
   return res;
 }
 
+jint JavaClass::CallStaticIntMethod(jmethodID methodID, ...) {
+  va_list args;
+  va_start(args, methodID);
+  // Use the va_list variant here; passing a va_list to the variadic
+  // CallStaticIntMethod() would be undefined behavior.
+  jint res = jni_->CallStaticIntMethodV(j_class_, methodID, args);
+  CHECK_EXCEPTION(jni_) << "Error during CallStaticIntMethod";
+  va_end(args);
+  return res;
+}
+
 // JNIEnvironment implementation.
 JNIEnvironment::JNIEnvironment(JNIEnv* jni) : jni_(jni) {
   ALOGD("JNIEnvironment::ctor%s", GetThreadInfo().c_str());