Adds full-duplex unit test to AudioDeviceTest on Android
BUG=NONE
R=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/42709004
Cr-Original-Commit-Position: refs/heads/master@{#8795}
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: 80d9aeeda530aaf7e1851b381f1f7545a876d75e
diff --git a/modules/audio_device/android/audio_device_unittest.cc b/modules/audio_device/android/audio_device_unittest.cc
index cbdc104..7bb4320 100644
--- a/modules/audio_device/android/audio_device_unittest.cc
+++ b/modules/audio_device/android/audio_device_unittest.cc
@@ -8,12 +8,17 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <algorithm>
+#include <limits>
+#include <list>
+#include <numeric>
+#include <vector>
+
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/criticalsection.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_device/android/ensure_initialized.h"
#include "webrtc/modules/audio_device/audio_device_impl.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
+#include "webrtc/system_wrappers/interface/clock.h"
#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/system_wrappers/interface/scoped_refptr.h"
#include "webrtc/system_wrappers/interface/sleep.h"
@@ -30,12 +35,13 @@
using ::testing::Return;
using ::testing::TestWithParam;
-// #define ENABLE_PRINTF
-#ifdef ENABLE_PRINTF
-#define PRINT(...) printf(__VA_ARGS__);
+// #define ENABLE_DEBUG_PRINTF
+#ifdef ENABLE_DEBUG_PRINTF
+#define PRINTD(...) fprintf(stderr, __VA_ARGS__);
#else
-#define PRINT(...) ((void)0)
+#define PRINTD(...) ((void)0)
#endif
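+// PRINT is always enabled and is used for test progress output and results.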
+#define PRINT(...) fprintf(stderr, __VA_ARGS__);
namespace webrtc {
@@ -55,12 +61,27 @@
// Average number of audio callbacks per second assuming 10ms packet size.
static const int kNumCallbacksPerSecond = 100;
// Play out a test file during this time (unit is in seconds).
-static const int kFilePlayTimeInSec = 2;
+static const int kFilePlayTimeInSec = 5;
// Fixed value for the recording delay using Java based audio backend.
// TODO(henrika): harmonize with OpenSL ES and look for possible improvements.
static const uint32_t kFixedRecordingDelay = 100;
static const int kBitsPerSample = 16;
static const int kBytesPerSample = kBitsPerSample / 8;
+// Run the full-duplex test during this time (unit is in seconds).
+// Note that the first |kNumIgnoreFirstCallbacks| callbacks are ignored.
+static const int kFullDuplexTimeInSec = 10;
+// Wait for the callback sequence to stabilize by ignoring this number of
+// initial callbacks (avoids FIFO access before the sequence has stabilized).
+// Only used in the RunPlayoutAndRecordingInFullDuplex test.
+static const int kNumIgnoreFirstCallbacks = 50;
+// Sets the number of impulses per second in the latency test.
+static const int kImpulseFrequencyInHz = 1;
+// Length of round-trip latency measurements. Number of transmitted impulses
+// is kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1.
+static const int kMeasureLatencyTimeInSec = 11;
+// Threshold used in round-trip latency measurements to avoid detecting noise
+// samples as impulses.
+static const int kImpulseThreshold = 500;
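+// Prefix used to align printed test output with the gtest console log.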
+static const char kTag[] = "[..........] ";
enum TransportType {
kPlayout = 0x1,
@@ -81,26 +102,29 @@
int recording_channels;
};
-class MockAudioTransport : public AudioTransport {
+// Interface for processing the audio stream. Real implementations can e.g.
+// run audio in loopback, read audio from a file or perform latency
+// measurements.
+class AudioStreamInterface {
public:
- explicit MockAudioTransport(int type)
- : num_callbacks_(0),
- type_(type),
- play_count_(0),
- rec_count_(0),
- file_size_in_bytes_(0),
- sample_rate_(0),
- file_pos_(0) {}
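+  // Process or store |num_frames| of recorded audio provided in |source|.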
+ virtual void Write(const void* source, int num_frames) = 0;
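+  // Fill |destination| with |num_frames| of audio to be played out.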
+ virtual void Read(void* destination, int num_frames) = 0;
+ protected:
+ virtual ~AudioStreamInterface() {}
+};
- // Read file with name |file_name| into |file_| array to ensure that we
- // only read from memory during the test. Note that, we only support mono
- // files currently.
- bool LoadFile(const std::string& file_name, int sample_rate) {
+// Reads audio samples from a PCM file where the file is stored in memory at
+// construction.
+class FileAudioStream : public AudioStreamInterface {
+ public:
+ FileAudioStream(
+ int num_callbacks, const std::string& file_name, int sample_rate)
+ : file_size_in_bytes_(0),
+ sample_rate_(sample_rate),
+ file_pos_(0) {
file_size_in_bytes_ = test::GetFileSize(file_name);
sample_rate_ = sample_rate;
- EXPECT_NE(0, num_callbacks_)
- << "Test must call HandleCallbacks before LoadFile.";
- EXPECT_GE(file_size_in_callbacks(), num_callbacks_)
+ EXPECT_GE(file_size_in_callbacks(), num_callbacks)
<< "Size of test file is not large enough to last during the test.";
const int num_16bit_samples =
test::GetFileSize(file_name) / kBytesPerSample;
@@ -111,9 +135,266 @@
file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
EXPECT_EQ(num_samples_read, num_16bit_samples);
fclose(audio_file);
- return true;
}
+ // AudioStreamInterface::Write() is not implemented.
+ virtual void Write(const void* source, int num_frames) override {}
+
+ // Read samples from file stored in memory (at construction) and copy
+ // |num_frames| (<=> 10ms) to the |destination| byte buffer.
+ virtual void Read(void* destination, int num_frames) override {
+ memcpy(destination,
+ static_cast<int16_t*> (&file_[file_pos_]),
+ num_frames * sizeof(int16_t));
+ file_pos_ += num_frames;
+ }
+
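+  // Length of the loaded (mono) file in seconds.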
+ int file_size_in_seconds() const {
+ return (file_size_in_bytes_ / (kBytesPerSample * sample_rate_));
+ }
+ int file_size_in_callbacks() const {
+ return file_size_in_seconds() * kNumCallbacksPerSecond;
+ }
+
+ private:
+ int file_size_in_bytes_;
+ int sample_rate_;
+ rtc::scoped_ptr<int16_t[]> file_;
+ int file_pos_;
+};
+
+// Simple first in first out (FIFO) class that wraps a list of 16-bit audio
+// buffers of fixed size and allows Write and Read operations. The idea is to
+// store recorded audio buffers (using Write) and then read (using Read) these
+// stored buffers with as short a delay as possible when the audio layer needs
+// data to play out. The number of buffers in the FIFO will stabilize under
+// normal conditions since there will be a balance between Write and Read calls.
+// The container is a std::list and access is protected with a lock since both
+// sides (playout and recording) are driven by their own threads.
+class FifoAudioStream : public AudioStreamInterface {
+ public:
+ explicit FifoAudioStream(int frames_per_buffer)
+ : frames_per_buffer_(frames_per_buffer),
+ bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
+ fifo_(new AudioBufferList),
+ largest_size_(0),
+ total_written_elements_(0),
+ write_count_(0) {
+ EXPECT_NE(fifo_.get(), nullptr);
+ }
+
+ ~FifoAudioStream() {
+ Flush();
+ PRINTD("[%4.3f]\n", average_size());
+ }
+
+  // Allocate new memory, copy |num_frames| samples from |source| into it and
+  // add a pointer to the new buffer to the end of the list.
+ // Increases the size of the FIFO by one element.
+ virtual void Write(const void* source, int num_frames) override {
+ ASSERT_EQ(num_frames, frames_per_buffer_);
+ PRINTD("+");
+ if (write_count_++ < kNumIgnoreFirstCallbacks) {
+ return;
+ }
+ int16_t* memory = new int16_t[frames_per_buffer_];
+ memcpy(static_cast<int16_t*> (&memory[0]),
+ source,
+ bytes_per_buffer_);
+ rtc::CritScope lock(&lock_);
+ fifo_->push_back(memory);
+ const int size = fifo_->size();
+ if (size > largest_size_) {
+ largest_size_ = size;
+ PRINTD("(%d)", largest_size_);
+ }
+ total_written_elements_ += size;
+ }
+
+  // Take the oldest buffer from the front of the list, copy |num_frames| of
+  // stored data into |destination| and then delete the buffer.
+ // Decreases the size of the FIFO by one element.
+ virtual void Read(void* destination, int num_frames) override {
+ ASSERT_EQ(num_frames, frames_per_buffer_);
+ PRINTD("-");
+ rtc::CritScope lock(&lock_);
+ if (fifo_->empty()) {
+ memset(destination, 0, bytes_per_buffer_);
+ } else {
+ int16_t* memory = fifo_->front();
+ fifo_->pop_front();
+ memcpy(destination,
+ static_cast<int16_t*> (&memory[0]),
+ bytes_per_buffer_);
+      delete [] memory;
+ }
+ }
+
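+  // Current number of buffers stored in the FIFO.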
+ int size() const {
+ return fifo_->size();
+ }
+
+ int largest_size() const {
+ return largest_size_;
+ }
+
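+  // Average number of buffers in the FIFO, rounded to the nearest integer and
+  // measured at each Write() call after the first |kNumIgnoreFirstCallbacks|.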
+ int average_size() const {
+    return (total_written_elements_ == 0) ? 0 : static_cast<int> (
+        0.5 + static_cast<float> (total_written_elements_) /
+            (write_count_ - kNumIgnoreFirstCallbacks));
+ }
+
+ private:
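+  // Deletes all buffers that are still stored in the FIFO.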
+ void Flush() {
+ for (auto it = fifo_->begin(); it != fifo_->end(); ++it) {
+ delete *it;
+ }
+ fifo_->clear();
+ }
+
+ using AudioBufferList = std::list<int16_t*>;
+ rtc::CriticalSection lock_;
+ const int frames_per_buffer_;
+ const int bytes_per_buffer_;
+ rtc::scoped_ptr<AudioBufferList> fifo_;
+ int largest_size_;
+ int total_written_elements_;
+ int write_count_;
+};
+
+// Inserts periodic impulses and measures the latency between the time of
+// transmission and time of receiving the same impulse.
+// Usage requires special hardware called an Audio Loopback Dongle.
+// See http://source.android.com/devices/audio/loopback.html for details.
+class LatencyMeasuringAudioStream : public AudioStreamInterface {
+ public:
+ explicit LatencyMeasuringAudioStream(int frames_per_buffer)
+ : clock_(Clock::GetRealTimeClock()),
+ frames_per_buffer_(frames_per_buffer),
+ bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
+ play_count_(0),
+ rec_count_(0),
+ pulse_time_(0) {
+ }
+
+  // Insert periodic impulses in the first two samples of |destination|.
+ virtual void Read(void* destination, int num_frames) override {
+ ASSERT_EQ(num_frames, frames_per_buffer_);
+ if (play_count_ == 0) {
+ PRINT("[");
+ }
+ play_count_++;
+ memset(destination, 0, bytes_per_buffer_);
+ if (play_count_ % (kNumCallbacksPerSecond / kImpulseFrequencyInHz) == 0) {
+ if (pulse_time_ == 0) {
+ pulse_time_ = clock_->TimeInMilliseconds();
+ }
+ PRINT(".");
+ const int16_t impulse = std::numeric_limits<int16_t>::max();
+ int16_t* ptr16 = static_cast<int16_t*> (destination);
+ for (int i = 0; i < 2; ++i) {
+ *ptr16++ = impulse;
+ }
+ }
+ }
+
+  // Detect received impulses in |source|, derive the time between transmission
+  // and detection and add the calculated delay to the list of latencies.
+ virtual void Write(const void* source, int num_frames) override {
+ ASSERT_EQ(num_frames, frames_per_buffer_);
+ rec_count_++;
+ if (pulse_time_ == 0) {
+      // Avoid detection of a new impulse until a new impulse has been
+      // transmitted (which sets |pulse_time_| to a value larger than zero).
+ return;
+ }
+ const int16_t* ptr16 = static_cast<const int16_t*> (source);
+ std::vector<int16_t> vec(ptr16, ptr16 + num_frames);
+ // Find max value in the audio buffer.
+ int max = *std::max_element(vec.begin(), vec.end());
+ // Find index (element position in vector) of the max element.
+ int index_of_max = std::distance(vec.begin(),
+ std::find(vec.begin(), vec.end(),
+ max));
+ if (max > kImpulseThreshold) {
+ PRINTD("(%d,%d)", max, index_of_max);
+ int64_t now_time = clock_->TimeInMilliseconds();
+ int extra_delay = IndexToMilliseconds(static_cast<double> (index_of_max));
+ PRINTD("[%d]", static_cast<int> (now_time - pulse_time_));
+ PRINTD("[%d]", extra_delay);
+ // Total latency is the difference between transmit time and detection
+      // time plus the extra delay within the buffer in which we detected the
+ // received impulse. It is transmitted at sample 0 but can be received
+ // at sample N where N > 0. The term |extra_delay| accounts for N and it
+ // is a value between 0 and 10ms.
+ latencies_.push_back(now_time - pulse_time_ + extra_delay);
+ pulse_time_ = 0;
+ } else {
+ PRINTD("-");
+ }
+ }
+
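+  // Number of detected impulses (and hence the number of stored latency
+  // measurements).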
+ int num_latency_values() const {
+ return latencies_.size();
+ }
+
+ int min_latency() const {
+ if (latencies_.empty())
+ return 0;
+ return *std::min_element(latencies_.begin(), latencies_.end());
+ }
+
+ int max_latency() const {
+ if (latencies_.empty())
+ return 0;
+ return *std::max_element(latencies_.begin(), latencies_.end());
+ }
+
+ int average_latency() const {
+ if (latencies_.empty())
+ return 0;
+    return static_cast<int> (0.5 + static_cast<double> (
+        std::accumulate(latencies_.begin(), latencies_.end(), 0)) /
+        latencies_.size());
+ }
+
+ void PrintResults() const {
+ PRINT("] ");
+ for (auto it = latencies_.begin(); it != latencies_.end(); ++it) {
+ PRINT("%d ", *it);
+ }
+ PRINT("\n");
+ PRINT("%s[min, max, avg]=[%d, %d, %d] ms\n", kTag,
+ min_latency(), max_latency(), average_latency());
+ }
+
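+  // Converts a sample index within a 10 ms buffer to a time offset in
+  // milliseconds, rounded to the nearest integer.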
+ int IndexToMilliseconds(double index) const {
+ return 10.0 * (index / frames_per_buffer_) + 0.5;
+ }
+
+ private:
+ Clock* clock_;
+ const int frames_per_buffer_;
+ const int bytes_per_buffer_;
+ int play_count_;
+ int rec_count_;
+ int64_t pulse_time_;
+ std::vector<int> latencies_;
+};
+
+// Mocks the AudioTransport object and proxies actions for the two callbacks
+// (RecordedDataIsAvailable and NeedMorePlayData) to different implementations
+// of AudioStreamInterface.
+class MockAudioTransport : public AudioTransport {
+ public:
+ explicit MockAudioTransport(int type)
+ : num_callbacks_(0),
+ type_(type),
+ play_count_(0),
+ rec_count_(0),
+ audio_stream_(nullptr) {}
+
+ virtual ~MockAudioTransport() {}
+
MOCK_METHOD10(RecordedDataIsAvailable,
int32_t(const void* audioSamples,
const uint32_t nSamples,
@@ -135,8 +416,13 @@
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms));
- void HandleCallbacks(EventWrapper* test_is_done, int num_callbacks) {
+ // Set default actions of the mock object. We are delegating to fake
+ // implementations (of AudioStreamInterface) here.
+ void HandleCallbacks(EventWrapper* test_is_done,
+ AudioStreamInterface* audio_stream,
+ int num_callbacks) {
test_is_done_ = test_is_done;
+ audio_stream_ = audio_stream;
num_callbacks_ = num_callbacks;
if (play_mode()) {
ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
@@ -162,8 +448,14 @@
uint32_t& newMicLevel) {
EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
rec_count_++;
- if (ReceivedEnoughCallbacks())
+ // Process the recorded audio stream if an AudioStreamInterface
+ // implementation exists.
+ if (audio_stream_) {
+ audio_stream_->Write(audioSamples, nSamples);
+ }
+ if (ReceivedEnoughCallbacks()) {
test_is_done_->Set();
+ }
return 0;
}
@@ -176,18 +468,16 @@
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) {
EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
- nSamplesOut = nSamples;
- if (file_mode()) {
- // Read samples from file stored in memory (at construction) and copy
- // |nSamples| (<=> 10ms) to the |audioSamples| byte buffer.
- memcpy(audioSamples,
- static_cast<int16_t*> (&file_[file_pos_]),
- nSamples * nBytesPerSample);
- file_pos_ += nSamples;
- }
play_count_++;
- if (ReceivedEnoughCallbacks())
+ nSamplesOut = nSamples;
+ // Read (possibly processed) audio stream samples to be played out if an
+ // AudioStreamInterface implementation exists.
+ if (audio_stream_) {
+ audio_stream_->Read(audioSamples, nSamples);
+ }
+ if (ReceivedEnoughCallbacks()) {
test_is_done_->Set();
+ }
return 0;
}
@@ -209,13 +499,6 @@
bool play_mode() const { return type_ & kPlayout; }
bool rec_mode() const { return type_ & kRecording; }
- bool file_mode() const { return file_.get() != nullptr; }
- int file_size_in_seconds() const {
- return (file_size_in_bytes_ / (kBytesPerSample * sample_rate_));
- }
- int file_size_in_callbacks() const {
- return file_size_in_seconds() * kNumCallbacksPerSecond;
- }
private:
EventWrapper* test_is_done_;
@@ -223,10 +506,8 @@
int type_;
int play_count_;
int rec_count_;
- int file_size_in_bytes_;
- int sample_rate_;
- rtc::scoped_ptr<int16_t[]> file_;
- int file_pos_;
+ AudioStreamInterface* audio_stream_;
+ rtc::scoped_ptr<LatencyMeasuringAudioStream> latency_audio_stream_;
};
// AudioDeviceTest is a value-parameterized test.
@@ -289,7 +570,7 @@
parameters_.recording_channels = audio_buffer->RecordingChannels();
}
- // Retuerns file name relative to the resource root given a sample rate.
+ // Returns file name relative to the resource root given a sample rate.
std::string GetFileName(int sample_rate) {
EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100);
char fname[64];
@@ -351,13 +632,13 @@
// Create an audio device instance and print out the native audio parameters.
TEST_P(AudioDeviceTest, AudioParameters) {
EXPECT_NE(0, playout_sample_rate());
- PRINT("playout_sample_rate: %d\n", playout_sample_rate());
+ PRINT("%splayout_sample_rate: %d\n", kTag, playout_sample_rate());
EXPECT_NE(0, recording_sample_rate());
- PRINT("playout_sample_rate: %d\n", recording_sample_rate());
+ PRINT("%splayout_sample_rate: %d\n", kTag, recording_sample_rate());
EXPECT_NE(0, playout_channels());
- PRINT("playout_channels: %d\n", playout_channels());
+ PRINT("%splayout_channels: %d\n", kTag, playout_channels());
EXPECT_NE(0, recording_channels());
- PRINT("recording_channels: %d\n", recording_channels());
+ PRINT("%srecording_channels: %d\n", kTag, recording_channels());
}
TEST_P(AudioDeviceTest, InitTerminate) {
@@ -373,23 +654,22 @@
EXPECT_EQ(1, audio_device()->RecordingDevices());
}
+TEST_P(AudioDeviceTest, BuiltInAECIsAvailable) {
+ PRINT("%sBuiltInAECIsAvailable: %s\n",
+ kTag, audio_device()->BuiltInAECIsAvailable() ? "true" : "false");
+}
+
// Tests that playout can be initiated, started and stopped.
TEST_P(AudioDeviceTest, StartStopPlayout) {
StartPlayout();
StopPlayout();
}
-// Tests that recording can be initiated, started and stopped.
-TEST_P(AudioDeviceTest, StartStopRecording) {
- StartRecording();
- StopRecording();
-}
-
// Start playout and verify that the native audio layer starts asking for real
// audio samples to play out using the NeedMorePlayData callback.
TEST_P(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
MockAudioTransport mock(kPlayout);
- mock.HandleCallbacks(test_is_done_.get(), kNumCallbacks);
+ mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_buffer(),
kBytesPerSample,
playout_channels(),
@@ -407,7 +687,7 @@
// audio samples via the RecordedDataIsAvailable callback.
TEST_P(AudioDeviceTest, StartRecordingVerifyCallbacks) {
MockAudioTransport mock(kRecording);
- mock.HandleCallbacks(test_is_done_.get(), kNumCallbacks);
+ mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(),
recording_frames_per_buffer(),
kBytesPerSample,
@@ -431,7 +711,7 @@
// active in both directions.
TEST_P(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
MockAudioTransport mock(kPlayout | kRecording);
- mock.HandleCallbacks(test_is_done_.get(), kNumCallbacks);
+ mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_buffer(),
kBytesPerSample,
playout_channels(),
@@ -465,16 +745,83 @@
// TODO(henrika): extend test when mono output is supported.
EXPECT_EQ(1, playout_channels());
NiceMock<MockAudioTransport> mock(kPlayout);
- mock.HandleCallbacks(test_is_done_.get(),
- kFilePlayTimeInSec * kNumCallbacksPerSecond);
+ const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
std::string file_name = GetFileName(playout_sample_rate());
- mock.LoadFile(file_name, playout_sample_rate());
+ rtc::scoped_ptr<FileAudioStream> file_audio_stream(
+ new FileAudioStream(num_callbacks, file_name, playout_sample_rate()));
+ mock.HandleCallbacks(test_is_done_.get(),
+ file_audio_stream.get(),
+ num_callbacks);
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
StartPlayout();
test_is_done_->Wait(kTestTimeOutInMilliseconds);
StopPlayout();
}
+// Start playout and recording and store recorded data in an intermediate FIFO
+// buffer from which the playout side then reads its samples in the same order
+// as they were stored. Under ideal circumstances, a callback sequence would
+// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
+// means 'packet played'. Under such conditions, the FIFO would only contain
+// one packet on average. However, under more realistic conditions, the size
+// of the FIFO will vary more due to an imbalance between the two sides.
+// This test tries to verify that the device maintains a balanced callback
+// sequence by running in loopback for ten seconds while measuring the size
+// (max and average) of the FIFO. The size of the FIFO is increased by the
+// recording side and decreased by the playout side.
+// TODO(henrika): tune the final test parameters after running tests on several
+// different devices.
+TEST_P(AudioDeviceTest, RunPlayoutAndRecordingInFullDuplex) {
+ EXPECT_EQ(recording_channels(), playout_channels());
+ EXPECT_EQ(recording_sample_rate(), playout_sample_rate());
+ NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
+ rtc::scoped_ptr<FifoAudioStream> fifo_audio_stream(
+ new FifoAudioStream(playout_frames_per_buffer()));
+ mock.HandleCallbacks(test_is_done_.get(),
+ fifo_audio_stream.get(),
+ kFullDuplexTimeInSec * kNumCallbacksPerSecond);
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartRecording();
+ StartPlayout();
+ test_is_done_->Wait(std::max(kTestTimeOutInMilliseconds,
+ 1000 * kFullDuplexTimeInSec));
+ StopPlayout();
+ StopRecording();
+ EXPECT_LE(fifo_audio_stream->average_size(), 10);
+ EXPECT_LE(fifo_audio_stream->largest_size(), 20);
+}
+
+// Measures loopback latency and reports the min, max and average values for
+// a full duplex audio session.
+// The latency is measured like so:
+// - Insert impulses periodically on the output side.
+// - Detect the impulses on the input side.
+// - Measure the time difference between the transmit time and receive time.
+// - Store time differences in a vector and calculate min, max and average.
+// This test requires special hardware called an Audio Loopback Dongle.
+// See http://source.android.com/devices/audio/loopback.html for details.
+TEST_P(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
+ EXPECT_EQ(recording_channels(), playout_channels());
+ EXPECT_EQ(recording_sample_rate(), playout_sample_rate());
+ NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
+ rtc::scoped_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
+ new LatencyMeasuringAudioStream(playout_frames_per_buffer()));
+ mock.HandleCallbacks(test_is_done_.get(),
+ latency_audio_stream.get(),
+ kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartRecording();
+ StartPlayout();
+ test_is_done_->Wait(std::max(kTestTimeOutInMilliseconds,
+ 1000 * kMeasureLatencyTimeInSec));
+ StopPlayout();
+ StopRecording();
+ // Verify that the correct number of transmitted impulses are detected.
+ EXPECT_EQ(latency_audio_stream->num_latency_values(),
+ kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1);
+ latency_audio_stream->PrintResults();
+}
+
INSTANTIATE_TEST_CASE_P(AudioDeviceTest, AudioDeviceTest,
::testing::ValuesIn(kAudioLayers));
diff --git a/modules/audio_device/android/ensure_initialized.cc b/modules/audio_device/android/ensure_initialized.cc
index 469068a..b07c04a 100644
--- a/modules/audio_device/android/ensure_initialized.cc
+++ b/modules/audio_device/android/ensure_initialized.cc
@@ -38,9 +38,9 @@
// TODO(henrika): enable OpenSL ES when it has been refactored to avoid
// crashes.
- // using AudioDeviceOpenSLES
- // AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>;
- // AudioDeviceOpenSLESInstance::SetAndroidAudioDeviceObjects(jvm, context);
+ // using AudioDeviceOpenSLES =
+ // AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>;
+ // AudioDeviceOpenSLES::SetAndroidAudioDeviceObjects(jvm, context);
}
void EnsureInitialized() {