Make WebRtcAudioRecord save timestamps

Add a capture timestamp parameter to the audio_record_jni DataIsRecorded()
function, and make WebRtcAudioRecord look up the timestamp and pass it to
that function.

This CL is a continuation of
https://webrtc-review.googlesource.com/c/src/+/249085

Bug: webrtc:13609
Change-Id: I63ab89f1215893cbe1d11d9d8948f5639fc5cdfe
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/249951
Reviewed-by: Xavier Lepaul <xalep@webrtc.org>
Reviewed-by: Henrik Andersson <henrika@webrtc.org>
Reviewed-by: Minyue Li <minyue@google.com>
Commit-Queue: Olov Brändström <brandstrom@google.com>
Cr-Commit-Position: refs/heads/main@{#35933}
diff --git a/modules/audio_device/audio_device_buffer.cc b/modules/audio_device/audio_device_buffer.cc
index f80319d..873e5d6 100644
--- a/modules/audio_device/audio_device_buffer.cc
+++ b/modules/audio_device/audio_device_buffer.cc
@@ -246,8 +246,17 @@
     RTC_LOG(LS_INFO) << "Size of recording buffer: " << rec_buffer_.size();
   }
 
-  capture_timestamp_ns_ = capture_timestamp_ns;
-
+  // A timestamp that is less than or equal to zero is not valid and is
+  // passed through unmodified. If we were to run timestamp alignment on
+  // it, it could accidentally become greater than zero and would then be
+  // treated as if it were a correct timestamp.
+  capture_timestamp_ns_ =
+      (capture_timestamp_ns > 0)
+          ? rtc::kNumNanosecsPerMicrosec *
+                timestamp_aligner_.TranslateTimestamp(
+                    capture_timestamp_ns / rtc::kNumNanosecsPerMicrosec,
+                    rtc::TimeMicros())
+          : capture_timestamp_ns;
   // Derive a new level value twice per second and check if it is non-zero.
   int16_t max_abs = 0;
   RTC_DCHECK_LT(rec_stat_count_, 50);
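For readers outside the WebRTC tree: the translation above is done by
rtc::TimestampAligner, which estimates the offset between the capture clock
and WebRTC's rtc::TimeMicros()/TimeNanos() clock from successive timestamp
pairs. The same facility is exposed to Android applications as
org.webrtc.TimestampAligner. Below is a minimal sketch of that translation
step, assuming the SDK's Java wrapper and an already-loaded WebRTC native
library; the class and variable names are illustrative, not part of this CL.

    import org.webrtc.TimestampAligner;

    final class CaptureTimeAlignmentSketch {
      static void demo() {
        // Each translateTimestamp() call also feeds the aligner a new
        // (capture clock, rtc clock) sample, refining its offset estimate.
        TimestampAligner aligner = new TimestampAligner();

        // Stand-in for AudioTimestamp.nanoTime from AudioRecord.getTimestamp();
        // TIMEBASE_MONOTONIC is the same timebase as System.nanoTime() on Android.
        long captureTimeNs = System.nanoTime();

        // Mirror the guard in the C++ hunk above: a non-positive timestamp is
        // invalid and must not be aligned, since alignment could make it positive.
        long alignedNs =
            captureTimeNs > 0 ? aligner.translateTimestamp(captureTimeNs) : captureTimeNs;
        System.out.println("aligned capture time (ns): " + alignedNs);

        aligner.dispose();
      }
    }

Note that the C++ hunk converts nanoseconds to microseconds around
TranslateTimestamp() because rtc::TimestampAligner operates on microseconds,
whereas the Java wrapper accepts and returns nanoseconds.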
diff --git a/modules/audio_device/audio_device_buffer.h b/modules/audio_device/audio_device_buffer.h
index dbb9e57..ea6ab9a 100644
--- a/modules/audio_device/audio_device_buffer.h
+++ b/modules/audio_device/audio_device_buffer.h
@@ -23,6 +23,7 @@
 #include "rtc_base/synchronization/mutex.h"
 #include "rtc_base/task_queue.h"
 #include "rtc_base/thread_annotations.h"
+#include "rtc_base/timestamp_aligner.h"
 
 namespace webrtc {
 
@@ -227,6 +228,10 @@
   // being printed in the LogStats() task.
   bool log_stats_ RTC_GUARDED_BY(task_queue_);
 
+  // Used for converting capture timestamps (received from AudioRecordThread
+  // via AudioRecordJni::DataIsRecorded) to the RTC clock.
+  rtc::TimestampAligner timestamp_aligner_;
+
 // Should *never* be defined in production builds. Only used for testing.
 // When defined, the output signal will be replaced by a sinus tone at 440Hz.
 #ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java
index 10fd58c..6647e5f 100644
--- a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java
+++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java
@@ -17,6 +17,7 @@
 import android.media.AudioManager;
 import android.media.AudioRecord;
 import android.media.AudioRecordingConfiguration;
+import android.media.AudioTimestamp;
 import android.media.MediaRecorder.AudioSource;
 import android.os.Build;
 import android.os.Process;
@@ -130,6 +131,10 @@
       doAudioRecordStateCallback(AUDIO_RECORD_START);
 
       long lastTime = System.nanoTime();
+      AudioTimestamp audioTimestamp = null;
+      if (Build.VERSION.SDK_INT >= 24) {
+        audioTimestamp = new AudioTimestamp();
+      }
       while (keepAlive) {
         int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity());
         if (bytesRead == byteBuffer.capacity()) {
@@ -141,7 +146,14 @@
           // failed to join this thread. To be a bit safer, try to avoid calling any native methods
           // in case they've been unregistered after stopRecording() returned.
           if (keepAlive) {
-            nativeDataIsRecorded(nativeAudioRecord, bytesRead);
+            long captureTimeNs = 0;
+            if (Build.VERSION.SDK_INT >= 24) {
+              if (audioRecord.getTimestamp(audioTimestamp, AudioTimestamp.TIMEBASE_MONOTONIC)
+                  == AudioRecord.SUCCESS) {
+                captureTimeNs = audioTimestamp.nanoTime;
+              }
+            }
+            nativeDataIsRecorded(nativeAudioRecord, bytesRead, captureTimeNs);
           }
           if (audioSamplesReadyCallback != null) {
             // Copy the entire byte buffer array. The start of the byteBuffer is not necessarily
@@ -489,7 +501,8 @@
 
   private native void nativeCacheDirectBufferAddress(
       long nativeAudioRecordJni, ByteBuffer byteBuffer);
-  private native void nativeDataIsRecorded(long nativeAudioRecordJni, int bytes);
+  private native void nativeDataIsRecorded(
+      long nativeAudioRecordJni, int bytes, long captureTimestampNs);
 
   // Sets all recorded samples to zero if `mute` is true, i.e., ensures that
   // the microphone is muted.
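For reference, the AudioRecord.getTimestamp() call used above exists since
API level 24; with AudioTimestamp.TIMEBASE_MONOTONIC it reports the capture
time on the System.nanoTime() timebase and returns AudioRecord.SUCCESS on
success. Below is a self-contained sketch of the same lookup outside the
WebRTC classes, with the fallback of 0 acting as the "no valid timestamp"
marker just as in the recording loop above. It requires the RECORD_AUDIO
permission, and the class and method names are made up for illustration.

    import android.media.AudioFormat;
    import android.media.AudioRecord;
    import android.media.AudioTimestamp;
    import android.media.MediaRecorder;
    import android.os.Build;
    import android.util.Log;

    final class CaptureTimestampSketch {
      // Capture time of the most recently read audio, in nanoseconds on the
      // monotonic clock, or 0 when no valid timestamp is available
      // (pre-API-24 devices or a failed getTimestamp() call).
      static long captureTimeNs(AudioRecord audioRecord, AudioTimestamp scratch) {
        if (Build.VERSION.SDK_INT >= 24
            && audioRecord.getTimestamp(scratch, AudioTimestamp.TIMEBASE_MONOTONIC)
                == AudioRecord.SUCCESS) {
          return scratch.nanoTime;
        }
        return 0;
      }

      // Minimal usage: read a few buffers and log each one's capture time.
      static void recordBriefly(int sampleRate) {
        int minBytes = AudioRecord.getMinBufferSize(
            sampleRate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
        AudioRecord record = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleRate,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, minBytes * 2);
        AudioTimestamp scratch = new AudioTimestamp();
        byte[] buffer = new byte[minBytes];
        record.startRecording();
        for (int i = 0; i < 10; i++) {
          int bytesRead = record.read(buffer, 0, buffer.length);
          if (bytesRead > 0) {
            Log.d("CaptureTimestampSketch",
                "read " + bytesRead + " bytes, captureTimeNs=" + captureTimeNs(record, scratch));
          }
        }
        record.stop();
        record.release();
      }
    }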
diff --git a/sdk/android/src/jni/audio_device/audio_record_jni.cc b/sdk/android/src/jni/audio_device/audio_record_jni.cc
index 170c81a..f5f1089 100644
--- a/sdk/android/src/jni/audio_device/audio_record_jni.cc
+++ b/sdk/android/src/jni/audio_device/audio_record_jni.cc
@@ -245,14 +245,15 @@
 // the thread is 'AudioRecordThread'.
 void AudioRecordJni::DataIsRecorded(JNIEnv* env,
                                     const JavaParamRef<jobject>& j_caller,
-                                    int length) {
+                                    int length,
+                                    int64_t capture_timestamp_ns) {
   RTC_DCHECK(thread_checker_java_.IsCurrent());
   if (!audio_device_buffer_) {
     RTC_LOG(LS_ERROR) << "AttachAudioBuffer has not been called";
     return;
   }
-  audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
-                                          frames_per_buffer_);
+  audio_device_buffer_->SetRecordedBuffer(
+      direct_buffer_address_, frames_per_buffer_, capture_timestamp_ns);
   // We provide one (combined) fixed delay estimate for the APM and use the
   // `playDelayMs` parameter only. Components like the AEC only sees the sum
   // of `playDelayMs` and `recDelayMs`, hence the distributions does not matter.
diff --git a/sdk/android/src/jni/audio_device/audio_record_jni.h b/sdk/android/src/jni/audio_device/audio_record_jni.h
index 1ff62f8..49c905d 100644
--- a/sdk/android/src/jni/audio_device/audio_record_jni.h
+++ b/sdk/android/src/jni/audio_device/audio_record_jni.h
@@ -90,7 +90,8 @@
   // the thread is 'AudioRecordThread'.
   void DataIsRecorded(JNIEnv* env,
                       const JavaParamRef<jobject>& j_caller,
-                      int length);
+                      int length,
+                      int64_t capture_timestamp_ns);
 
  private:
   // Stores thread ID in constructor.