Update a ton of audio code to use size_t more correctly and in general reduce
use of int16_t/uint16_t.

This is the upshot of a recommendation by henrik.lundin and kwiberg on an original small change ( https://webrtc-codereview.appspot.com/42569004/#ps1 ) to stop using int16_t just because values could fit in it, and is similar in nature to a previous "mass change to use size_t more" ( https://webrtc-codereview.appspot.com/23129004/ ) which also needed to be split up for review but to land all at once, since, like adding "const", such changes tend to cause a lot of transitive effects.

This was reviewed and approved in pieces:
https://codereview.webrtc.org/1224093003
https://codereview.webrtc.org/1224123002
https://codereview.webrtc.org/1224163002
https://codereview.webrtc.org/1225133003
https://codereview.webrtc.org/1225173002
https://codereview.webrtc.org/1227163003
https://codereview.webrtc.org/1227203003
https://codereview.webrtc.org/1227213002
https://codereview.webrtc.org/1227893002
https://codereview.webrtc.org/1228793004
https://codereview.webrtc.org/1228803003
https://codereview.webrtc.org/1228823002
https://codereview.webrtc.org/1228823003
https://codereview.webrtc.org/1228843002
https://codereview.webrtc.org/1230693002
https://codereview.webrtc.org/1231713002

The change is being landed as TBR to all the folks who reviewed the above.

BUG=chromium:81439
TEST=none
R=andrew@webrtc.org, pbos@webrtc.org
TBR=aluebs, andrew, asapersson, henrika, hlundin, jan.skoglund, kwiberg, minyue, pbos, pthatcher

Review URL: https://codereview.webrtc.org/1230503003 .

Cr-Commit-Position: refs/heads/master@{#9768}
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc b/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc
index dc59984..b5a86d0 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc
@@ -93,7 +93,8 @@
       AudioFrame output_frame;
       EXPECT_TRUE(acm_->Get10MsAudio(&output_frame));
       EXPECT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
-      const int samples_per_block = output_freq_hz_ * 10 / 1000;
+      const size_t samples_per_block =
+          static_cast<size_t>(output_freq_hz_ * 10 / 1000);
       EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);
       if (expected_output_channels_ != kArbitraryChannels) {
         if (output_frame.speech_type_ == webrtc::AudioFrame::kPLC) {
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc b/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc
index dd570e6..2a0bbe1 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc
@@ -160,7 +160,8 @@
       AudioFrame output_frame;
       EXPECT_EQ(0, acm_->PlayoutData10Ms(output_freq_hz_, &output_frame));
       EXPECT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
-      const int samples_per_block = output_freq_hz_ * 10 / 1000;
+      const size_t samples_per_block =
+          static_cast<size_t>(output_freq_hz_ * 10 / 1000);
       EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);
       if (exptected_output_channels_ != kArbitraryChannels) {
         if (output_frame.speech_type_ == webrtc::AudioFrame::kPLC) {
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc b/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
index 4c11197..1cefeb6 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
@@ -344,7 +344,7 @@
 
 int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
   enum NetEqOutputType type;
-  int samples_per_channel;
+  size_t samples_per_channel;
   int num_channels;
   bool return_silence = false;
 
@@ -394,7 +394,7 @@
   }
 
   // NetEq always returns 10 ms of audio.
-  current_sample_rate_hz_ = samples_per_channel * 100;
+  current_sample_rate_hz_ = static_cast<int>(samples_per_channel * 100);
 
   // Update if resampling is required.
   bool need_resampling = (desired_freq_hz != -1) &&
@@ -403,18 +403,19 @@
   if (need_resampling && !resampled_last_output_frame_) {
     // Prime the resampler with the last frame.
     int16_t temp_output[AudioFrame::kMaxDataSizeSamples];
-    samples_per_channel =
+    int samples_per_channel_int =
         resampler_.Resample10Msec(last_audio_buffer_.get(),
                                   current_sample_rate_hz_,
                                   desired_freq_hz,
                                   num_channels,
                                   AudioFrame::kMaxDataSizeSamples,
                                   temp_output);
-    if (samples_per_channel < 0) {
+    if (samples_per_channel_int < 0) {
       LOG(LERROR) << "AcmReceiver::GetAudio - "
                      "Resampling last_audio_buffer_ failed.";
       return -1;
     }
+    samples_per_channel = static_cast<size_t>(samples_per_channel_int);
   }
 
   // The audio in |audio_buffer_| is tansferred to |audio_frame_| below, either
@@ -422,17 +423,18 @@
   // TODO(henrik.lundin) Glitches in the output may appear if the output rate
   // from NetEq changes. See WebRTC issue 3923.
   if (need_resampling) {
-    samples_per_channel =
+    int samples_per_channel_int =
         resampler_.Resample10Msec(audio_buffer_.get(),
                                   current_sample_rate_hz_,
                                   desired_freq_hz,
                                   num_channels,
                                   AudioFrame::kMaxDataSizeSamples,
                                   audio_frame->data_);
-    if (samples_per_channel < 0) {
+    if (samples_per_channel_int < 0) {
       LOG(LERROR) << "AcmReceiver::GetAudio - Resampling audio_buffer_ failed.";
       return -1;
     }
+    samples_per_channel = static_cast<size_t>(samples_per_channel_int);
     resampled_last_output_frame_ = true;
   } else {
     resampled_last_output_frame_ = false;
@@ -448,7 +450,7 @@
 
   audio_frame->num_channels_ = num_channels;
   audio_frame->samples_per_channel_ = samples_per_channel;
-  audio_frame->sample_rate_hz_ = samples_per_channel * 100;
+  audio_frame->sample_rate_hz_ = static_cast<int>(samples_per_channel * 100);
 
   // Should set |vad_activity| before calling SetAudioFrameActivityAndType().
   audio_frame->vad_activity_ = previous_audio_activity_;
@@ -787,10 +789,11 @@
     frame->sample_rate_hz_ = current_sample_rate_hz_;
   }
 
-  frame->samples_per_channel_ = frame->sample_rate_hz_ / 100;  // Always 10 ms.
+  frame->samples_per_channel_ =
+      static_cast<size_t>(frame->sample_rate_hz_ / 100);  // Always 10 ms.
   frame->speech_type_ = AudioFrame::kCNG;
   frame->vad_activity_ = AudioFrame::kVadPassive;
-  int samples = frame->samples_per_channel_ * frame->num_channels_;
+  size_t samples = frame->samples_per_channel_ * frame->num_channels_;
   memset(frame->data_, 0, samples * sizeof(int16_t));
   return true;
 }
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc b/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc
index 97d87b1..2650725 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc
@@ -29,9 +29,9 @@
                                  int in_freq_hz,
                                  int out_freq_hz,
                                  int num_audio_channels,
-                                 int out_capacity_samples,
+                                 size_t out_capacity_samples,
                                  int16_t* out_audio) {
-  int in_length = in_freq_hz * num_audio_channels / 100;
+  size_t in_length = static_cast<size_t>(in_freq_hz * num_audio_channels / 100);
   int out_length = out_freq_hz * num_audio_channels / 100;
   if (in_freq_hz == out_freq_hz) {
     if (out_capacity_samples < in_length) {
@@ -39,7 +39,7 @@
       return -1;
     }
     memcpy(out_audio, in_audio, in_length * sizeof(int16_t));
-    return in_length / num_audio_channels;
+    return static_cast<int>(in_length / num_audio_channels);
   }
 
   if (resampler_.InitializeIfNeeded(in_freq_hz, out_freq_hz,
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_resampler.h b/webrtc/modules/audio_coding/main/acm2/acm_resampler.h
index a8fc6b6..a19b0c4 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_resampler.h
+++ b/webrtc/modules/audio_coding/main/acm2/acm_resampler.h
@@ -26,7 +26,7 @@
                      int in_freq_hz,
                      int out_freq_hz,
                      int num_audio_channels,
-                     int out_capacity_samples,
+                     size_t out_capacity_samples,
                      int16_t* out_audio);
 
  private:
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc b/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
index b96db6b..91df16f 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
@@ -29,7 +29,8 @@
     : clock_(0),
       audio_source_(audio_source),
       source_rate_hz_(source_rate_hz),
-      input_block_size_samples_(source_rate_hz_ * kBlockSizeMs / 1000),
+      input_block_size_samples_(
+          static_cast<size_t>(source_rate_hz_ * kBlockSizeMs / 1000)),
       codec_registered_(false),
       test_duration_ms_(test_duration_ms),
       frame_type_(kAudioFrameSpeech),
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test.h b/webrtc/modules/audio_coding/main/acm2/acm_send_test.h
index 4c4db5b..09fe9e6 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test.h
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test.h
@@ -63,7 +63,7 @@
   rtc::scoped_ptr<AudioCoding> acm_;
   InputAudioFile* audio_source_;
   int source_rate_hz_;
-  const int input_block_size_samples_;
+  const size_t input_block_size_samples_;
   AudioFrame input_frame_;
   bool codec_registered_;
   int test_duration_ms_;
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
index 1819d59..74e98d9 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
@@ -31,7 +31,8 @@
       acm_(webrtc::AudioCodingModule::Create(0, &clock_)),
       audio_source_(audio_source),
       source_rate_hz_(source_rate_hz),
-      input_block_size_samples_(source_rate_hz_ * kBlockSizeMs / 1000),
+      input_block_size_samples_(
+          static_cast<size_t>(source_rate_hz_ * kBlockSizeMs / 1000)),
       codec_registered_(false),
       test_duration_ms_(test_duration_ms),
       frame_type_(kAudioFrameSpeech),
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h
index 8cdc298..008e264 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h
@@ -71,7 +71,7 @@
   rtc::scoped_ptr<AudioCodingModule> acm_;
   InputAudioFile* audio_source_;
   int source_rate_hz_;
-  const int input_block_size_samples_;
+  const size_t input_block_size_samples_;
   AudioFrame input_frame_;
   bool codec_registered_;
   int test_duration_ms_;
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
index 32d60a7..46980d3 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
@@ -76,22 +76,24 @@
 }
 
 // Stereo-to-mono can be used as in-place.
-int DownMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) {
+int DownMix(const AudioFrame& frame,
+            size_t length_out_buff,
+            int16_t* out_buff) {
   if (length_out_buff < frame.samples_per_channel_) {
     return -1;
   }
-  for (int n = 0; n < frame.samples_per_channel_; ++n)
+  for (size_t n = 0; n < frame.samples_per_channel_; ++n)
     out_buff[n] = (frame.data_[2 * n] + frame.data_[2 * n + 1]) >> 1;
   return 0;
 }
 
 // Mono-to-stereo can be used as in-place.
-int UpMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) {
+int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) {
   if (length_out_buff < frame.samples_per_channel_) {
     return -1;
   }
-  for (int n = frame.samples_per_channel_; n > 0; --n) {
-    int i = n - 1;
+  for (size_t n = frame.samples_per_channel_; n != 0; --n) {
+    size_t i = n - 1;
     int16_t sample = frame.data_[i];
     out_buff[2 * i + 1] = sample;
     out_buff[2 * i] = sample;
@@ -338,11 +340,10 @@
 
 int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
                                                InputData* input_data) {
-  if (audio_frame.samples_per_channel_ <= 0) {
+  if (audio_frame.samples_per_channel_ == 0) {
     assert(false);
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
-                 "Cannot Add 10 ms audio, payload length is negative or "
-                 "zero");
+                 "Cannot Add 10 ms audio, payload length is zero");
     return -1;
   }
 
@@ -354,7 +355,7 @@
   }
 
   // If the length and frequency matches. We currently just support raw PCM.
-  if ((audio_frame.sample_rate_hz_ / 100) !=
+  if (static_cast<size_t>(audio_frame.sample_rate_hz_ / 100) !=
       audio_frame.samples_per_channel_) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                  "Cannot Add 10 ms audio, input frequency and length doesn't"
@@ -477,17 +478,19 @@
     // The result of the resampler is written to output frame.
     dest_ptr_audio = preprocess_frame_.data_;
 
-    preprocess_frame_.samples_per_channel_ = resampler_.Resample10Msec(
+    int samples_per_channel = resampler_.Resample10Msec(
         src_ptr_audio, in_frame.sample_rate_hz_,
         codec_manager_.CurrentEncoder()->SampleRateHz(),
         preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples,
         dest_ptr_audio);
 
-    if (preprocess_frame_.samples_per_channel_ < 0) {
+    if (samples_per_channel < 0) {
       WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                    "Cannot add 10 ms audio, resampling failed");
       return -1;
     }
+    preprocess_frame_.samples_per_channel_ =
+        static_cast<size_t>(samples_per_channel);
     preprocess_frame_.sample_rate_hz_ =
         codec_manager_.CurrentEncoder()->SampleRateHz();
   }
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h
index beb49bc..c451854 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h
@@ -248,7 +248,7 @@
   struct InputData {
     uint32_t input_timestamp;
     const int16_t* audio;
-    uint16_t length_per_channel;
+    size_t length_per_channel;
     uint8_t audio_channel;
     // If a re-mix is required (up or down), this buffer will store a re-mixed
     // version of the input.
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
index eea51a3..418ddd1 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
@@ -272,7 +272,8 @@
   EXPECT_TRUE(acm_->Get10MsAudio(&audio_frame));
   EXPECT_EQ(0u, audio_frame.timestamp_);
   EXPECT_GT(audio_frame.num_channels_, 0);
-  EXPECT_EQ(kSampleRateHz / 100, audio_frame.samples_per_channel_);
+  EXPECT_EQ(static_cast<size_t>(kSampleRateHz / 100),
+            audio_frame.samples_per_channel_);
   EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
 }
 
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
index 0af6af8..e5371d0 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
@@ -314,7 +314,8 @@
   EXPECT_EQ(id_, audio_frame.id_);
   EXPECT_EQ(0u, audio_frame.timestamp_);
   EXPECT_GT(audio_frame.num_channels_, 0);
-  EXPECT_EQ(kSampleRateHz / 100, audio_frame.samples_per_channel_);
+  EXPECT_EQ(static_cast<size_t>(kSampleRateHz / 100),
+            audio_frame.samples_per_channel_);
   EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
 }
 
diff --git a/webrtc/modules/audio_coding/main/acm2/codec_manager.cc b/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
index cad6ee9..7b9c7ed 100644
--- a/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
+++ b/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
@@ -326,10 +326,10 @@
   // Make up a CodecInst.
   send_codec_inst_.channels = external_speech_encoder->NumChannels();
   send_codec_inst_.plfreq = external_speech_encoder->SampleRateHz();
-  send_codec_inst_.pacsize =
-      rtc::CheckedDivExact(external_speech_encoder->Max10MsFramesInAPacket() *
-                               send_codec_inst_.plfreq,
-                           100);
+  send_codec_inst_.pacsize = rtc::CheckedDivExact(
+      static_cast<int>(external_speech_encoder->Max10MsFramesInAPacket() *
+                       send_codec_inst_.plfreq),
+      100);
   send_codec_inst_.pltype = -1;  // Not valid.
   send_codec_inst_.rate = -1;    // Not valid.
   static const char kName[] = "external";
diff --git a/webrtc/modules/audio_coding/main/test/PCMFile.cc b/webrtc/modules/audio_coding/main/test/PCMFile.cc
index 4b08f75..d0ae783 100644
--- a/webrtc/modules/audio_coding/main/test/PCMFile.cc
+++ b/webrtc/modules/audio_coding/main/test/PCMFile.cc
@@ -150,7 +150,7 @@
       }
     } else {
       int16_t* stereo_audio = new int16_t[2 * audio_frame.samples_per_channel_];
-      for (int k = 0; k < audio_frame.samples_per_channel_; k++) {
+      for (size_t k = 0; k < audio_frame.samples_per_channel_; k++) {
         stereo_audio[k << 1] = audio_frame.data_[k];
         stereo_audio[(k << 1) + 1] = audio_frame.data_[k];
       }
@@ -172,7 +172,7 @@
   }
 }
 
-void PCMFile::Write10MsData(int16_t* playout_buffer, uint16_t length_smpls) {
+void PCMFile::Write10MsData(int16_t* playout_buffer, size_t length_smpls) {
   if (fwrite(playout_buffer, sizeof(uint16_t), length_smpls, pcm_file_) !=
       length_smpls) {
     return;
diff --git a/webrtc/modules/audio_coding/main/test/PCMFile.h b/webrtc/modules/audio_coding/main/test/PCMFile.h
index c4487b8..8353898 100644
--- a/webrtc/modules/audio_coding/main/test/PCMFile.h
+++ b/webrtc/modules/audio_coding/main/test/PCMFile.h
@@ -36,7 +36,7 @@
 
   int32_t Read10MsData(AudioFrame& audio_frame);
 
-  void Write10MsData(int16_t *playout_buffer, uint16_t length_smpls);
+  void Write10MsData(int16_t *playout_buffer, size_t length_smpls);
   void Write10MsData(AudioFrame& audio_frame);
 
   uint16_t PayloadLength10Ms() const;
diff --git a/webrtc/modules/audio_coding/main/test/SpatialAudio.cc b/webrtc/modules/audio_coding/main/test/SpatialAudio.cc
index b28c510..134d975 100644
--- a/webrtc/modules/audio_coding/main/test/SpatialAudio.cc
+++ b/webrtc/modules/audio_coding/main/test/SpatialAudio.cc
@@ -159,13 +159,13 @@
 
   while (!_inFile.EndOfFile()) {
     _inFile.Read10MsData(audioFrame);
-    for (int n = 0; n < audioFrame.samples_per_channel_; n++) {
+    for (size_t n = 0; n < audioFrame.samples_per_channel_; n++) {
       audioFrame.data_[n] = (int16_t) floor(
           audioFrame.data_[n] * leftPanning + 0.5);
     }
     CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));
 
-    for (int n = 0; n < audioFrame.samples_per_channel_; n++) {
+    for (size_t n = 0; n < audioFrame.samples_per_channel_; n++) {
       audioFrame.data_[n] = (int16_t) floor(
           audioFrame.data_[n] * rightToLeftRatio + 0.5);
     }
diff --git a/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc b/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
index ffbbc8c..0bac401 100644
--- a/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
+++ b/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
@@ -32,9 +32,9 @@
 namespace {
 
 double FrameRms(AudioFrame& frame) {
-  int samples = frame.num_channels_ * frame.samples_per_channel_;
+  size_t samples = frame.num_channels_ * frame.samples_per_channel_;
   double rms = 0;
-  for (int n = 0; n < samples; ++n)
+  for (size_t n = 0; n < samples; ++n)
     rms += frame.data_[n] * frame.data_[n];
   rms /= samples;
   rms = sqrt(rms);
@@ -132,9 +132,9 @@
     in_audio_frame.sample_rate_hz_ = codec.plfreq;
     in_audio_frame.num_channels_ = codec.channels;
     in_audio_frame.samples_per_channel_ = codec.plfreq / 100;  // 10 ms.
-    int samples = in_audio_frame.num_channels_ *
+    size_t samples = in_audio_frame.num_channels_ *
         in_audio_frame.samples_per_channel_;
-    for (int n = 0; n < samples; ++n) {
+    for (size_t n = 0; n < samples; ++n) {
       in_audio_frame.data_[n] = kAmp;
     }
 
diff --git a/webrtc/modules/audio_coding/main/test/opus_test.cc b/webrtc/modules/audio_coding/main/test/opus_test.cc
index c61d25a..79124aa 100644
--- a/webrtc/modules/audio_coding/main/test/opus_test.cc
+++ b/webrtc/modules/audio_coding/main/test/opus_test.cc
@@ -270,14 +270,14 @@
 
     if (loop_encode > 0) {
       const int kMaxBytes = 1000;  // Maximum number of bytes for one packet.
-      int16_t bitstream_len_byte;
+      size_t bitstream_len_byte;
       uint8_t bitstream[kMaxBytes];
       for (int i = 0; i < loop_encode; i++) {
         int bitstream_len_byte_int = WebRtcOpus_Encode(
             (channels == 1) ? opus_mono_encoder_ : opus_stereo_encoder_,
             &audio[read_samples], frame_length, kMaxBytes, bitstream);
         ASSERT_GE(bitstream_len_byte_int, 0);
-        bitstream_len_byte = static_cast<int16_t>(bitstream_len_byte_int);
+        bitstream_len_byte = static_cast<size_t>(bitstream_len_byte_int);
 
         // Simulate packet loss by setting |packet_loss_| to "true" in
         // |percent_loss| percent of the loops.
@@ -341,7 +341,8 @@
         audio_frame.samples_per_channel_ * audio_frame.num_channels_);
 
     // Write stand-alone speech to file.
-    out_file_standalone_.Write10MsData(out_audio, decoded_samples * channels);
+    out_file_standalone_.Write10MsData(
+        out_audio, static_cast<size_t>(decoded_samples) * channels);
 
     if (audio_frame.timestamp_ > start_time_stamp) {
       // Number of channels should be the same for both stand-alone and