Update a ton of audio code to use size_t more correctly and in general reduce
use of int16_t/uint16_t.

This is the upshot of a recommendation by henrik.lundin and kwiberg on an originally small change ( https://webrtc-codereview.appspot.com/42569004/#ps1 ) to stop using int16_t just because values could fit in it. It is similar in nature to a previous "mass change to use size_t more" ( https://webrtc-codereview.appspot.com/23129004/ ), which also needed to be split up for review but landed all at once, since, like adding "const", such changes tend to cause a lot of transitive effects.
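
For illustration, the recurring pattern is the one in the AudioTrackSinkInterface change below: counts of frames, samples, and bytes move from int (or int16_t/uint32_t) to size_t, while channel counts stay int. A trimmed sketch, with the class elided to its one affected method:

  #include <cstddef>

  class AudioTrackSinkInterface {
   public:
    virtual void OnData(const void* audio_data,
                        int bits_per_sample,
                        int sample_rate,
                        int number_of_channels,
                        size_t number_of_frames) = 0;  // was: int
   protected:
    virtual ~AudioTrackSinkInterface() {}
  };

  // Loop indices over such counts change type to match:
  //   for (size_t i = 0; i < number_of_frames; ++i) { ... }
  // and printf-style output of a size_t uses the PRIuS macro from
  // webrtc/base/format_macros.h.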

This was reviewed and approved in pieces:
https://codereview.webrtc.org/1224093003
https://codereview.webrtc.org/1224123002
https://codereview.webrtc.org/1224163002
https://codereview.webrtc.org/1225133003
https://codereview.webrtc.org/1225173002
https://codereview.webrtc.org/1227163003
https://codereview.webrtc.org/1227203003
https://codereview.webrtc.org/1227213002
https://codereview.webrtc.org/1227893002
https://codereview.webrtc.org/1228793004
https://codereview.webrtc.org/1228803003
https://codereview.webrtc.org/1228823002
https://codereview.webrtc.org/1228823003
https://codereview.webrtc.org/1228843002
https://codereview.webrtc.org/1230693002
https://codereview.webrtc.org/1231713002

The change is being landed as TBR to all the folks who reviewed the above.

BUG=chromium:81439
TEST=none
R=andrew@webrtc.org, pbos@webrtc.org
TBR=aluebs, andrew, asapersson, henrika, hlundin, jan.skoglund, kwiberg, minyue, pbos, pthatcher

Review URL: https://codereview.webrtc.org/1230503003 .

Cr-Commit-Position: refs/heads/master@{#9768}
diff --git a/talk/app/webrtc/mediastreamhandler.cc b/talk/app/webrtc/mediastreamhandler.cc
index f68699d..ded603b 100644
--- a/talk/app/webrtc/mediastreamhandler.cc
+++ b/talk/app/webrtc/mediastreamhandler.cc
@@ -68,7 +68,7 @@
                                    int bits_per_sample,
                                    int sample_rate,
                                    int number_of_channels,
-                                   int number_of_frames) {
+                                   size_t number_of_frames) {
   rtc::CritScope lock(&lock_);
   if (sink_) {
     sink_->OnData(audio_data, bits_per_sample, sample_rate,
diff --git a/talk/app/webrtc/mediastreamhandler.h b/talk/app/webrtc/mediastreamhandler.h
index 801648d..1782f26e 100644
--- a/talk/app/webrtc/mediastreamhandler.h
+++ b/talk/app/webrtc/mediastreamhandler.h
@@ -82,7 +82,7 @@
               int bits_per_sample,
               int sample_rate,
               int number_of_channels,
-              int number_of_frames) override;
+              size_t number_of_frames) override;
 
   // cricket::AudioRenderer implementation.
   void SetSink(cricket::AudioRenderer::Sink* sink) override;
diff --git a/talk/app/webrtc/mediastreaminterface.h b/talk/app/webrtc/mediastreaminterface.h
index 4de0648..ceca99b 100644
--- a/talk/app/webrtc/mediastreaminterface.h
+++ b/talk/app/webrtc/mediastreaminterface.h
@@ -187,7 +187,7 @@
                       int bits_per_sample,
                       int sample_rate,
                       int number_of_channels,
-                      int number_of_frames) = 0;
+                      size_t number_of_frames) = 0;
  protected:
   virtual ~AudioTrackSinkInterface() {}
 };
diff --git a/talk/app/webrtc/test/fakeaudiocapturemodule.cc b/talk/app/webrtc/test/fakeaudiocapturemodule.cc
index 321e76b..32f9c84 100644
--- a/talk/app/webrtc/test/fakeaudiocapturemodule.cc
+++ b/talk/app/webrtc/test/fakeaudiocapturemodule.cc
@@ -615,9 +615,9 @@
 
 void FakeAudioCaptureModule::SetSendBuffer(int value) {
   Sample* buffer_ptr = reinterpret_cast<Sample*>(send_buffer_);
-  const int buffer_size_in_samples =
+  const size_t buffer_size_in_samples =
       sizeof(send_buffer_) / kNumberBytesPerSample;
-  for (int i = 0; i < buffer_size_in_samples; ++i) {
+  for (size_t i = 0; i < buffer_size_in_samples; ++i) {
     buffer_ptr[i] = value;
   }
 }
@@ -628,9 +628,9 @@
 
 bool FakeAudioCaptureModule::CheckRecBuffer(int value) {
   const Sample* buffer_ptr = reinterpret_cast<const Sample*>(rec_buffer_);
-  const int buffer_size_in_samples =
+  const size_t buffer_size_in_samples =
       sizeof(rec_buffer_) / kNumberBytesPerSample;
-  for (int i = 0; i < buffer_size_in_samples; ++i) {
+  for (size_t i = 0; i < buffer_size_in_samples; ++i) {
     if (buffer_ptr[i] >= value) return true;
   }
   return false;
@@ -698,7 +698,7 @@
       return;
     }
     ResetRecBuffer();
-    uint32_t nSamplesOut = 0;
+    size_t nSamplesOut = 0;
     int64_t elapsed_time_ms = 0;
     int64_t ntp_time_ms = 0;
     if (audio_callback_->NeedMorePlayData(kNumberSamples, kNumberBytesPerSample,
diff --git a/talk/app/webrtc/test/fakeaudiocapturemodule.h b/talk/app/webrtc/test/fakeaudiocapturemodule.h
index be1df9a..9f36ed8 100644
--- a/talk/app/webrtc/test/fakeaudiocapturemodule.h
+++ b/talk/app/webrtc/test/fakeaudiocapturemodule.h
@@ -57,8 +57,8 @@
 
   // The value for the following constants have been derived by running VoE
   // using a real ADM. The constants correspond to 10ms of mono audio at 44kHz.
-  static const int kNumberSamples = 440;
-  static const int kNumberBytesPerSample = sizeof(Sample);
+  static const size_t kNumberSamples = 440;
+  static const size_t kNumberBytesPerSample = sizeof(Sample);
 
   // Creates a FakeAudioCaptureModule or returns NULL on failure.
   static rtc::scoped_refptr<FakeAudioCaptureModule> Create();
diff --git a/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc b/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
index fcfdf7e..e2dc123 100644
--- a/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
+++ b/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
@@ -56,8 +56,8 @@
   // Callbacks inherited from webrtc::AudioTransport.
   // ADM is pushing data.
   int32_t RecordedDataIsAvailable(const void* audioSamples,
-                                  const uint32_t nSamples,
-                                  const uint8_t nBytesPerSample,
+                                  const size_t nSamples,
+                                  const size_t nBytesPerSample,
                                   const uint8_t nChannels,
                                   const uint32_t samplesPerSec,
                                   const uint32_t totalDelayMS,
@@ -80,18 +80,18 @@
   }
 
   // ADM is pulling data.
-  int32_t NeedMorePlayData(const uint32_t nSamples,
-                           const uint8_t nBytesPerSample,
+  int32_t NeedMorePlayData(const size_t nSamples,
+                           const size_t nBytesPerSample,
                            const uint8_t nChannels,
                            const uint32_t samplesPerSec,
                            void* audioSamples,
-                           uint32_t& nSamplesOut,
+                           size_t& nSamplesOut,
                            int64_t* elapsed_time_ms,
                            int64_t* ntp_time_ms) override {
     rtc::CritScope cs(&crit_);
     ++pull_iterations_;
-    const uint32_t audio_buffer_size = nSamples * nBytesPerSample;
-    const uint32_t bytes_out = RecordedDataReceived() ?
+    const size_t audio_buffer_size = nSamples * nBytesPerSample;
+    const size_t bytes_out = RecordedDataReceived() ?
         CopyFromRecBuffer(audioSamples, audio_buffer_size):
         GenerateZeroBuffer(audioSamples, audio_buffer_size);
     nSamplesOut = bytes_out / nBytesPerSample;
@@ -115,13 +115,13 @@
   bool RecordedDataReceived() const {
     return rec_buffer_bytes_ != 0;
   }
-  int32_t GenerateZeroBuffer(void* audio_buffer, uint32_t audio_buffer_size) {
+  size_t GenerateZeroBuffer(void* audio_buffer, size_t audio_buffer_size) {
     memset(audio_buffer, 0, audio_buffer_size);
     return audio_buffer_size;
   }
-  int32_t CopyFromRecBuffer(void* audio_buffer, uint32_t audio_buffer_size) {
+  size_t CopyFromRecBuffer(void* audio_buffer, size_t audio_buffer_size) {
     EXPECT_EQ(audio_buffer_size, rec_buffer_bytes_);
-    const uint32_t min_buffer_size = min(audio_buffer_size, rec_buffer_bytes_);
+    const size_t min_buffer_size = min(audio_buffer_size, rec_buffer_bytes_);
     memcpy(audio_buffer, rec_buffer_, min_buffer_size);
     return min_buffer_size;
   }
@@ -133,7 +133,7 @@
 
   char rec_buffer_[FakeAudioCaptureModule::kNumberSamples *
                    FakeAudioCaptureModule::kNumberBytesPerSample];
-  uint32_t rec_buffer_bytes_;
+  size_t rec_buffer_bytes_;
 };
 
 TEST_F(FakeAdmTest, TestProccess) {
diff --git a/talk/media/base/audiorenderer.h b/talk/media/base/audiorenderer.h
index 1553318..5c03576 100644
--- a/talk/media/base/audiorenderer.h
+++ b/talk/media/base/audiorenderer.h
@@ -28,6 +28,8 @@
 #ifndef TALK_MEDIA_BASE_AUDIORENDERER_H_
 #define TALK_MEDIA_BASE_AUDIORENDERER_H_
 
+#include <cstddef>
+
 namespace cricket {
 
 // Abstract interface for rendering the audio data.
@@ -40,7 +42,7 @@
                         int bits_per_sample,
                         int sample_rate,
                         int number_of_channels,
-                        int number_of_frames) = 0;
+                        size_t number_of_frames) = 0;
 
     // Called when the AudioRenderer is going away.
     virtual void OnClose() = 0;
diff --git a/talk/media/base/fakemediaengine.h b/talk/media/base/fakemediaengine.h
index 086f831..2c579e2 100644
--- a/talk/media/base/fakemediaengine.h
+++ b/talk/media/base/fakemediaengine.h
@@ -438,7 +438,7 @@
                 int bits_per_sample,
                 int sample_rate,
                 int number_of_channels,
-                int number_of_frames) override {}
+                size_t number_of_frames) override {}
     void OnClose() override { renderer_ = NULL; }
     AudioRenderer* renderer() const { return renderer_; }
 
diff --git a/talk/media/webrtc/fakewebrtcvoiceengine.h b/talk/media/webrtc/fakewebrtcvoiceengine.h
index 3ac2f3b..cea9e40 100644
--- a/talk/media/webrtc/fakewebrtcvoiceengine.h
+++ b/talk/media/webrtc/fakewebrtcvoiceengine.h
@@ -132,7 +132,7 @@
   WEBRTC_STUB(ProcessStream, (webrtc::AudioFrame* frame));
   WEBRTC_STUB(ProcessStream, (
       const float* const* src,
-      int samples_per_channel,
+      size_t samples_per_channel,
       int input_sample_rate_hz,
       webrtc::AudioProcessing::ChannelLayout input_layout,
       int output_sample_rate_hz,
@@ -147,7 +147,7 @@
   WEBRTC_STUB(ProcessReverseStream, (webrtc::AudioFrame * frame));
   WEBRTC_STUB(AnalyzeReverseStream, (
       const float* const* data,
-      int samples_per_channel,
+      size_t samples_per_channel,
       int sample_rate_hz,
       webrtc::AudioProcessing::ChannelLayout layout));
   WEBRTC_STUB(ProcessReverseStream,
diff --git a/talk/media/webrtc/webrtcvoiceengine.cc b/talk/media/webrtc/webrtcvoiceengine.cc
index e288e70..80208ba 100644
--- a/talk/media/webrtc/webrtcvoiceengine.cc
+++ b/talk/media/webrtc/webrtcvoiceengine.cc
@@ -1569,7 +1569,7 @@
 void WebRtcVoiceEngine::Process(int channel,
                                 webrtc::ProcessingTypes type,
                                 int16_t audio10ms[],
-                                int length,
+                                size_t length,
                                 int sampling_freq,
                                 bool is_stereo) {
     rtc::CritScope cs(&signal_media_critical_);
@@ -1665,7 +1665,7 @@
               int bits_per_sample,
               int sample_rate,
               int number_of_channels,
-              int number_of_frames) override {
+              size_t number_of_frames) override {
     voe_audio_transport_->OnData(channel_,
                                  audio_data,
                                  bits_per_sample,
diff --git a/talk/media/webrtc/webrtcvoiceengine.h b/talk/media/webrtc/webrtcvoiceengine.h
index c8e7980..21056cd 100644
--- a/talk/media/webrtc/webrtcvoiceengine.h
+++ b/talk/media/webrtc/webrtcvoiceengine.h
@@ -130,7 +130,7 @@
   void Process(int channel,
                webrtc::ProcessingTypes type,
                int16_t audio10ms[],
-               int length,
+               size_t length,
                int sampling_freq,
                bool is_stereo) override;
 
diff --git a/webrtc/common_audio/audio_converter.cc b/webrtc/common_audio/audio_converter.cc
index 7e043b7..624c38d 100644
--- a/webrtc/common_audio/audio_converter.cc
+++ b/webrtc/common_audio/audio_converter.cc
@@ -24,8 +24,8 @@
 
 class CopyConverter : public AudioConverter {
  public:
-  CopyConverter(int src_channels, int src_frames, int dst_channels,
-                int dst_frames)
+  CopyConverter(int src_channels, size_t src_frames, int dst_channels,
+                size_t dst_frames)
       : AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {}
   ~CopyConverter() override {};
 
@@ -41,15 +41,15 @@
 
 class UpmixConverter : public AudioConverter {
  public:
-  UpmixConverter(int src_channels, int src_frames, int dst_channels,
-                 int dst_frames)
+  UpmixConverter(int src_channels, size_t src_frames, int dst_channels,
+                 size_t dst_frames)
       : AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {}
   ~UpmixConverter() override {};
 
   void Convert(const float* const* src, size_t src_size, float* const* dst,
                size_t dst_capacity) override {
     CheckSizes(src_size, dst_capacity);
-    for (int i = 0; i < dst_frames(); ++i) {
+    for (size_t i = 0; i < dst_frames(); ++i) {
       const float value = src[0][i];
       for (int j = 0; j < dst_channels(); ++j)
         dst[j][i] = value;
@@ -59,8 +59,8 @@
 
 class DownmixConverter : public AudioConverter {
  public:
-  DownmixConverter(int src_channels, int src_frames, int dst_channels,
-                   int dst_frames)
+  DownmixConverter(int src_channels, size_t src_frames, int dst_channels,
+                   size_t dst_frames)
       : AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {
   }
   ~DownmixConverter() override {};
@@ -69,7 +69,7 @@
                size_t dst_capacity) override {
     CheckSizes(src_size, dst_capacity);
     float* dst_mono = dst[0];
-    for (int i = 0; i < src_frames(); ++i) {
+    for (size_t i = 0; i < src_frames(); ++i) {
       float sum = 0;
       for (int j = 0; j < src_channels(); ++j)
         sum += src[j][i];
@@ -80,8 +80,8 @@
 
 class ResampleConverter : public AudioConverter {
  public:
-  ResampleConverter(int src_channels, int src_frames, int dst_channels,
-                    int dst_frames)
+  ResampleConverter(int src_channels, size_t src_frames, int dst_channels,
+                    size_t dst_frames)
       : AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {
     resamplers_.reserve(src_channels);
     for (int i = 0; i < src_channels; ++i)
@@ -136,9 +136,9 @@
 };
 
 rtc::scoped_ptr<AudioConverter> AudioConverter::Create(int src_channels,
-                                                       int src_frames,
+                                                       size_t src_frames,
                                                        int dst_channels,
-                                                       int dst_frames) {
+                                                       size_t dst_frames) {
   rtc::scoped_ptr<AudioConverter> sp;
   if (src_channels > dst_channels) {
     if (src_frames != dst_frames) {
@@ -182,8 +182,8 @@
       dst_channels_(0),
       dst_frames_(0) {}
 
-AudioConverter::AudioConverter(int src_channels, int src_frames,
-                               int dst_channels, int dst_frames)
+AudioConverter::AudioConverter(int src_channels, size_t src_frames,
+                               int dst_channels, size_t dst_frames)
     : src_channels_(src_channels),
       src_frames_(src_frames),
       dst_channels_(dst_channels),
@@ -192,8 +192,8 @@
 }
 
 void AudioConverter::CheckSizes(size_t src_size, size_t dst_capacity) const {
-  CHECK_EQ(src_size, checked_cast<size_t>(src_channels() * src_frames()));
-  CHECK_GE(dst_capacity, checked_cast<size_t>(dst_channels() * dst_frames()));
+  CHECK_EQ(src_size, src_channels() * src_frames());
+  CHECK_GE(dst_capacity, dst_channels() * dst_frames());
 }
 
 }  // namespace webrtc
diff --git a/webrtc/common_audio/audio_converter.h b/webrtc/common_audio/audio_converter.h
index 772872f..407b5ff 100644
--- a/webrtc/common_audio/audio_converter.h
+++ b/webrtc/common_audio/audio_converter.h
@@ -27,9 +27,9 @@
   // Returns a new AudioConverter, which will use the supplied format for its
   // lifetime. Caller is responsible for the memory.
   static rtc::scoped_ptr<AudioConverter> Create(int src_channels,
-                                                int src_frames,
+                                                size_t src_frames,
                                                 int dst_channels,
-                                                int dst_frames);
+                                                size_t dst_frames);
   virtual ~AudioConverter() {};
 
   // Convert |src|, containing |src_size| samples, to |dst|, having a sample
@@ -40,23 +40,23 @@
                        float* const* dst, size_t dst_capacity) = 0;
 
   int src_channels() const { return src_channels_; }
-  int src_frames() const { return src_frames_; }
+  size_t src_frames() const { return src_frames_; }
   int dst_channels() const { return dst_channels_; }
-  int dst_frames() const { return dst_frames_; }
+  size_t dst_frames() const { return dst_frames_; }
 
  protected:
   AudioConverter();
-  AudioConverter(int src_channels, int src_frames, int dst_channels,
-                 int dst_frames);
+  AudioConverter(int src_channels, size_t src_frames, int dst_channels,
+                 size_t dst_frames);
 
   // Helper to CHECK that inputs are correctly sized.
   void CheckSizes(size_t src_size, size_t dst_capacity) const;
 
  private:
   const int src_channels_;
-  const int src_frames_;
+  const size_t src_frames_;
   const int dst_channels_;
-  const int dst_frames_;
+  const size_t dst_frames_;
 
   DISALLOW_COPY_AND_ASSIGN(AudioConverter);
 };
diff --git a/webrtc/common_audio/audio_converter_unittest.cc b/webrtc/common_audio/audio_converter_unittest.cc
index 590c8ce..c85b96e 100644
--- a/webrtc/common_audio/audio_converter_unittest.cc
+++ b/webrtc/common_audio/audio_converter_unittest.cc
@@ -13,6 +13,7 @@
 #include <vector>
 
 #include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/format_macros.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/common_audio/audio_converter.h"
 #include "webrtc/common_audio/channel_buffer.h"
@@ -43,20 +44,20 @@
 // signals to compensate for the resampling delay.
 float ComputeSNR(const ChannelBuffer<float>& ref,
                  const ChannelBuffer<float>& test,
-                 int expected_delay) {
+                 size_t expected_delay) {
   VerifyParams(ref, test);
   float best_snr = 0;
-  int best_delay = 0;
+  size_t best_delay = 0;
 
   // Search within one sample of the expected delay.
-  for (int delay = std::max(expected_delay, 1) - 1;
+  for (size_t delay = std::max(expected_delay, static_cast<size_t>(1)) - 1;
        delay <= std::min(expected_delay + 1, ref.num_frames());
        ++delay) {
     float mse = 0;
     float variance = 0;
     float mean = 0;
     for (int i = 0; i < ref.num_channels(); ++i) {
-      for (int j = 0; j < ref.num_frames() - delay; ++j) {
+      for (size_t j = 0; j < ref.num_frames() - delay; ++j) {
         float error = ref.channels()[i][j] - test.channels()[i][j + delay];
         mse += error * error;
         variance += ref.channels()[i][j] * ref.channels()[i][j];
@@ -64,7 +65,7 @@
       }
     }
 
-    const int length = ref.num_channels() * (ref.num_frames() - delay);
+    const size_t length = ref.num_channels() * (ref.num_frames() - delay);
     mse /= length;
     variance /= length;
     mean /= length;
@@ -77,7 +78,7 @@
       best_delay = delay;
     }
   }
-  printf("SNR=%.1f dB at delay=%d\n", best_snr, best_delay);
+  printf("SNR=%.1f dB at delay=%" PRIuS "\n", best_snr, best_delay);
   return best_snr;
 }
 
@@ -122,9 +123,10 @@
   ScopedBuffer ref_buffer = CreateBuffer(ref_data, dst_frames);
 
   // The sinc resampler has a known delay, which we compute here.
-  const int delay_frames = src_sample_rate_hz == dst_sample_rate_hz ? 0 :
-      PushSincResampler::AlgorithmicDelaySeconds(src_sample_rate_hz) *
-          dst_sample_rate_hz;
+  const size_t delay_frames = src_sample_rate_hz == dst_sample_rate_hz ? 0 :
+      static_cast<size_t>(
+          PushSincResampler::AlgorithmicDelaySeconds(src_sample_rate_hz) *
+          dst_sample_rate_hz);
   printf("(%d, %d Hz) -> (%d, %d Hz) ",  // SNR reported on the same line later.
       src_channels, src_sample_rate_hz, dst_channels, dst_sample_rate_hz);
 
diff --git a/webrtc/common_audio/audio_ring_buffer_unittest.cc b/webrtc/common_audio/audio_ring_buffer_unittest.cc
index a83c875..a7a6a94 100644
--- a/webrtc/common_audio/audio_ring_buffer_unittest.cc
+++ b/webrtc/common_audio/audio_ring_buffer_unittest.cc
@@ -34,27 +34,27 @@
   while (input_pos + buf.WriteFramesAvailable() < total_frames) {
     // Write until the buffer is as full as possible.
     while (buf.WriteFramesAvailable() >= num_write_chunk_frames) {
-      buf.Write(input.Slice(slice.get(), static_cast<int>(input_pos)),
-                num_channels, num_write_chunk_frames);
+      buf.Write(input.Slice(slice.get(), input_pos), num_channels,
+                num_write_chunk_frames);
       input_pos += num_write_chunk_frames;
     }
     // Read until the buffer is as empty as possible.
     while (buf.ReadFramesAvailable() >= num_read_chunk_frames) {
       EXPECT_LT(output_pos, total_frames);
-      buf.Read(output->Slice(slice.get(), static_cast<int>(output_pos)),
-               num_channels, num_read_chunk_frames);
+      buf.Read(output->Slice(slice.get(), output_pos), num_channels,
+               num_read_chunk_frames);
       output_pos += num_read_chunk_frames;
     }
   }
 
   // Write and read the last bit.
   if (input_pos < total_frames) {
-    buf.Write(input.Slice(slice.get(), static_cast<int>(input_pos)),
-              num_channels, total_frames - input_pos);
+    buf.Write(input.Slice(slice.get(), input_pos), num_channels,
+              total_frames - input_pos);
   }
   if (buf.ReadFramesAvailable()) {
-    buf.Read(output->Slice(slice.get(), static_cast<int>(output_pos)),
-             num_channels, buf.ReadFramesAvailable());
+    buf.Read(output->Slice(slice.get(), output_pos), num_channels,
+             buf.ReadFramesAvailable());
   }
   EXPECT_EQ(0u, buf.ReadFramesAvailable());
 }
diff --git a/webrtc/common_audio/audio_util.cc b/webrtc/common_audio/audio_util.cc
index b330b94..2ce2eba 100644
--- a/webrtc/common_audio/audio_util.cc
+++ b/webrtc/common_audio/audio_util.cc
@@ -41,7 +41,7 @@
 
 template <>
 void DownmixInterleavedToMono<int16_t>(const int16_t* interleaved,
-                                       int num_frames,
+                                       size_t num_frames,
                                        int num_channels,
                                        int16_t* deinterleaved) {
   DownmixInterleavedToMonoImpl<int16_t, int32_t>(interleaved, num_frames,
diff --git a/webrtc/common_audio/audio_util_unittest.cc b/webrtc/common_audio/audio_util_unittest.cc
index 3ac3911..5583778 100644
--- a/webrtc/common_audio/audio_util_unittest.cc
+++ b/webrtc/common_audio/audio_util_unittest.cc
@@ -18,20 +18,20 @@
 
 using ::testing::ElementsAreArray;
 
-void ExpectArraysEq(const int16_t* ref, const int16_t* test, int length) {
-  for (int i = 0; i < length; ++i) {
+void ExpectArraysEq(const int16_t* ref, const int16_t* test, size_t length) {
+  for (size_t i = 0; i < length; ++i) {
     EXPECT_EQ(ref[i], test[i]);
   }
 }
 
-void ExpectArraysEq(const float* ref, const float* test, int length) {
-  for (int i = 0; i < length; ++i) {
+void ExpectArraysEq(const float* ref, const float* test, size_t length) {
+  for (size_t i = 0; i < length; ++i) {
     EXPECT_FLOAT_EQ(ref[i], test[i]);
   }
 }
 
 TEST(AudioUtilTest, FloatToS16) {
-  const int kSize = 9;
+  const size_t kSize = 9;
   const float kInput[kSize] = {0.f,
                                0.4f / 32767.f,
                                0.6f / 32767.f,
@@ -49,7 +49,7 @@
 }
 
 TEST(AudioUtilTest, S16ToFloat) {
-  const int kSize = 7;
+  const size_t kSize = 7;
   const int16_t kInput[kSize] = {0, 1, -1, 16384, -16384, 32767, -32768};
   const float kReference[kSize] = {
       0.f, 1.f / 32767.f, -1.f / 32768.f, 16384.f / 32767.f, -0.5f, 1.f, -1.f};
@@ -59,7 +59,7 @@
 }
 
 TEST(AudioUtilTest, FloatS16ToS16) {
-  const int kSize = 7;
+  const size_t kSize = 7;
   const float kInput[kSize] = {0.f,   0.4f,    0.5f,    -0.4f,
                                -0.5f, 32768.f, -32769.f};
   const int16_t kReference[kSize] = {0, 0, 1, 0, -1, 32767, -32768};
@@ -69,7 +69,7 @@
 }
 
 TEST(AudioUtilTest, FloatToFloatS16) {
-  const int kSize = 9;
+  const size_t kSize = 9;
   const float kInput[kSize] = {0.f,
                                0.4f / 32767.f,
                                0.6f / 32767.f,
@@ -87,7 +87,7 @@
 }
 
 TEST(AudioUtilTest, FloatS16ToFloat) {
-  const int kSize = 9;
+  const size_t kSize = 9;
   const float kInput[kSize] = {0.f,     0.4f,     0.6f,     -0.4f,    -0.6f,
                                32767.f, -32768.f, 36043.7f, -36044.8f};
   const float kReference[kSize] = {0.f,
@@ -106,9 +106,9 @@
 
 TEST(AudioUtilTest, InterleavingStereo) {
   const int16_t kInterleaved[] = {2, 3, 4, 9, 8, 27, 16, 81};
-  const int kSamplesPerChannel = 4;
+  const size_t kSamplesPerChannel = 4;
   const int kNumChannels = 2;
-  const int kLength = kSamplesPerChannel * kNumChannels;
+  const size_t kLength = kSamplesPerChannel * kNumChannels;
   int16_t left[kSamplesPerChannel], right[kSamplesPerChannel];
   int16_t* deinterleaved[] = {left, right};
   Deinterleave(kInterleaved, kSamplesPerChannel, kNumChannels, deinterleaved);
@@ -124,7 +124,7 @@
 
 TEST(AudioUtilTest, InterleavingMonoIsIdentical) {
   const int16_t kInterleaved[] = {1, 2, 3, 4, 5};
-  const int kSamplesPerChannel = 5;
+  const size_t kSamplesPerChannel = 5;
   const int kNumChannels = 1;
   int16_t mono[kSamplesPerChannel];
   int16_t* deinterleaved[] = {mono};
@@ -138,7 +138,7 @@
 
 TEST(AudioUtilTest, DownmixInterleavedToMono) {
   {
-    const int kNumFrames = 4;
+    const size_t kNumFrames = 4;
     const int kNumChannels = 1;
     const int16_t interleaved[kNumChannels * kNumFrames] = {1, 2, -1, -3};
     int16_t deinterleaved[kNumFrames];
@@ -149,7 +149,7 @@
     EXPECT_THAT(deinterleaved, ElementsAreArray(interleaved));
   }
   {
-    const int kNumFrames = 2;
+    const size_t kNumFrames = 2;
     const int kNumChannels = 2;
     const int16_t interleaved[kNumChannels * kNumFrames] = {10, 20, -10, -30};
     int16_t deinterleaved[kNumFrames];
@@ -161,7 +161,7 @@
     EXPECT_THAT(deinterleaved, ElementsAreArray(expected));
   }
   {
-    const int kNumFrames = 3;
+    const size_t kNumFrames = 3;
     const int kNumChannels = 3;
     const int16_t interleaved[kNumChannels * kNumFrames] = {
         30000, 30000, 24001, -5, -10, -20, -30000, -30999, -30000};
@@ -177,7 +177,7 @@
 
 TEST(AudioUtilTest, DownmixToMonoTest) {
   {
-    const int kNumFrames = 4;
+    const size_t kNumFrames = 4;
     const int kNumChannels = 1;
     const float input_data[kNumChannels][kNumFrames] = {{1.f, 2.f, -1.f, -3.f}};
     const float* input[kNumChannels];
@@ -192,7 +192,7 @@
     EXPECT_THAT(downmixed, ElementsAreArray(input_data[0]));
   }
   {
-    const int kNumFrames = 3;
+    const size_t kNumFrames = 3;
     const int kNumChannels = 2;
     const float input_data[kNumChannels][kNumFrames] = {{1.f, 2.f, -1.f},
                                                         {3.f, 0.f, 1.f}};
@@ -209,7 +209,7 @@
     EXPECT_THAT(downmixed, ElementsAreArray(expected));
   }
   {
-    const int kNumFrames = 3;
+    const size_t kNumFrames = 3;
     const int kNumChannels = 3;
     const int16_t input_data[kNumChannels][kNumFrames] = {
         {30000, -5, -30000}, {30000, -10, -30999}, {24001, -20, -30000}};
diff --git a/webrtc/common_audio/blocker.cc b/webrtc/common_audio/blocker.cc
index 6d171ca..359e881 100644
--- a/webrtc/common_audio/blocker.cc
+++ b/webrtc/common_audio/blocker.cc
@@ -18,15 +18,15 @@
 
 // Adds |a| and |b| frame by frame into |result| (basically matrix addition).
 void AddFrames(const float* const* a,
-               int a_start_index,
+               size_t a_start_index,
                const float* const* b,
                int b_start_index,
-               int num_frames,
+               size_t num_frames,
                int num_channels,
                float* const* result,
-               int result_start_index) {
+               size_t result_start_index) {
   for (int i = 0; i < num_channels; ++i) {
-    for (int j = 0; j < num_frames; ++j) {
+    for (size_t j = 0; j < num_frames; ++j) {
       result[i][j + result_start_index] =
           a[i][j + a_start_index] + b[i][j + b_start_index];
     }
@@ -35,11 +35,11 @@
 
 // Copies |src| into |dst| channel by channel.
 void CopyFrames(const float* const* src,
-                int src_start_index,
-                int num_frames,
+                size_t src_start_index,
+                size_t num_frames,
                 int num_channels,
                 float* const* dst,
-                int dst_start_index) {
+                size_t dst_start_index) {
   for (int i = 0; i < num_channels; ++i) {
     memcpy(&dst[i][dst_start_index],
            &src[i][src_start_index],
@@ -49,11 +49,11 @@
 
 // Moves |src| into |dst| channel by channel.
 void MoveFrames(const float* const* src,
-                int src_start_index,
-                int num_frames,
+                size_t src_start_index,
+                size_t num_frames,
                 int num_channels,
                 float* const* dst,
-                int dst_start_index) {
+                size_t dst_start_index) {
   for (int i = 0; i < num_channels; ++i) {
     memmove(&dst[i][dst_start_index],
             &src[i][src_start_index],
@@ -62,8 +62,8 @@
 }
 
 void ZeroOut(float* const* buffer,
-             int starting_idx,
-             int num_frames,
+             size_t starting_idx,
+             size_t num_frames,
              int num_channels) {
   for (int i = 0; i < num_channels; ++i) {
     memset(&buffer[i][starting_idx], 0,
@@ -74,18 +74,18 @@
 // Pointwise multiplies each channel of |frames| with |window|. Results are
 // stored in |frames|.
 void ApplyWindow(const float* window,
-                 int num_frames,
+                 size_t num_frames,
                  int num_channels,
                  float* const* frames) {
   for (int i = 0; i < num_channels; ++i) {
-    for (int j = 0; j < num_frames; ++j) {
+    for (size_t j = 0; j < num_frames; ++j) {
       frames[i][j] = frames[i][j] * window[j];
     }
   }
 }
 
-int gcd(int a, int b) {
-  int tmp;
+size_t gcd(size_t a, size_t b) {
+  size_t tmp;
   while (b) {
      tmp = a;
      a = b;
@@ -98,12 +98,12 @@
 
 namespace webrtc {
 
-Blocker::Blocker(int chunk_size,
-                 int block_size,
+Blocker::Blocker(size_t chunk_size,
+                 size_t block_size,
                  int num_input_channels,
                  int num_output_channels,
                  const float* window,
-                 int shift_amount,
+                 size_t shift_amount,
                  BlockerCallback* callback)
     : chunk_size_(chunk_size),
       block_size_(block_size),
@@ -165,7 +165,7 @@
 //
 // TODO(claguna): Look at using ring buffers to eliminate some copies.
 void Blocker::ProcessChunk(const float* const* input,
-                           int chunk_size,
+                           size_t chunk_size,
                            int num_input_channels,
                            int num_output_channels,
                            float* const* output) {
@@ -174,7 +174,7 @@
   CHECK_EQ(num_output_channels, num_output_channels_);
 
   input_buffer_.Write(input, num_input_channels, chunk_size_);
-  int first_frame_in_block = frame_offset_;
+  size_t first_frame_in_block = frame_offset_;
 
   // Loop through blocks.
   while (first_frame_in_block < chunk_size_) {
diff --git a/webrtc/common_audio/blocker.h b/webrtc/common_audio/blocker.h
index 00f372d..025638a 100644
--- a/webrtc/common_audio/blocker.h
+++ b/webrtc/common_audio/blocker.h
@@ -25,7 +25,7 @@
   virtual ~BlockerCallback() {}
 
   virtual void ProcessBlock(const float* const* input,
-                            int num_frames,
+                            size_t num_frames,
                             int num_input_channels,
                             int num_output_channels,
                             float* const* output) = 0;
@@ -63,34 +63,34 @@
 // copy of window and does not attempt to delete it.
 class Blocker {
  public:
-  Blocker(int chunk_size,
-          int block_size,
+  Blocker(size_t chunk_size,
+          size_t block_size,
           int num_input_channels,
           int num_output_channels,
           const float* window,
-          int shift_amount,
+          size_t shift_amount,
           BlockerCallback* callback);
 
   void ProcessChunk(const float* const* input,
-                    int chunk_size,
+                    size_t chunk_size,
                     int num_input_channels,
                     int num_output_channels,
                     float* const* output);
 
  private:
-  const int chunk_size_;
-  const int block_size_;
+  const size_t chunk_size_;
+  const size_t block_size_;
   const int num_input_channels_;
   const int num_output_channels_;
 
   // The number of frames of delay to add at the beginning of the first chunk.
-  const int initial_delay_;
+  const size_t initial_delay_;
 
   // The frame index into the input buffer where the first block should be read
   // from. This is necessary because shift_amount_ is not necessarily a
   // multiple of chunk_size_, so blocks won't line up at the start of the
   // buffer.
-  int frame_offset_;
+  size_t frame_offset_;
 
   // Since blocks nearly always overlap, there are certain blocks that require
   // frames from the end of one chunk and the beginning of the next chunk. The
@@ -113,7 +113,7 @@
 
   // The amount of frames between the start of contiguous blocks. For example,
   // |shift_amount_| = |block_size_| / 2 for a Hann window.
-  int shift_amount_;
+  size_t shift_amount_;
 
   BlockerCallback* callback_;
 };
diff --git a/webrtc/common_audio/blocker_unittest.cc b/webrtc/common_audio/blocker_unittest.cc
index 9e99886..397e269 100644
--- a/webrtc/common_audio/blocker_unittest.cc
+++ b/webrtc/common_audio/blocker_unittest.cc
@@ -18,12 +18,12 @@
 class PlusThreeBlockerCallback : public webrtc::BlockerCallback {
  public:
   void ProcessBlock(const float* const* input,
-                    int num_frames,
+                    size_t num_frames,
                     int num_input_channels,
                     int num_output_channels,
                     float* const* output) override {
     for (int i = 0; i < num_output_channels; ++i) {
-      for (int j = 0; j < num_frames; ++j) {
+      for (size_t j = 0; j < num_frames; ++j) {
         output[i][j] = input[i][j] + 3;
       }
     }
@@ -34,12 +34,12 @@
 class CopyBlockerCallback : public webrtc::BlockerCallback {
  public:
   void ProcessBlock(const float* const* input,
-                    int num_frames,
+                    size_t num_frames,
                     int num_input_channels,
                     int num_output_channels,
                     float* const* output) override {
     for (int i = 0; i < num_output_channels; ++i) {
-      for (int j = 0; j < num_frames; ++j) {
+      for (size_t j = 0; j < num_frames; ++j) {
         output[i][j] = input[i][j];
       }
     }
diff --git a/webrtc/common_audio/channel_buffer.cc b/webrtc/common_audio/channel_buffer.cc
index 14aaa7a..d3dc7c0 100644
--- a/webrtc/common_audio/channel_buffer.cc
+++ b/webrtc/common_audio/channel_buffer.cc
@@ -12,9 +12,9 @@
 
 namespace webrtc {
 
-IFChannelBuffer::IFChannelBuffer(int num_frames,
+IFChannelBuffer::IFChannelBuffer(size_t num_frames,
                                  int num_channels,
-                                 int num_bands)
+                                 size_t num_bands)
     : ivalid_(true),
       ibuf_(num_frames, num_channels, num_bands),
       fvalid_(true),
@@ -48,7 +48,7 @@
     const int16_t* const* int_channels = ibuf_.channels();
     float* const* float_channels = fbuf_.channels();
     for (int i = 0; i < ibuf_.num_channels(); ++i) {
-      for (int j = 0; j < ibuf_.num_frames(); ++j) {
+      for (size_t j = 0; j < ibuf_.num_frames(); ++j) {
         float_channels[i][j] = int_channels[i][j];
       }
     }
diff --git a/webrtc/common_audio/channel_buffer.h b/webrtc/common_audio/channel_buffer.h
index a5dcc6c..00ea733 100644
--- a/webrtc/common_audio/channel_buffer.h
+++ b/webrtc/common_audio/channel_buffer.h
@@ -39,9 +39,9 @@
 template <typename T>
 class ChannelBuffer {
  public:
-  ChannelBuffer(int num_frames,
+  ChannelBuffer(size_t num_frames,
                 int num_channels,
-                int num_bands = 1)
+                size_t num_bands = 1)
       : data_(new T[num_frames * num_channels]()),
         channels_(new T*[num_channels * num_bands]),
         bands_(new T*[num_channels * num_bands]),
@@ -50,7 +50,7 @@
         num_channels_(num_channels),
         num_bands_(num_bands) {
     for (int i = 0; i < num_channels_; ++i) {
-      for (int j = 0; j < num_bands_; ++j) {
+      for (size_t j = 0; j < num_bands_; ++j) {
         channels_[j * num_channels_ + i] =
             &data_[i * num_frames_ + j * num_frames_per_band_];
         bands_[i * num_bands_ + j] = channels_[j * num_channels_ + i];
@@ -74,12 +74,11 @@
   // 0 <= band < |num_bands_|
   // 0 <= channel < |num_channels_|
   // 0 <= sample < |num_frames_per_band_|
-  const T* const* channels(int band) const {
+  const T* const* channels(size_t band) const {
     DCHECK_LT(band, num_bands_);
-    DCHECK_GE(band, 0);
     return &channels_[band * num_channels_];
   }
-  T* const* channels(int band) {
+  T* const* channels(size_t band) {
     const ChannelBuffer<T>* t = this;
     return const_cast<T* const*>(t->channels(band));
   }
@@ -103,21 +102,21 @@
 
   // Sets the |slice| pointers to the |start_frame| position for each channel.
   // Returns |slice| for convenience.
-  const T* const* Slice(T** slice, int start_frame) const {
+  const T* const* Slice(T** slice, size_t start_frame) const {
     DCHECK_LT(start_frame, num_frames_);
     for (int i = 0; i < num_channels_; ++i)
       slice[i] = &channels_[i][start_frame];
     return slice;
   }
-  T** Slice(T** slice, int start_frame) {
+  T** Slice(T** slice, size_t start_frame) {
     const ChannelBuffer<T>* t = this;
     return const_cast<T**>(t->Slice(slice, start_frame));
   }
 
-  int num_frames() const { return num_frames_; }
-  int num_frames_per_band() const { return num_frames_per_band_; }
+  size_t num_frames() const { return num_frames_; }
+  size_t num_frames_per_band() const { return num_frames_per_band_; }
   int num_channels() const { return num_channels_; }
-  int num_bands() const { return num_bands_; }
+  size_t num_bands() const { return num_bands_; }
   size_t size() const {return num_frames_ * num_channels_; }
 
   void SetDataForTesting(const T* data, size_t size) {
@@ -129,10 +128,10 @@
   rtc::scoped_ptr<T[]> data_;
   rtc::scoped_ptr<T* []> channels_;
   rtc::scoped_ptr<T* []> bands_;
-  const int num_frames_;
-  const int num_frames_per_band_;
+  const size_t num_frames_;
+  const size_t num_frames_per_band_;
   const int num_channels_;
-  const int num_bands_;
+  const size_t num_bands_;
 };
 
 // One int16_t and one float ChannelBuffer that are kept in sync. The sync is
@@ -143,17 +142,17 @@
 // fbuf() until the next call to any of the other functions.
 class IFChannelBuffer {
  public:
-  IFChannelBuffer(int num_frames, int num_channels, int num_bands = 1);
+  IFChannelBuffer(size_t num_frames, int num_channels, size_t num_bands = 1);
 
   ChannelBuffer<int16_t>* ibuf();
   ChannelBuffer<float>* fbuf();
   const ChannelBuffer<int16_t>* ibuf_const() const;
   const ChannelBuffer<float>* fbuf_const() const;
 
-  int num_frames() const { return ibuf_.num_frames(); }
-  int num_frames_per_band() const { return ibuf_.num_frames_per_band(); }
+  size_t num_frames() const { return ibuf_.num_frames(); }
+  size_t num_frames_per_band() const { return ibuf_.num_frames_per_band(); }
   int num_channels() const { return ibuf_.num_channels(); }
-  int num_bands() const { return ibuf_.num_bands(); }
+  size_t num_bands() const { return ibuf_.num_bands(); }
 
  private:
   void RefreshF() const;
diff --git a/webrtc/common_audio/fft4g.c b/webrtc/common_audio/fft4g.c
index 24d45eb..9cf7b9f 100644
--- a/webrtc/common_audio/fft4g.c
+++ b/webrtc/common_audio/fft4g.c
@@ -27,7 +27,7 @@
     dfst: Sine Transform of RDFT (Real Anti-symmetric DFT)
 function prototypes
     void cdft(int, int, float *, int *, float *);
-    void rdft(int, int, float *, int *, float *);
+    void rdft(size_t, int, float *, size_t *, float *);
     void ddct(int, int, float *, int *, float *);
     void ddst(int, int, float *, int *, float *);
     void dfct(int, float *, float *, int *, float *);
@@ -94,7 +94,7 @@
             ip[0] = 0; // first time only
             rdft(n, -1, a, ip, w);
     [parameters]
-        n              :data length (int)
+        n              :data length (size_t)
                         n >= 2, n = power of 2
         a[0...n-1]     :input/output data (float *)
                         <case1>
@@ -107,7 +107,7 @@
                                 a[2*j] = R[j], 0<=j<n/2
                                 a[2*j+1] = I[j], 0<j<n/2
                                 a[1] = R[n/2]
-        ip[0...*]      :work area for bit reversal (int *)
+        ip[0...*]      :work area for bit reversal (size_t *)
                         length of ip >= 2+sqrt(n/2)
                         strictly,
                         length of ip >=
@@ -286,18 +286,20 @@
     w[] and ip[] are compatible with all routines.
 */
 
-static void makewt(int nw, int *ip, float *w);
-static void makect(int nc, int *ip, float *c);
-static void bitrv2(int n, int *ip, float *a);
+#include <stddef.h>
+
+static void makewt(size_t nw, size_t *ip, float *w);
+static void makect(size_t nc, size_t *ip, float *c);
+static void bitrv2(size_t n, size_t *ip, float *a);
 #if 0  // Not used.
 static void bitrv2conj(int n, int *ip, float *a);
 #endif
-static void cftfsub(int n, float *a, float *w);
-static void cftbsub(int n, float *a, float *w);
-static void cft1st(int n, float *a, float *w);
-static void cftmdl(int n, int l, float *a, float *w);
-static void rftfsub(int n, float *a, int nc, float *c);
-static void rftbsub(int n, float *a, int nc, float *c);
+static void cftfsub(size_t n, float *a, float *w);
+static void cftbsub(size_t n, float *a, float *w);
+static void cft1st(size_t n, float *a, float *w);
+static void cftmdl(size_t n, size_t l, float *a, float *w);
+static void rftfsub(size_t n, float *a, size_t nc, float *c);
+static void rftbsub(size_t n, float *a, size_t nc, float *c);
 #if 0  // Not used.
 static void dctsub(int n, float *a, int nc, float *c)
 static void dstsub(int n, float *a, int nc, float *c)
@@ -325,9 +327,9 @@
 #endif
 
 
-void WebRtc_rdft(int n, int isgn, float *a, int *ip, float *w)
+void WebRtc_rdft(size_t n, int isgn, float *a, size_t *ip, float *w)
 {
-    int nw, nc;
+    size_t nw, nc;
     float xi;
 
     nw = ip[0];
@@ -643,9 +645,9 @@
 
 #include <math.h>
 
-static void makewt(int nw, int *ip, float *w)
+static void makewt(size_t nw, size_t *ip, float *w)
 {
-    int j, nwh;
+    size_t j, nwh;
     float delta, x, y;
 
     ip[0] = nw;
@@ -672,9 +674,9 @@
 }
 
 
-static void makect(int nc, int *ip, float *c)
+static void makect(size_t nc, size_t *ip, float *c)
 {
-    int j, nch;
+    size_t j, nch;
     float delta;
 
     ip[1] = nc;
@@ -694,9 +696,9 @@
 /* -------- child routines -------- */
 
 
-static void bitrv2(int n, int *ip, float *a)
+static void bitrv2(size_t n, size_t *ip, float *a)
 {
-    int j, j1, k, k1, l, m, m2;
+    size_t j, j1, k, k1, l, m, m2;
     float xr, xi, yr, yi;
 
     ip[0] = 0;
@@ -903,9 +905,9 @@
 }
 #endif
 
-static void cftfsub(int n, float *a, float *w)
+static void cftfsub(size_t n, float *a, float *w)
 {
-    int j, j1, j2, j3, l;
+    size_t j, j1, j2, j3, l;
     float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
 
     l = 2;
@@ -953,9 +955,9 @@
 }
 
 
-static void cftbsub(int n, float *a, float *w)
+static void cftbsub(size_t n, float *a, float *w)
 {
-    int j, j1, j2, j3, l;
+    size_t j, j1, j2, j3, l;
     float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
 
     l = 2;
@@ -1003,9 +1005,9 @@
 }
 
 
-static void cft1st(int n, float *a, float *w)
+static void cft1st(size_t n, float *a, float *w)
 {
-    int j, k1, k2;
+    size_t j, k1, k2;
     float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
     float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
 
@@ -1108,9 +1110,9 @@
 }
 
 
-static void cftmdl(int n, int l, float *a, float *w)
+static void cftmdl(size_t n, size_t l, float *a, float *w)
 {
-    int j, j1, j2, j3, k, k1, k2, m, m2;
+    size_t j, j1, j2, j3, k, k1, k2, m, m2;
     float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
     float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
 
@@ -1235,9 +1237,9 @@
 }
 
 
-static void rftfsub(int n, float *a, int nc, float *c)
+static void rftfsub(size_t n, float *a, size_t nc, float *c)
 {
-    int j, k, kk, ks, m;
+    size_t j, k, kk, ks, m;
     float wkr, wki, xr, xi, yr, yi;
 
     m = n >> 1;
@@ -1260,9 +1262,9 @@
 }
 
 
-static void rftbsub(int n, float *a, int nc, float *c)
+static void rftbsub(size_t n, float *a, size_t nc, float *c)
 {
-    int j, k, kk, ks, m;
+    size_t j, k, kk, ks, m;
     float wkr, wki, xr, xi, yr, yi;
 
     a[1] = -a[1];
diff --git a/webrtc/common_audio/fft4g.h b/webrtc/common_audio/fft4g.h
index 2fe4299..6dd792f 100644
--- a/webrtc/common_audio/fft4g.h
+++ b/webrtc/common_audio/fft4g.h
@@ -16,7 +16,7 @@
 #endif
 
 // Refer to fft4g.c for documentation.
-void WebRtc_rdft(int n, int isgn, float *a, int *ip, float *w);
+void WebRtc_rdft(size_t n, int isgn, float *a, size_t *ip, float *w);
 
 #if defined(__cplusplus)
 }
diff --git a/webrtc/common_audio/include/audio_util.h b/webrtc/common_audio/include/audio_util.h
index 99771dd..d8e1ce3 100644
--- a/webrtc/common_audio/include/audio_util.h
+++ b/webrtc/common_audio/include/audio_util.h
@@ -86,13 +86,13 @@
 // per buffer).
 template <typename T>
 void Deinterleave(const T* interleaved,
-                  int samples_per_channel,
+                  size_t samples_per_channel,
                   int num_channels,
                   T* const* deinterleaved) {
   for (int i = 0; i < num_channels; ++i) {
     T* channel = deinterleaved[i];
     int interleaved_idx = i;
-    for (int j = 0; j < samples_per_channel; ++j) {
+    for (size_t j = 0; j < samples_per_channel; ++j) {
       channel[j] = interleaved[interleaved_idx];
       interleaved_idx += num_channels;
     }
@@ -104,13 +104,13 @@
 // (|samples_per_channel| * |num_channels|).
 template <typename T>
 void Interleave(const T* const* deinterleaved,
-                int samples_per_channel,
+                size_t samples_per_channel,
                 int num_channels,
                 T* interleaved) {
   for (int i = 0; i < num_channels; ++i) {
     const T* channel = deinterleaved[i];
     int interleaved_idx = i;
-    for (int j = 0; j < samples_per_channel; ++j) {
+    for (size_t j = 0; j < samples_per_channel; ++j) {
       interleaved[interleaved_idx] = channel[j];
       interleaved_idx += num_channels;
     }
@@ -135,10 +135,10 @@
 
 template <typename T, typename Intermediate>
 void DownmixToMono(const T* const* input_channels,
-                   int num_frames,
+                   size_t num_frames,
                    int num_channels,
                    T* out) {
-  for (int i = 0; i < num_frames; ++i) {
+  for (size_t i = 0; i < num_frames; ++i) {
     Intermediate value = input_channels[0][i];
     for (int j = 1; j < num_channels; ++j) {
       value += input_channels[j][i];
@@ -151,11 +151,11 @@
 // all channels.
 template <typename T, typename Intermediate>
 void DownmixInterleavedToMonoImpl(const T* interleaved,
-                                  int num_frames,
+                                  size_t num_frames,
                                   int num_channels,
                                   T* deinterleaved) {
   DCHECK_GT(num_channels, 0);
-  DCHECK_GT(num_frames, 0);
+  DCHECK_GT(num_frames, 0u);
 
   const T* const end = interleaved + num_frames * num_channels;
 
@@ -173,13 +173,13 @@
 
 template <typename T>
 void DownmixInterleavedToMono(const T* interleaved,
-                              int num_frames,
+                              size_t num_frames,
                               int num_channels,
                               T* deinterleaved);
 
 template <>
 void DownmixInterleavedToMono<int16_t>(const int16_t* interleaved,
-                                       int num_frames,
+                                       size_t num_frames,
                                        int num_channels,
                                        int16_t* deinterleaved);
 
diff --git a/webrtc/common_audio/lapped_transform.cc b/webrtc/common_audio/lapped_transform.cc
index 0239498..525450d 100644
--- a/webrtc/common_audio/lapped_transform.cc
+++ b/webrtc/common_audio/lapped_transform.cc
@@ -20,7 +20,7 @@
 namespace webrtc {
 
 void LappedTransform::BlockThunk::ProcessBlock(const float* const* input,
-                                               int num_frames,
+                                               size_t num_frames,
                                                int num_input_channels,
                                                int num_output_channels,
                                                float* const* output) {
@@ -35,7 +35,7 @@
                            parent_->cplx_pre_.Row(i));
   }
 
-  int block_length = RealFourier::ComplexLength(
+  size_t block_length = RealFourier::ComplexLength(
       RealFourier::FftOrder(num_frames));
   CHECK_EQ(parent_->cplx_length_, block_length);
   parent_->block_processor_->ProcessAudioBlock(parent_->cplx_pre_.Array(),
@@ -54,10 +54,10 @@
 
 LappedTransform::LappedTransform(int num_in_channels,
                                  int num_out_channels,
-                                 int chunk_length,
+                                 size_t chunk_length,
                                  const float* window,
-                                 int block_length,
-                                 int shift_amount,
+                                 size_t block_length,
+                                 size_t shift_amount,
                                  Callback* callback)
     : blocker_callback_(this),
       num_in_channels_(num_in_channels),
@@ -84,12 +84,12 @@
                  cplx_length_,
                  RealFourier::kFftBufferAlignment) {
   CHECK(num_in_channels_ > 0 && num_out_channels_ > 0);
-  CHECK_GT(block_length_, 0);
-  CHECK_GT(chunk_length_, 0);
+  CHECK_GT(block_length_, 0u);
+  CHECK_GT(chunk_length_, 0u);
   CHECK(block_processor_);
 
   // block_length_ power of 2?
-  CHECK_EQ(0, block_length_ & (block_length_ - 1));
+  CHECK_EQ(0u, block_length_ & (block_length_ - 1));
 }
 
 void LappedTransform::ProcessChunk(const float* const* in_chunk,
diff --git a/webrtc/common_audio/lapped_transform.h b/webrtc/common_audio/lapped_transform.h
index 270644c..75af186 100644
--- a/webrtc/common_audio/lapped_transform.h
+++ b/webrtc/common_audio/lapped_transform.h
@@ -35,7 +35,7 @@
     virtual ~Callback() {}
 
     virtual void ProcessAudioBlock(const std::complex<float>* const* in_block,
-                                   int num_in_channels, int frames,
+                                   int num_in_channels, size_t frames,
                                    int num_out_channels,
                                    std::complex<float>* const* out_block) = 0;
   };
@@ -46,8 +46,12 @@
   // |block_length| defines the length of a block, in samples.
   // |shift_amount| is in samples. |callback| is the caller-owned audio
   // processing function called for each block of the input chunk.
-  LappedTransform(int num_in_channels, int num_out_channels, int chunk_length,
-                  const float* window, int block_length, int shift_amount,
+  LappedTransform(int num_in_channels,
+                  int num_out_channels,
+                  size_t chunk_length,
+                  const float* window,
+                  size_t block_length,
+                  size_t shift_amount,
                   Callback* callback);
   ~LappedTransform() {}
 
@@ -63,7 +67,7 @@
   // to ProcessChunk via the parameter in_chunk.
   //
   // Returns the same chunk_length passed to the LappedTransform constructor.
-  int chunk_length() const { return chunk_length_; }
+  size_t chunk_length() const { return chunk_length_; }
 
   // Get the number of input channels.
   //
@@ -89,7 +93,7 @@
    public:
     explicit BlockThunk(LappedTransform* parent) : parent_(parent) {}
 
-    virtual void ProcessBlock(const float* const* input, int num_frames,
+    virtual void ProcessBlock(const float* const* input, size_t num_frames,
                               int num_input_channels, int num_output_channels,
                               float* const* output);
 
@@ -100,14 +104,14 @@
   const int num_in_channels_;
   const int num_out_channels_;
 
-  const int block_length_;
-  const int chunk_length_;
+  const size_t block_length_;
+  const size_t chunk_length_;
 
   Callback* const block_processor_;
   Blocker blocker_;
 
   rtc::scoped_ptr<RealFourier> fft_;
-  const int cplx_length_;
+  const size_t cplx_length_;
   AlignedArray<float> real_buf_;
   AlignedArray<std::complex<float> > cplx_pre_;
   AlignedArray<std::complex<float> > cplx_post_;
diff --git a/webrtc/common_audio/lapped_transform_unittest.cc b/webrtc/common_audio/lapped_transform_unittest.cc
index a21398c..49751c0 100644
--- a/webrtc/common_audio/lapped_transform_unittest.cc
+++ b/webrtc/common_audio/lapped_transform_unittest.cc
@@ -26,7 +26,7 @@
 
   virtual void ProcessAudioBlock(const complex<float>* const* in_block,
                                  int in_channels,
-                                 int frames,
+                                 size_t frames,
                                  int out_channels,
                                  complex<float>* const* out_block) {
     CHECK_EQ(in_channels, out_channels);
@@ -50,19 +50,19 @@
 
   virtual void ProcessAudioBlock(const complex<float>* const* in_block,
                                  int in_channels,
-                                 int frames,
+                                 size_t frames,
                                  int out_channels,
                                  complex<float>* const* out_block) {
     CHECK_EQ(in_channels, out_channels);
 
-    int full_length = (frames - 1) * 2;
+    size_t full_length = (frames - 1) * 2;
     ++block_num_;
 
     if (block_num_ > 0) {
       ASSERT_NEAR(in_block[0][0].real(), static_cast<float>(full_length),
                   1e-5f);
       ASSERT_NEAR(in_block[0][0].imag(), 0.0f, 1e-5f);
-      for (int i = 1; i < frames; ++i) {
+      for (size_t i = 1; i < frames; ++i) {
         ASSERT_NEAR(in_block[0][i].real(), 0.0f, 1e-5f);
         ASSERT_NEAR(in_block[0][i].imag(), 0.0f, 1e-5f);
       }
@@ -190,14 +190,14 @@
   // Make sure that chunk_length returns the same value passed to the
   // LappedTransform constructor.
   {
-    const int kExpectedChunkLength = 512;
+    const size_t kExpectedChunkLength = 512;
     const LappedTransform trans(1, 1, kExpectedChunkLength, window,
                                 kBlockLength, kBlockLength, &call);
 
     EXPECT_EQ(kExpectedChunkLength, trans.chunk_length());
   }
   {
-    const int kExpectedChunkLength = 160;
+    const size_t kExpectedChunkLength = 160;
     const LappedTransform trans(1, 1, kExpectedChunkLength, window,
                                 kBlockLength, kBlockLength, &call);
 
diff --git a/webrtc/common_audio/real_fourier.cc b/webrtc/common_audio/real_fourier.cc
index cb707e4..29b704b 100644
--- a/webrtc/common_audio/real_fourier.cc
+++ b/webrtc/common_audio/real_fourier.cc
@@ -29,17 +29,17 @@
 #endif
 }
 
-int RealFourier::FftOrder(int length) {
-  CHECK_GT(length, 0);
+int RealFourier::FftOrder(size_t length) {
+  CHECK_GT(length, 0U);
   return WebRtcSpl_GetSizeInBits(static_cast<uint32_t>(length - 1));
 }
 
-int RealFourier::FftLength(int order) {
+size_t RealFourier::FftLength(int order) {
   CHECK_GE(order, 0);
-  return 1 << order;
+  return static_cast<size_t>(1 << order);
 }
 
-int RealFourier::ComplexLength(int order) {
+size_t RealFourier::ComplexLength(int order) {
   return FftLength(order) / 2 + 1;
 }
 
diff --git a/webrtc/common_audio/real_fourier.h b/webrtc/common_audio/real_fourier.h
index cc49dbf..ef4fec8 100644
--- a/webrtc/common_audio/real_fourier.h
+++ b/webrtc/common_audio/real_fourier.h
@@ -39,14 +39,14 @@
 
   // Helper to compute the smallest FFT order whose length (a power of 2) can
   // contain the given input length.
-  static int FftOrder(int length);
+  static int FftOrder(size_t length);
 
   // Helper to compute the input length from the FFT order.
-  static int FftLength(int order);
+  static size_t FftLength(int order);
 
   // Helper to compute the exact length, in complex floats, of the transform
   // output (i.e. |2^order / 2 + 1|).
-  static int ComplexLength(int order);
+  static size_t ComplexLength(int order);
 
   // Buffer allocation helpers. The buffers are large enough to hold |count|
   // floats/complexes and suitably aligned for use by the implementation.
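As a quick illustration of how the three size_t-returning helpers above fit together, here is a hedged sketch (the 160-sample input and the Demo() wrapper are illustrative, not part of the change):

    #include <cstddef>
    #include "webrtc/common_audio/real_fourier.h"

    void Demo() {
      const size_t length = 160;  // e.g. 10 ms of mono audio at 16 kHz.
      // Smallest order with 2^order >= length: 2^8 == 256, so order == 8.
      const int order = webrtc::RealFourier::FftOrder(length);
      // FftLength(8) == 256, and ComplexLength(8) == 256 / 2 + 1 == 129.
      const size_t fft_length = webrtc::RealFourier::FftLength(order);
      const size_t complex_length = webrtc::RealFourier::ComplexLength(order);
      (void)fft_length;
      (void)complex_length;
    }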
diff --git a/webrtc/common_audio/real_fourier_ooura.cc b/webrtc/common_audio/real_fourier_ooura.cc
index 6f76516..1c4004d 100644
--- a/webrtc/common_audio/real_fourier_ooura.cc
+++ b/webrtc/common_audio/real_fourier_ooura.cc
@@ -22,12 +22,12 @@
 
 namespace {
 
-void Conjugate(complex<float>* array, int complex_length) {
+void Conjugate(complex<float>* array, size_t complex_length) {
   std::for_each(array, array + complex_length,
                 [=](complex<float>& v) { v = std::conj(v); });
 }
 
-size_t ComputeWorkIpSize(int fft_length) {
+size_t ComputeWorkIpSize(size_t fft_length) {
   return static_cast<size_t>(2 + std::ceil(std::sqrt(
       static_cast<float>(fft_length))));
 }
@@ -40,7 +40,7 @@
       complex_length_(ComplexLength(order_)),
       // Zero-initializing work_ip_ will cause rdft to initialize these work
       // arrays on the first call.
-      work_ip_(new int[ComputeWorkIpSize(length_)]()),
+      work_ip_(new size_t[ComputeWorkIpSize(length_)]()),
       work_w_(new float[complex_length_]()) {
   CHECK_GE(fft_order, 1);
 }
@@ -66,7 +66,7 @@
     auto dest_complex = reinterpret_cast<complex<float>*>(dest);
     // The real output array is shorter than the input complex array by one
     // complex element.
-    const int dest_complex_length = complex_length_ - 1;
+    const size_t dest_complex_length = complex_length_ - 1;
     std::copy(src, src + dest_complex_length, dest_complex);
     // Restore Ooura's conjugate definition.
     Conjugate(dest_complex, dest_complex_length);
diff --git a/webrtc/common_audio/real_fourier_ooura.h b/webrtc/common_audio/real_fourier_ooura.h
index 67b3ffd..8d094bf 100644
--- a/webrtc/common_audio/real_fourier_ooura.h
+++ b/webrtc/common_audio/real_fourier_ooura.h
@@ -31,11 +31,11 @@
 
  private:
   const int order_;
-  const int length_;
-  const int complex_length_;
+  const size_t length_;
+  const size_t complex_length_;
   // These are work arrays for Ooura. The names are based on the comments in
   // fft4g.c.
-  const rtc::scoped_ptr<int[]> work_ip_;
+  const rtc::scoped_ptr<size_t[]> work_ip_;
   const rtc::scoped_ptr<float[]> work_w_;
 };
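The size_t-ification of the Ooura work arrays is mechanical, but the sizing formula is worth seeing in isolation; a standalone sketch mirroring ComputeWorkIpSize() above (WorkIpSizeForLength() is an illustrative copy, not the actual function):

    #include <cmath>
    #include <cstddef>

    // Ooura's rdft wants an integer work area of 2 + ceil(sqrt(n)) entries;
    // zero-initializing it makes rdft build its tables on the first call.
    size_t WorkIpSizeForLength(size_t fft_length) {
      return static_cast<size_t>(
          2 + std::ceil(std::sqrt(static_cast<float>(fft_length))));
    }
    // e.g. fft_length == 256  ->  2 + 16 == 18 entries.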
 
diff --git a/webrtc/common_audio/real_fourier_unittest.cc b/webrtc/common_audio/real_fourier_unittest.cc
index 526f762..5c85421 100644
--- a/webrtc/common_audio/real_fourier_unittest.cc
+++ b/webrtc/common_audio/real_fourier_unittest.cc
@@ -46,12 +46,12 @@
 }
 
 TEST(RealFourierStaticsTest, ComplexLengthComputation) {
-  EXPECT_EQ(2, RealFourier::ComplexLength(1));
-  EXPECT_EQ(3, RealFourier::ComplexLength(2));
-  EXPECT_EQ(5, RealFourier::ComplexLength(3));
-  EXPECT_EQ(9, RealFourier::ComplexLength(4));
-  EXPECT_EQ(17, RealFourier::ComplexLength(5));
-  EXPECT_EQ(65, RealFourier::ComplexLength(7));
+  EXPECT_EQ(2U, RealFourier::ComplexLength(1));
+  EXPECT_EQ(3U, RealFourier::ComplexLength(2));
+  EXPECT_EQ(5U, RealFourier::ComplexLength(3));
+  EXPECT_EQ(9U, RealFourier::ComplexLength(4));
+  EXPECT_EQ(17U, RealFourier::ComplexLength(5));
+  EXPECT_EQ(65U, RealFourier::ComplexLength(7));
 }
 
 template <typename T>
diff --git a/webrtc/common_audio/resampler/include/push_resampler.h b/webrtc/common_audio/resampler/include/push_resampler.h
index a4e57e4..b5c0003 100644
--- a/webrtc/common_audio/resampler/include/push_resampler.h
+++ b/webrtc/common_audio/resampler/include/push_resampler.h
@@ -33,7 +33,7 @@
 
   // Returns the total number of samples provided in destination (e.g. 32 kHz,
   // 2 channel audio gives 640 samples).
-  int Resample(const T* src, int src_length, T* dst, int dst_capacity);
+  int Resample(const T* src, size_t src_length, T* dst, size_t dst_capacity);
 
  private:
   rtc::scoped_ptr<PushSincResampler> sinc_resampler_;
diff --git a/webrtc/common_audio/resampler/include/resampler.h b/webrtc/common_audio/resampler/include/resampler.h
index 4b63e9c..0d4c1af 100644
--- a/webrtc/common_audio/resampler/include/resampler.h
+++ b/webrtc/common_audio/resampler/include/resampler.h
@@ -16,6 +16,8 @@
 #ifndef WEBRTC_RESAMPLER_RESAMPLER_H_
 #define WEBRTC_RESAMPLER_RESAMPLER_H_
 
+#include <stddef.h>
+
 #include "webrtc/typedefs.h"
 
 namespace webrtc {
@@ -36,8 +38,8 @@
     int ResetIfNeeded(int inFreq, int outFreq, int num_channels);
 
     // Resample samplesIn to samplesOut.
-    int Push(const int16_t* samplesIn, int lengthIn, int16_t* samplesOut,
-             int maxLen, int &outLen);
+    int Push(const int16_t* samplesIn, size_t lengthIn, int16_t* samplesOut,
+             size_t maxLen, size_t &outLen);
 
 private:
     enum ResamplerMode
@@ -73,10 +75,10 @@
     // Storage if needed
     int16_t* in_buffer_;
     int16_t* out_buffer_;
-    int in_buffer_size_;
-    int out_buffer_size_;
-    int in_buffer_size_max_;
-    int out_buffer_size_max_;
+    size_t in_buffer_size_;
+    size_t out_buffer_size_;
+    size_t in_buffer_size_max_;
+    size_t out_buffer_size_max_;
 
     int my_in_frequency_khz_;
     int my_out_frequency_khz_;
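From the caller's side, the widened Push() contract looks like the sketch below (hedged: the 16 kHz -> 48 kHz rates and buffer sizes are illustrative, and the resampler is assumed to have been configured via Reset(16000, 48000, 1) first, as in the unit tests):

    #include <cstddef>
    #include <cstdint>
    #include "webrtc/common_audio/resampler/include/resampler.h"

    int PushExample(webrtc::Resampler* rs, const int16_t* in) {
      const size_t kLengthIn = 160;  // 10 ms of mono audio at 16 kHz.
      int16_t out[480];              // Room for 10 ms at 48 kHz.
      size_t out_len = 0;            // Previously an int out-parameter.
      // Returns 0 on success and sets out_len to the samples written (480).
      return rs->Push(in, kLengthIn, out, 480, out_len);
    }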
diff --git a/webrtc/common_audio/resampler/push_resampler.cc b/webrtc/common_audio/resampler/push_resampler.cc
index 973c8f7..566acde 100644
--- a/webrtc/common_audio/resampler/push_resampler.cc
+++ b/webrtc/common_audio/resampler/push_resampler.cc
@@ -47,8 +47,10 @@
   dst_sample_rate_hz_ = dst_sample_rate_hz;
   num_channels_ = num_channels;
 
-  const int src_size_10ms_mono = src_sample_rate_hz / 100;
-  const int dst_size_10ms_mono = dst_sample_rate_hz / 100;
+  const size_t src_size_10ms_mono =
+      static_cast<size_t>(src_sample_rate_hz / 100);
+  const size_t dst_size_10ms_mono =
+      static_cast<size_t>(dst_sample_rate_hz / 100);
   sinc_resampler_.reset(new PushSincResampler(src_size_10ms_mono,
                                               dst_size_10ms_mono));
   if (num_channels_ == 2) {
@@ -64,10 +66,12 @@
 }
 
 template <typename T>
-int PushResampler<T>::Resample(const T* src, int src_length, T* dst,
-                               int dst_capacity) {
-  const int src_size_10ms = src_sample_rate_hz_ * num_channels_ / 100;
-  const int dst_size_10ms = dst_sample_rate_hz_ * num_channels_ / 100;
+int PushResampler<T>::Resample(const T* src, size_t src_length, T* dst,
+                               size_t dst_capacity) {
+  const size_t src_size_10ms =
+      static_cast<size_t>(src_sample_rate_hz_ * num_channels_ / 100);
+  const size_t dst_size_10ms =
+      static_cast<size_t>(dst_sample_rate_hz_ * num_channels_ / 100);
   if (src_length != src_size_10ms || dst_capacity < dst_size_10ms)
     return -1;
 
@@ -75,15 +79,15 @@
     // The old resampler provides this memcpy facility in the case of matching
     // sample rates, so reproduce it here for the sinc resampler.
     memcpy(dst, src, src_length * sizeof(T));
-    return src_length;
+    return static_cast<int>(src_length);
   }
   if (num_channels_ == 2) {
-    const int src_length_mono = src_length / num_channels_;
-    const int dst_capacity_mono = dst_capacity / num_channels_;
+    const size_t src_length_mono = src_length / num_channels_;
+    const size_t dst_capacity_mono = dst_capacity / num_channels_;
     T* deinterleaved[] = {src_left_.get(), src_right_.get()};
     Deinterleave(src, src_length_mono, num_channels_, deinterleaved);
 
-    int dst_length_mono =
+    size_t dst_length_mono =
         sinc_resampler_->Resample(src_left_.get(), src_length_mono,
                                   dst_left_.get(), dst_capacity_mono);
     sinc_resampler_right_->Resample(src_right_.get(), src_length_mono,
@@ -92,9 +96,10 @@
     deinterleaved[0] = dst_left_.get();
     deinterleaved[1] = dst_right_.get();
     Interleave(deinterleaved, dst_length_mono, num_channels_, dst);
-    return dst_length_mono * num_channels_;
+    return static_cast<int>(dst_length_mono * num_channels_);
   } else {
-    return sinc_resampler_->Resample(src, src_length, dst, dst_capacity);
+    return static_cast<int>(
+        sinc_resampler_->Resample(src, src_length, dst, dst_capacity));
   }
 }
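The 10 ms framing contract enforced above is easiest to read from a caller; a hedged sketch (assuming the InitializeIfNeeded() entry point declared earlier in push_resampler.h, which is not shown in this hunk; rates are illustrative):

    #include <cstdint>
    #include "webrtc/common_audio/resampler/include/push_resampler.h"

    int ResampleOneFrame() {
      webrtc::PushResampler<int16_t> resampler;
      resampler.InitializeIfNeeded(16000, 48000, 1);  // src, dst, channels.
      int16_t src[160] = {0};  // Must be exactly 10 ms: 16000 / 100 samples.
      int16_t dst[480];        // Must hold at least 48000 / 100 samples.
      // Any other src length, or a smaller dst capacity, returns -1.
      return resampler.Resample(src, 160, dst, 480);  // 480 on success.
    }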
 
diff --git a/webrtc/common_audio/resampler/push_sinc_resampler.cc b/webrtc/common_audio/resampler/push_sinc_resampler.cc
index 7d37202..72ed56b 100644
--- a/webrtc/common_audio/resampler/push_sinc_resampler.cc
+++ b/webrtc/common_audio/resampler/push_sinc_resampler.cc
@@ -17,7 +17,8 @@
 
 namespace webrtc {
 
-PushSincResampler::PushSincResampler(int source_frames, int destination_frames)
+PushSincResampler::PushSincResampler(size_t source_frames,
+                                     size_t destination_frames)
     : resampler_(new SincResampler(source_frames * 1.0 / destination_frames,
                                    source_frames,
                                    this)),
@@ -30,10 +31,10 @@
 PushSincResampler::~PushSincResampler() {
 }
 
-int PushSincResampler::Resample(const int16_t* source,
-                                int source_length,
-                                int16_t* destination,
-                                int destination_capacity) {
+size_t PushSincResampler::Resample(const int16_t* source,
+                                   size_t source_length,
+                                   int16_t* destination,
+                                   size_t destination_capacity) {
   if (!float_buffer_.get())
     float_buffer_.reset(new float[destination_frames_]);
 
@@ -45,10 +46,10 @@
   return destination_frames_;
 }
 
-int PushSincResampler::Resample(const float* source,
-                                int source_length,
-                                float* destination,
-                                int destination_capacity) {
+size_t PushSincResampler::Resample(const float* source,
+                                   size_t source_length,
+                                   float* destination,
+                                   size_t destination_capacity) {
   CHECK_EQ(source_length, resampler_->request_frames());
   CHECK_GE(destination_capacity, destination_frames_);
   // Cache the source pointer. Calling Resample() will immediately trigger
@@ -77,7 +78,7 @@
   return destination_frames_;
 }
 
-void PushSincResampler::Run(int frames, float* destination) {
+void PushSincResampler::Run(size_t frames, float* destination) {
   // Ensure we are only asked for the available samples. This would fail if
   // Run() was triggered more than once per Resample() call.
   CHECK_EQ(source_available_, frames);
@@ -93,7 +94,7 @@
   if (source_ptr_) {
     std::memcpy(destination, source_ptr_, frames * sizeof(*destination));
   } else {
-    for (int i = 0; i < frames; ++i)
+    for (size_t i = 0; i < frames; ++i)
       destination[i] = static_cast<float>(source_ptr_int_[i]);
   }
   source_available_ -= frames;
diff --git a/webrtc/common_audio/resampler/push_sinc_resampler.h b/webrtc/common_audio/resampler/push_sinc_resampler.h
index c48ec71..4307451 100644
--- a/webrtc/common_audio/resampler/push_sinc_resampler.h
+++ b/webrtc/common_audio/resampler/push_sinc_resampler.h
@@ -27,7 +27,7 @@
   // Provide the size of the source and destination blocks in samples. These
   // must correspond to the same time duration (typically 10 ms) as the sample
   // ratio is inferred from them.
-  PushSincResampler(int source_frames, int destination_frames);
+  PushSincResampler(size_t source_frames, size_t destination_frames);
   ~PushSincResampler() override;
 
   // Perform the resampling. |source_frames| must always equal the
@@ -35,12 +35,12 @@
   // at least as large as |destination_frames|. Returns the number of samples
   // provided in destination (for convenience, since this will always be equal
   // to |destination_frames|).
-  int Resample(const int16_t* source, int source_frames,
-               int16_t* destination, int destination_capacity);
-  int Resample(const float* source,
-               int source_frames,
-               float* destination,
-               int destination_capacity);
+  size_t Resample(const int16_t* source, size_t source_frames,
+                  int16_t* destination, size_t destination_capacity);
+  size_t Resample(const float* source,
+                  size_t source_frames,
+                  float* destination,
+                  size_t destination_capacity);
 
   // Delay due to the filter kernel. Essentially, the time after which an input
   // sample will appear in the resampled output.
@@ -50,7 +50,7 @@
 
  protected:
   // Implements SincResamplerCallback.
-  void Run(int frames, float* destination) override;
+  void Run(size_t frames, float* destination) override;
 
  private:
   friend class PushSincResamplerTest;
@@ -60,13 +60,13 @@
   rtc::scoped_ptr<float[]> float_buffer_;
   const float* source_ptr_;
   const int16_t* source_ptr_int_;
-  const int destination_frames_;
+  const size_t destination_frames_;
 
   // True on the first call to Resample(), to prime the SincResampler buffer.
   bool first_pass_;
 
   // Used to assert we are only requested for as much data as is available.
-  int source_available_;
+  size_t source_available_;
 
   DISALLOW_COPY_AND_ASSIGN(PushSincResampler);
 };
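Since the block sizes now carry the sample-rate ratio implicitly, a short usage sketch of the push wrapper (sizes are illustrative; both blocks must span the same 10 ms):

    #include <cstddef>
    #include <cstdint>
    #include "webrtc/common_audio/resampler/push_sinc_resampler.h"

    size_t SincPushExample(const int16_t* in /* 160 samples */) {
      // 10 ms at 16 kHz in, 10 ms at 48 kHz out; the ratio (1/3) is
      // inferred as source_frames / destination_frames.
      webrtc::PushSincResampler resampler(160, 480);
      int16_t out[480];
      // Always returns destination_frames, i.e. 480 here.
      return resampler.Resample(in, 160, out, 480);
    }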
diff --git a/webrtc/common_audio/resampler/push_sinc_resampler_unittest.cc b/webrtc/common_audio/resampler/push_sinc_resampler_unittest.cc
index f955a68..a7e9715 100644
--- a/webrtc/common_audio/resampler/push_sinc_resampler_unittest.cc
+++ b/webrtc/common_audio/resampler/push_sinc_resampler_unittest.cc
@@ -58,14 +58,14 @@
 
 class ZeroSource : public SincResamplerCallback {
  public:
-  void Run(int frames, float* destination) {
+  void Run(size_t frames, float* destination) {
     std::memset(destination, 0, sizeof(float) * frames);
   }
 };
 
 void PushSincResamplerTest::ResampleBenchmarkTest(bool int_format) {
-  const int input_samples = input_rate_ / 100;
-  const int output_samples = output_rate_ / 100;
+  const size_t input_samples = static_cast<size_t>(input_rate_ / 100);
+  const size_t output_samples = static_cast<size_t>(output_rate_ / 100);
   const int kResampleIterations = 500000;
 
   // Source for data to be resampled.
@@ -77,7 +77,7 @@
   rtc::scoped_ptr<int16_t[]> destination_int(new int16_t[output_samples]);
 
   resampler_source.Run(input_samples, source.get());
-  for (int i = 0; i < input_samples; ++i) {
+  for (size_t i = 0; i < input_samples; ++i) {
     source_int[i] = static_cast<int16_t>(floor(32767 * source[i] + 0.5));
   }
 
@@ -134,11 +134,13 @@
   // Make comparisons using one second of data.
   static const double kTestDurationSecs = 1;
   // 10 ms blocks.
-  const int kNumBlocks = kTestDurationSecs * 100;
-  const int input_block_size = input_rate_ / 100;
-  const int output_block_size = output_rate_ / 100;
-  const int input_samples = kTestDurationSecs * input_rate_;
-  const int output_samples = kTestDurationSecs * output_rate_;
+  const size_t kNumBlocks = static_cast<size_t>(kTestDurationSecs * 100);
+  const size_t input_block_size = static_cast<size_t>(input_rate_ / 100);
+  const size_t output_block_size = static_cast<size_t>(output_rate_ / 100);
+  const size_t input_samples =
+      static_cast<size_t>(kTestDurationSecs * input_rate_);
+  const size_t output_samples =
+      static_cast<size_t>(kTestDurationSecs * output_rate_);
 
   // Nyquist frequency for the input sampling rate.
   const double input_nyquist_freq = 0.5 * input_rate_;
@@ -163,7 +165,7 @@
   // deal with it in the test by delaying the "pure" source to match. It must be
   // checked before the first call to Resample(), because ChunkSize() will
   // change afterwards.
-  const int output_delay_samples = output_block_size -
+  const size_t output_delay_samples = output_block_size -
       resampler.get_resampler_for_testing()->ChunkSize();
 
   // Generate resampled signal.
@@ -171,7 +173,7 @@
   // rather than in a single pass, to exercise how it will be used in WebRTC.
   resampler_source.Run(input_samples, source.get());
   if (int_format) {
-    for (int i = 0; i < kNumBlocks; ++i) {
+    for (size_t i = 0; i < kNumBlocks; ++i) {
       FloatToS16(&source[i * input_block_size], input_block_size,
                source_int.get());
       EXPECT_EQ(output_block_size,
@@ -183,7 +185,7 @@
                &resampled_destination[i * output_block_size]);
     }
   } else {
-    for (int i = 0; i < kNumBlocks; ++i) {
+    for (size_t i = 0; i < kNumBlocks; ++i) {
       EXPECT_EQ(
           output_block_size,
           resampler.Resample(&source[i * input_block_size],
@@ -211,7 +213,7 @@
   double low_frequency_range = kLowFrequencyNyquistRange * 0.5 * minimum_rate;
   double high_frequency_range = kHighFrequencyNyquistRange * 0.5 * minimum_rate;
 
-  for (int i = 0; i < output_samples; ++i) {
+  for (size_t i = 0; i < output_samples; ++i) {
     double error = fabs(resampled_destination[i] - pure_destination[i]);
 
     if (pure_source.Frequency(i) < low_frequency_range) {
diff --git a/webrtc/common_audio/resampler/resampler.cc b/webrtc/common_audio/resampler/resampler.cc
index bfaeffc..c9e7a1f 100644
--- a/webrtc/common_audio/resampler/resampler.cc
+++ b/webrtc/common_audio/resampler/resampler.cc
@@ -401,8 +401,8 @@
 }
 
 // Synchronous resampling; all output samples are written to samplesOut
-int Resampler::Push(const int16_t * samplesIn, int lengthIn,
-                    int16_t* samplesOut, int maxLen, int &outLen)
+int Resampler::Push(const int16_t * samplesIn, size_t lengthIn,
+                    int16_t* samplesOut, size_t maxLen, size_t &outLen)
 {
     if (num_channels_ == 2)
     {
@@ -413,7 +413,7 @@
         int16_t* out_right =
                 (int16_t*)malloc(maxLen / 2 * sizeof(int16_t));
         int res = 0;
-        for (int i = 0; i < lengthIn; i += 2)
+        for (size_t i = 0; i < lengthIn; i += 2)
         {
             left[i >> 1] = samplesIn[i];
             right[i >> 1] = samplesIn[i + 1];
@@ -422,8 +422,8 @@
         // It's OK to overwrite the local parameter, since it's just a copy
         lengthIn = lengthIn / 2;
 
-        int actualOutLen_left = 0;
-        int actualOutLen_right = 0;
+        size_t actualOutLen_left = 0;
+        size_t actualOutLen_right = 0;
         // Do resampling for each channel
         res |= slave_left_->Push(left, lengthIn, out_left, maxLen / 2, actualOutLen_left);
         res |= slave_right_->Push(right, lengthIn, out_right, maxLen / 2, actualOutLen_right);
@@ -437,7 +437,7 @@
         }
 
         // Reassemble the signal
-        for (int i = 0; i < actualOutLen_left; i++)
+        for (size_t i = 0; i < actualOutLen_left; i++)
         {
             samplesOut[i * 2] = out_left[i];
             samplesOut[i * 2 + 1] = out_right[i];
@@ -486,7 +486,7 @@
             }
             tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
 
-            for (int i = 0; i < lengthIn; i += 160)
+            for (size_t i = 0; i < lengthIn; i += 160)
             {
                 WebRtcSpl_Resample16khzTo48khz(samplesIn + i, samplesOut + i * 3,
                                                (WebRtcSpl_State16khzTo48khz *)state1_,
@@ -529,7 +529,7 @@
             WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
             outLen = lengthIn * 2;
 
-            for (int i = 0; i < outLen; i += 160)
+            for (size_t i = 0; i < outLen; i += 160)
             {
                 WebRtcSpl_Resample16khzTo48khz(tmp + i, samplesOut + i * 3,
                                                (WebRtcSpl_State16khzTo48khz *)state2_,
@@ -560,7 +560,7 @@
             WebRtcSpl_UpsampleBy2(samplesOut, outLen, tmp, (int32_t*) state2_);
             outLen = outLen * 2;
             // 4:12
-            for (int i = 0; i < outLen; i += 160) {
+            for (size_t i = 0; i < outLen; i += 160) {
               // WebRtcSpl_Resample16khzTo48khz() takes a block of 160 samples
               // as input and outputs a resampled block of 480 samples. The
               // data is now actually in 32 kHz sampling rate, despite the
@@ -589,7 +589,7 @@
             }
             tmp = static_cast<int16_t*> (malloc(sizeof(int16_t) * lengthIn * 3));
             tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-            for (int i = 0; i < lengthIn; i += 160)
+            for (size_t i = 0; i < lengthIn; i += 160)
             {
                 WebRtcSpl_Resample16khzTo48khz(samplesIn + i, tmp + i * 3,
                                                (WebRtcSpl_State16khzTo48khz *)state1_,
@@ -621,7 +621,7 @@
 
             tmp_mem = (int32_t*)malloc(98 * sizeof(int32_t));
 
-            for (int i = 0; i < lengthIn; i += 80)
+            for (size_t i = 0; i < lengthIn; i += 80)
             {
                 WebRtcSpl_Resample8khzTo22khz(tmp + i, samplesOut + (i * 11) / 4,
                                               (WebRtcSpl_State8khzTo22khz *)state2_,
@@ -645,7 +645,7 @@
             }
             tmp_mem = (int32_t*)malloc(98 * sizeof(int32_t));
 
-            for (int i = 0; i < lengthIn; i += 80)
+            for (size_t i = 0; i < lengthIn; i += 80)
             {
                 WebRtcSpl_Resample8khzTo22khz(samplesIn + i, samplesOut + (i * 11) / 4,
                                               (WebRtcSpl_State8khzTo22khz *)state1_,
@@ -667,7 +667,7 @@
             }
             tmp_mem = (int32_t*)malloc(88 * sizeof(int32_t));
 
-            for (int i = 0; i < lengthIn; i += 160)
+            for (size_t i = 0; i < lengthIn; i += 160)
             {
                 WebRtcSpl_Resample16khzTo22khz(samplesIn + i, samplesOut + (i * 11) / 8,
                                                (WebRtcSpl_State16khzTo22khz *)state1_,
@@ -693,7 +693,7 @@
 
             WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
 
-            for (int i = 0; i < (lengthIn * 2); i += 220)
+            for (size_t i = 0; i < (lengthIn * 2); i += 220)
             {
                 WebRtcSpl_Resample22khzTo16khz(tmp + i, samplesOut + (i / 220) * 160,
                                                (WebRtcSpl_State22khzTo16khz *)state2_,
@@ -725,7 +725,7 @@
             WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
 
             // 22 -> 16 in tmp
-            for (int i = 0; i < (lengthIn * 2); i += 220)
+            for (size_t i = 0; i < (lengthIn * 2); i += 220)
             {
                 WebRtcSpl_Resample22khzTo16khz(samplesOut + i, tmp + (i / 220) * 160,
                                                (WebRtcSpl_State22khzTo16khz *)state2_,
@@ -763,7 +763,7 @@
             }
             tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
 
-            for (int i = 0; i < lengthIn; i += 480)
+            for (size_t i = 0; i < lengthIn; i += 480)
             {
                 WebRtcSpl_Resample48khzTo16khz(samplesIn + i, samplesOut + i / 3,
                                                (WebRtcSpl_State48khzTo16khz *)state1_,
@@ -801,7 +801,7 @@
             tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
             tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn) / 3);
 
-            for (int i = 0; i < lengthIn; i += 480)
+            for (size_t i = 0; i < lengthIn; i += 480)
             {
                 WebRtcSpl_Resample48khzTo16khz(samplesIn + i, tmp + i / 3,
                                                (WebRtcSpl_State48khzTo16khz *)state1_,
@@ -827,7 +827,7 @@
             tmp = (int16_t*) malloc((sizeof(int16_t) * lengthIn) / 3);
             tmp_2 = (int16_t*) malloc((sizeof(int16_t) * lengthIn) / 6);
             // 12:4
-            for (int i = 0; i < lengthIn; i += 480) {
+            for (size_t i = 0; i < lengthIn; i += 480) {
               // WebRtcSpl_Resample48khzTo16khz() takes a block of 480 samples
               // as input and outputs a resampled block of 160 samples. The
               // data is now actually in 96 kHz sampling rate, despite the
@@ -867,7 +867,7 @@
                 return -1;
             }
             tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-            for (int i = 0; i < lengthIn; i += 480)
+            for (size_t i = 0; i < lengthIn; i += 480)
             {
                 WebRtcSpl_Resample48khzTo16khz(tmp + i, samplesOut + i / 3,
                                                (WebRtcSpl_State48khzTo16khz *)state2_,
@@ -891,7 +891,7 @@
             tmp_mem = (int32_t*)malloc(126 * sizeof(int32_t));
             tmp = (int16_t*)malloc((lengthIn * 4) / 11 * sizeof(int16_t));
 
-            for (int i = 0; i < lengthIn; i += 220)
+            for (size_t i = 0; i < lengthIn; i += 220)
             {
                 WebRtcSpl_Resample22khzTo8khz(samplesIn + i, tmp + (i * 4) / 11,
                                               (WebRtcSpl_State22khzTo8khz *)state1_,
@@ -919,7 +919,7 @@
             }
             tmp_mem = (int32_t*)malloc(126 * sizeof(int32_t));
 
-            for (int i = 0; i < lengthIn; i += 220)
+            for (size_t i = 0; i < lengthIn; i += 220)
             {
                 WebRtcSpl_Resample22khzTo8khz(samplesIn + i, samplesOut + (i * 4) / 11,
                                               (WebRtcSpl_State22khzTo8khz *)state1_,
@@ -941,7 +941,7 @@
             }
             tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
 
-            for (int i = 0; i < lengthIn; i += 220)
+            for (size_t i = 0; i < lengthIn; i += 220)
             {
                 WebRtcSpl_Resample22khzTo16khz(samplesIn + i, samplesOut + (i * 8) / 11,
                                                (WebRtcSpl_State22khzTo16khz *)state1_,
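The stereo branch of Push() above follows a split/process/merge shape that the signed-to-unsigned change leaves intact; the pattern in isolation (a sketch using plain C++ containers, not the malloc-based original):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Deinterleave L/R, hand each channel to a mono operation, reinterleave.
    // mono_op stands in for the per-channel slave resampler and returns the
    // per-channel output length.
    template <typename MonoOp>
    void SplitProcessMerge(const int16_t* in, size_t length_in,
                           int16_t* out, MonoOp mono_op) {
      std::vector<int16_t> left(length_in / 2), right(length_in / 2);
      for (size_t i = 0; i < length_in; i += 2) {
        left[i >> 1] = in[i];
        right[i >> 1] = in[i + 1];
      }
      const size_t out_len_mono = mono_op(left, right);
      for (size_t i = 0; i < out_len_mono; ++i) {
        out[i * 2] = left[i];
        out[i * 2 + 1] = right[i];
      }
    }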
diff --git a/webrtc/common_audio/resampler/resampler_unittest.cc b/webrtc/common_audio/resampler/resampler_unittest.cc
index 40a31bb..c5953d0 100644
--- a/webrtc/common_audio/resampler/resampler_unittest.cc
+++ b/webrtc/common_audio/resampler/resampler_unittest.cc
@@ -98,12 +98,12 @@
       SCOPED_TRACE(ss.str());
 
       if (ValidRates(kRates[i], kRates[j])) {
-        int in_length = kRates[i] / 100;
-        int out_length = 0;
+        size_t in_length = static_cast<size_t>(kRates[i] / 100);
+        size_t out_length = 0;
         EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kChannels));
         EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
                               out_length));
-        EXPECT_EQ(kRates[j] / 100, out_length);
+        EXPECT_EQ(static_cast<size_t>(kRates[j] / 100), out_length);
       } else {
         EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kChannels));
       }
@@ -120,13 +120,13 @@
       SCOPED_TRACE(ss.str());
 
       if (ValidRates(kRates[i], kRates[j])) {
-        int in_length = kChannels * kRates[i] / 100;
-        int out_length = 0;
+        size_t in_length = static_cast<size_t>(kChannels * kRates[i] / 100);
+        size_t out_length = 0;
         EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j],
                                kChannels));
         EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
                               out_length));
-        EXPECT_EQ(kChannels * kRates[j] / 100, out_length);
+        EXPECT_EQ(static_cast<size_t>(kChannels * kRates[j] / 100), out_length);
       } else {
         EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j],
                                 kChannels));
diff --git a/webrtc/common_audio/resampler/sinc_resampler.cc b/webrtc/common_audio/resampler/sinc_resampler.cc
index 81c789d..c4f1488 100644
--- a/webrtc/common_audio/resampler/sinc_resampler.cc
+++ b/webrtc/common_audio/resampler/sinc_resampler.cc
@@ -149,7 +149,7 @@
 #endif
 
 SincResampler::SincResampler(double io_sample_rate_ratio,
-                             int request_frames,
+                             size_t request_frames,
                              SincResamplerCallback* read_cb)
     : io_sample_rate_ratio_(io_sample_rate_ratio),
       read_cb_(read_cb),
@@ -215,14 +215,15 @@
   // Generates a set of windowed sinc() kernels.
   // We generate a range of sub-sample offsets from 0.0 to 1.0.
   const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
-  for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
+  for (size_t offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
     const float subsample_offset =
         static_cast<float>(offset_idx) / kKernelOffsetCount;
 
-    for (int i = 0; i < kKernelSize; ++i) {
-      const int idx = i + offset_idx * kKernelSize;
-      const float pre_sinc =
-          static_cast<float>(M_PI * (i - kKernelSize / 2 - subsample_offset));
+    for (size_t i = 0; i < kKernelSize; ++i) {
+      const size_t idx = i + offset_idx * kKernelSize;
+      const float pre_sinc = static_cast<float>(M_PI *
+          (static_cast<int>(i) - static_cast<int>(kKernelSize / 2) -
+           subsample_offset));
       kernel_pre_sinc_storage_[idx] = pre_sinc;
 
       // Compute Blackman window, matching the offset of the sinc().
@@ -252,9 +253,9 @@
   // Optimize reinitialization by reusing values which are independent of
   // |sinc_scale_factor|.  Provides a 3x speedup.
   const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
-  for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
-    for (int i = 0; i < kKernelSize; ++i) {
-      const int idx = i + offset_idx * kKernelSize;
+  for (size_t offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
+    for (size_t i = 0; i < kKernelSize; ++i) {
+      const size_t idx = i + offset_idx * kKernelSize;
       const float window = kernel_window_storage_[idx];
       const float pre_sinc = kernel_pre_sinc_storage_[idx];
 
@@ -266,8 +267,8 @@
   }
 }
 
-void SincResampler::Resample(int frames, float* destination) {
-  int remaining_frames = frames;
+void SincResampler::Resample(size_t frames, float* destination) {
+  size_t remaining_frames = frames;
 
   // Step (1) -- Prime the input buffer at the start of the input stream.
   if (!buffer_primed_ && remaining_frames) {
@@ -343,8 +344,8 @@
 
 #undef CONVOLVE_FUNC
 
-int SincResampler::ChunkSize() const {
-  return static_cast<int>(block_size_ / io_sample_rate_ratio_);
+size_t SincResampler::ChunkSize() const {
+  return static_cast<size_t>(block_size_ / io_sample_rate_ratio_);
 }
 
 void SincResampler::Flush() {
@@ -363,7 +364,7 @@
 
   // Generate a single output sample.  Unrolling this loop hurt performance in
   // local testing.
-  int n = kKernelSize;
+  size_t n = kKernelSize;
   while (n--) {
     sum1 += *input_ptr * *k1++;
     sum2 += *input_ptr++ * *k2++;
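One subtlety in the kernel-initialization hunk above deserves a note: with size_t indices, |i - kKernelSize / 2| would wrap around instead of going negative, which is why the diff casts both operands back to int before subtracting. A minimal sketch of the pitfall (illustrative):

    #include <cstddef>

    void UnsignedWrapPitfall() {
      const size_t kKernelSize = 32;
      const size_t i = 0;
      // In size_t arithmetic, i - kKernelSize / 2 wraps to a huge positive
      // value (2^64 - 16 on a 64-bit build) instead of -16, so:
      const int centered =
          static_cast<int>(i) - static_cast<int>(kKernelSize / 2);  // -16
      (void)centered;
    }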
diff --git a/webrtc/common_audio/resampler/sinc_resampler.h b/webrtc/common_audio/resampler/sinc_resampler.h
index 062117a..3f2ec33 100644
--- a/webrtc/common_audio/resampler/sinc_resampler.h
+++ b/webrtc/common_audio/resampler/sinc_resampler.h
@@ -28,7 +28,7 @@
 class SincResamplerCallback {
  public:
   virtual ~SincResamplerCallback() {}
-  virtual void Run(int frames, float* destination) = 0;
+  virtual void Run(size_t frames, float* destination) = 0;
 };
 
 // SincResampler is a high-quality single-channel sample-rate converter.
@@ -37,17 +37,18 @@
   // The kernel size can be adjusted for quality (higher is better) at the
   // expense of performance.  Must be a multiple of 32.
   // TODO(dalecurtis): Test performance to see if we can jack this up to 64+.
-  static const int kKernelSize = 32;
+  static const size_t kKernelSize = 32;
 
   // Default request size.  Affects how often, and for how many frames,
   // SincResampler calls back for input.  Must be greater than kKernelSize.
-  static const int kDefaultRequestSize = 512;
+  static const size_t kDefaultRequestSize = 512;
 
   // The kernel offset count is used for interpolation and is the number of
   // sub-sample kernel shifts.  Can be adjusted for quality (higher is better)
   // at the expense of allocating more memory.
-  static const int kKernelOffsetCount = 32;
-  static const int kKernelStorageSize = kKernelSize * (kKernelOffsetCount + 1);
+  static const size_t kKernelOffsetCount = 32;
+  static const size_t kKernelStorageSize =
+      kKernelSize * (kKernelOffsetCount + 1);
 
   // Constructs a SincResampler with the specified |read_cb|, which is used to
   // acquire audio data for resampling.  |io_sample_rate_ratio| is the ratio
@@ -56,18 +57,18 @@
   // greater than kKernelSize.  Specify kDefaultRequestSize if there are no
   // request size constraints.
   SincResampler(double io_sample_rate_ratio,
-                int request_frames,
+                size_t request_frames,
                 SincResamplerCallback* read_cb);
   virtual ~SincResampler();
 
   // Resample |frames| of data from |read_cb_| into |destination|.
-  void Resample(int frames, float* destination);
+  void Resample(size_t frames, float* destination);
 
   // The maximum size in frames that guarantees Resample() will only make a
   // single call to |read_cb_| for more data.
-  int ChunkSize() const;
+  size_t ChunkSize() const;
 
-  int request_frames() const { return request_frames_; }
+  size_t request_frames() const { return request_frames_; }
 
   // Flush all buffered data and reset internal indices.  Not thread safe, do
   // not call while Resample() is in progress.
@@ -125,13 +126,13 @@
   SincResamplerCallback* read_cb_;
 
   // The size (in samples) to request from each |read_cb_| execution.
-  const int request_frames_;
+  const size_t request_frames_;
 
   // The number of source frames processed per pass.
-  int block_size_;
+  size_t block_size_;
 
   // The size (in samples) of the internal buffer used by the resampler.
-  const int input_buffer_size_;
+  const size_t input_buffer_size_;
 
   // Contains kKernelOffsetCount kernels back-to-back, each of size kKernelSize.
   // The kernel offsets are sub-sample shifts of a windowed sinc shifted from
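The ChunkSize() guarantee above is exercised directly by the unit test that follows; in sketch form (the wrapper function is illustrative):

    #include <cstddef>
    #include "webrtc/common_audio/resampler/sinc_resampler.h"

    void ResampleOneChunk(webrtc::SincResampler* resampler, float* dest) {
      // Requesting exactly ChunkSize() frames costs a single Run()
      // callback on the source; larger requests may trigger several.
      const size_t chunk = resampler->ChunkSize();
      resampler->Resample(chunk, dest);
    }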
diff --git a/webrtc/common_audio/resampler/sinc_resampler_sse.cc b/webrtc/common_audio/resampler/sinc_resampler_sse.cc
index e00e0e5..9e3953f 100644
--- a/webrtc/common_audio/resampler/sinc_resampler_sse.cc
+++ b/webrtc/common_audio/resampler/sinc_resampler_sse.cc
@@ -27,13 +27,13 @@
   // Based on |input_ptr| alignment, we need to use loadu or load.  Unrolling
   // these loops hurt performance in local testing.
   if (reinterpret_cast<uintptr_t>(input_ptr) & 0x0F) {
-    for (int i = 0; i < kKernelSize; i += 4) {
+    for (size_t i = 0; i < kKernelSize; i += 4) {
       m_input = _mm_loadu_ps(input_ptr + i);
       m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
       m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
     }
   } else {
-    for (int i = 0; i < kKernelSize; i += 4) {
+    for (size_t i = 0; i < kKernelSize; i += 4) {
       m_input = _mm_load_ps(input_ptr + i);
       m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
       m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
diff --git a/webrtc/common_audio/resampler/sinc_resampler_unittest.cc b/webrtc/common_audio/resampler/sinc_resampler_unittest.cc
index 1aea902..8bdcb25 100644
--- a/webrtc/common_audio/resampler/sinc_resampler_unittest.cc
+++ b/webrtc/common_audio/resampler/sinc_resampler_unittest.cc
@@ -36,7 +36,7 @@
 // Helper class to ensure ChunkedResample() functions properly.
 class MockSource : public SincResamplerCallback {
  public:
-  MOCK_METHOD2(Run, void(int frames, float* destination));
+  MOCK_METHOD2(Run, void(size_t frames, float* destination));
 };
 
 ACTION(ClearBuffer) {
@@ -61,7 +61,7 @@
                           &mock_source);
 
   static const int kChunks = 2;
-  int max_chunk_size = resampler.ChunkSize() * kChunks;
+  size_t max_chunk_size = resampler.ChunkSize() * kChunks;
   rtc::scoped_ptr<float[]> resampled_destination(new float[max_chunk_size]);
 
   // Verify requesting ChunkSize() frames causes a single callback.
@@ -96,7 +96,7 @@
   EXPECT_CALL(mock_source, Run(_, _))
       .Times(1).WillOnce(ClearBuffer());
   resampler.Resample(resampler.ChunkSize() / 2, resampled_destination.get());
-  for (int i = 0; i < resampler.ChunkSize() / 2; ++i)
+  for (size_t i = 0; i < resampler.ChunkSize() / 2; ++i)
     ASSERT_FLOAT_EQ(resampled_destination[i], 0);
 }
 
@@ -251,8 +251,10 @@
 TEST_P(SincResamplerTest, Resample) {
   // Make comparisons using one second of data.
   static const double kTestDurationSecs = 1;
-  const int input_samples = kTestDurationSecs * input_rate_;
-  const int output_samples = kTestDurationSecs * output_rate_;
+  const size_t input_samples =
+      static_cast<size_t>(kTestDurationSecs * input_rate_);
+  const size_t output_samples =
+      static_cast<size_t>(kTestDurationSecs * output_rate_);
 
   // Nyquist frequency for the input sampling rate.
   const double input_nyquist_freq = 0.5 * input_rate_;
@@ -302,7 +304,7 @@
   int minimum_rate = std::min(input_rate_, output_rate_);
   double low_frequency_range = kLowFrequencyNyquistRange * 0.5 * minimum_rate;
   double high_frequency_range = kHighFrequencyNyquistRange * 0.5 * minimum_rate;
-  for (int i = 0; i < output_samples; ++i) {
+  for (size_t i = 0; i < output_samples; ++i) {
     double error = fabs(resampled_destination[i] - pure_destination[i]);
 
     if (pure_source.Frequency(i) < low_frequency_range) {
diff --git a/webrtc/common_audio/resampler/sinusoidal_linear_chirp_source.cc b/webrtc/common_audio/resampler/sinusoidal_linear_chirp_source.cc
index 597820b..5d21568 100644
--- a/webrtc/common_audio/resampler/sinusoidal_linear_chirp_source.cc
+++ b/webrtc/common_audio/resampler/sinusoidal_linear_chirp_source.cc
@@ -18,7 +18,9 @@
 namespace webrtc {
 
 SinusoidalLinearChirpSource::SinusoidalLinearChirpSource(int sample_rate,
-  int samples, double max_frequency, double delay_samples)
+                                                         size_t samples,
+                                                         double max_frequency,
+                                                         double delay_samples)
     : sample_rate_(sample_rate),
       total_samples_(samples),
       max_frequency_(max_frequency),
@@ -29,8 +31,8 @@
   k_ = (max_frequency_ - kMinFrequency) / duration;
 }
 
-void SinusoidalLinearChirpSource::Run(int frames, float* destination) {
-  for (int i = 0; i < frames; ++i, ++current_index_) {
+void SinusoidalLinearChirpSource::Run(size_t frames, float* destination) {
+  for (size_t i = 0; i < frames; ++i, ++current_index_) {
     // Filter out frequencies higher than Nyquist.
     if (Frequency(current_index_) > 0.5 * sample_rate_) {
       destination[i] = 0;
@@ -48,7 +50,7 @@
   }
 }
 
-double SinusoidalLinearChirpSource::Frequency(int position) {
+double SinusoidalLinearChirpSource::Frequency(size_t position) {
   return kMinFrequency + (position - delay_samples_) *
       (max_frequency_ - kMinFrequency) / total_samples_;
 }
diff --git a/webrtc/common_audio/resampler/sinusoidal_linear_chirp_source.h b/webrtc/common_audio/resampler/sinusoidal_linear_chirp_source.h
index 1bddfc9..f128180 100644
--- a/webrtc/common_audio/resampler/sinusoidal_linear_chirp_source.h
+++ b/webrtc/common_audio/resampler/sinusoidal_linear_chirp_source.h
@@ -26,14 +26,14 @@
  public:
   // |delay_samples| can be used to insert a fractional sample delay into the
   // source.  It will produce zeros until non-negative time is reached.
-  SinusoidalLinearChirpSource(int sample_rate, int samples,
+  SinusoidalLinearChirpSource(int sample_rate, size_t samples,
                               double max_frequency, double delay_samples);
 
   virtual ~SinusoidalLinearChirpSource() {}
 
-  void Run(int frames, float* destination) override;
+  void Run(size_t frames, float* destination) override;
 
-  double Frequency(int position);
+  double Frequency(size_t position);
 
  private:
   enum {
@@ -41,10 +41,10 @@
   };
 
   int sample_rate_;
-  int total_samples_;
+  size_t total_samples_;
   double max_frequency_;
   double k_;
-  int current_index_;
+  size_t current_index_;
   double delay_samples_;
 
   DISALLOW_COPY_AND_ASSIGN(SinusoidalLinearChirpSource);
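The chirp's instantaneous frequency above is a straight line from kMinFrequency at the (delayed) start to max_frequency at the last sample; the same formula as a standalone sketch (names are illustrative):

    #include <cstddef>

    // Mirrors SinusoidalLinearChirpSource::Frequency() with the new
    // size_t position parameter.
    double ChirpFrequency(size_t position, double delay_samples,
                          double min_frequency, double max_frequency,
                          size_t total_samples) {
      return min_frequency + (position - delay_samples) *
          (max_frequency - min_frequency) / total_samples;
    }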
diff --git a/webrtc/common_audio/signal_processing/auto_correlation.c b/webrtc/common_audio/signal_processing/auto_correlation.c
index 405a08e..8d6c879 100644
--- a/webrtc/common_audio/signal_processing/auto_correlation.c
+++ b/webrtc/common_audio/signal_processing/auto_correlation.c
@@ -11,20 +11,18 @@
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
 int WebRtcSpl_AutoCorrelation(const int16_t* in_vector,
-                              int in_vector_length,
-                              int order,
+                              size_t in_vector_length,
+                              size_t order,
                               int32_t* result,
                               int* scale) {
   int32_t sum = 0;
-  int i = 0, j = 0;
+  size_t i = 0, j = 0;
   int16_t smax = 0;
   int scaling = 0;
 
   if (order > in_vector_length) {
     /* Undefined */
     return -1;
-  } else if (order < 0) {
-    order = in_vector_length;
   }
 
   // Find the maximum absolute value of the samples.
@@ -64,5 +62,5 @@
   }
 
   *scale = scaling;
-  return order + 1;
+  return (int)(order + 1);
 }
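With the signed "negative order" escape hatch gone, the call site is straightforward; a hedged caller sketch (kOrder and the buffer size are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"

    void AutoCorrExample(const int16_t* frame, size_t frame_length) {
      const size_t kOrder = 10;
      int32_t corr[kOrder + 1];
      int scale = 0;
      // Writes lags 0..kOrder and returns kOrder + 1, or -1 when
      // kOrder > frame_length.
      const int lags =
          WebRtcSpl_AutoCorrelation(frame, frame_length, kOrder, corr, &scale);
      (void)lags;
    }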
diff --git a/webrtc/common_audio/signal_processing/complex_fft.c b/webrtc/common_audio/signal_processing/complex_fft.c
index f21b7d87..97ebacc 100644
--- a/webrtc/common_audio/signal_processing/complex_fft.c
+++ b/webrtc/common_audio/signal_processing/complex_fft.c
@@ -157,7 +157,8 @@
 
 int WebRtcSpl_ComplexIFFT(int16_t frfi[], int stages, int mode)
 {
-    int i, j, l, k, istep, n, m, scale, shift;
+    size_t i, j, l, istep, n, m;
+    int k, scale, shift;
     int16_t wr, wi;
     int32_t tr32, ti32, qr32, qi32;
     int32_t tmp32, round2;
diff --git a/webrtc/common_audio/signal_processing/copy_set_operations.c b/webrtc/common_audio/signal_processing/copy_set_operations.c
index 84d3bc4..9d7cf47 100644
--- a/webrtc/common_audio/signal_processing/copy_set_operations.c
+++ b/webrtc/common_audio/signal_processing/copy_set_operations.c
@@ -26,9 +26,9 @@
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
 
-void WebRtcSpl_MemSetW16(int16_t *ptr, int16_t set_value, int length)
+void WebRtcSpl_MemSetW16(int16_t *ptr, int16_t set_value, size_t length)
 {
-    int j;
+    size_t j;
     int16_t *arrptr = ptr;
 
     for (j = length; j > 0; j--)
@@ -37,9 +37,9 @@
     }
 }
 
-void WebRtcSpl_MemSetW32(int32_t *ptr, int32_t set_value, int length)
+void WebRtcSpl_MemSetW32(int32_t *ptr, int32_t set_value, size_t length)
 {
-    int j;
+    size_t j;
     int32_t *arrptr = ptr;
 
     for (j = length; j > 0; j--)
@@ -48,9 +48,11 @@
     }
 }
 
-void WebRtcSpl_MemCpyReversedOrder(int16_t* dest, int16_t* source, int length)
+void WebRtcSpl_MemCpyReversedOrder(int16_t* dest,
+                                   int16_t* source,
+                                   size_t length)
 {
-    int j;
+    size_t j;
     int16_t* destPtr = dest;
     int16_t* sourcePtr = source;
 
@@ -61,20 +63,20 @@
 }
 
 void WebRtcSpl_CopyFromEndW16(const int16_t *vector_in,
-                              int length,
-                              int samples,
+                              size_t length,
+                              size_t samples,
                               int16_t *vector_out)
 {
     // Copy the last <samples> of the input vector to vector_out
     WEBRTC_SPL_MEMCPY_W16(vector_out, &vector_in[length - samples], samples);
 }
 
-void WebRtcSpl_ZerosArrayW16(int16_t *vector, int length)
+void WebRtcSpl_ZerosArrayW16(int16_t *vector, size_t length)
 {
     WebRtcSpl_MemSetW16(vector, 0, length);
 }
 
-void WebRtcSpl_ZerosArrayW32(int32_t *vector, int length)
+void WebRtcSpl_ZerosArrayW32(int32_t *vector, size_t length)
 {
     WebRtcSpl_MemSetW32(vector, 0, length);
 }
diff --git a/webrtc/common_audio/signal_processing/cross_correlation.c b/webrtc/common_audio/signal_processing/cross_correlation.c
index ba34438..d7c9f2b 100644
--- a/webrtc/common_audio/signal_processing/cross_correlation.c
+++ b/webrtc/common_audio/signal_processing/cross_correlation.c
@@ -14,11 +14,11 @@
 void WebRtcSpl_CrossCorrelationC(int32_t* cross_correlation,
                                  const int16_t* seq1,
                                  const int16_t* seq2,
-                                 int16_t dim_seq,
-                                 int16_t dim_cross_correlation,
+                                 size_t dim_seq,
+                                 size_t dim_cross_correlation,
                                  int right_shifts,
                                  int step_seq2) {
-  int i = 0, j = 0;
+  size_t i = 0, j = 0;
 
   for (i = 0; i < dim_cross_correlation; i++) {
     int32_t corr = 0;
diff --git a/webrtc/common_audio/signal_processing/cross_correlation_mips.c b/webrtc/common_audio/signal_processing/cross_correlation_mips.c
index ac3df6d..b236402 100644
--- a/webrtc/common_audio/signal_processing/cross_correlation_mips.c
+++ b/webrtc/common_audio/signal_processing/cross_correlation_mips.c
@@ -13,8 +13,8 @@
 void WebRtcSpl_CrossCorrelation_mips(int32_t* cross_correlation,
                                      const int16_t* seq1,
                                      const int16_t* seq2,
-                                     int16_t dim_seq,
-                                     int16_t dim_cross_correlation,
+                                     size_t dim_seq,
+                                     size_t dim_cross_correlation,
                                      int right_shifts,
                                      int step_seq2) {
 
diff --git a/webrtc/common_audio/signal_processing/cross_correlation_neon.c b/webrtc/common_audio/signal_processing/cross_correlation_neon.c
index 9bf16cf..918b671 100644
--- a/webrtc/common_audio/signal_processing/cross_correlation_neon.c
+++ b/webrtc/common_audio/signal_processing/cross_correlation_neon.c
@@ -15,19 +15,14 @@
 static inline void DotProductWithScaleNeon(int32_t* cross_correlation,
                                            const int16_t* vector1,
                                            const int16_t* vector2,
-                                           int length,
+                                           size_t length,
                                            int scaling) {
-  int i = 0;
-  int len1 = length >> 3;
-  int len2 = length & 7;
+  size_t i = 0;
+  size_t len1 = length >> 3;
+  size_t len2 = length & 7;
   int64x2_t sum0 = vdupq_n_s64(0);
   int64x2_t sum1 = vdupq_n_s64(0);
 
-  if (length < 0) {
-    *cross_correlation = 0;
-    return;
-  }
-
   for (i = len1; i > 0; i -= 1) {
     int16x8_t seq1_16x8 = vld1q_s16(vector1);
     int16x8_t seq2_16x8 = vld1q_s16(vector2);
@@ -72,11 +67,11 @@
 void WebRtcSpl_CrossCorrelationNeon(int32_t* cross_correlation,
                                     const int16_t* seq1,
                                     const int16_t* seq2,
-                                    int16_t dim_seq,
-                                    int16_t dim_cross_correlation,
+                                    size_t dim_seq,
+                                    size_t dim_cross_correlation,
                                     int right_shifts,
                                     int step_seq2) {
-  int i = 0;
+  size_t i = 0;
 
   for (i = 0; i < dim_cross_correlation; i++) {
     const int16_t* seq1_ptr = seq1;
diff --git a/webrtc/common_audio/signal_processing/dot_product_with_scale.c b/webrtc/common_audio/signal_processing/dot_product_with_scale.c
index c01ec57..1302d62 100644
--- a/webrtc/common_audio/signal_processing/dot_product_with_scale.c
+++ b/webrtc/common_audio/signal_processing/dot_product_with_scale.c
@@ -12,10 +12,10 @@
 
 int32_t WebRtcSpl_DotProductWithScale(const int16_t* vector1,
                                       const int16_t* vector2,
-                                      int length,
+                                      size_t length,
                                       int scaling) {
   int32_t sum = 0;
-  int i = 0;
+  size_t i = 0;
 
   /* Unroll the loop to improve performance. */
   for (i = 0; i + 3 < length; i += 4) {
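The loop above is unrolled four-wide; for reference, the same reduction without unrolling (a sketch, not the SPL implementation, assuming — as in the C version — that each product is right-shifted by |scaling| before accumulation):

    #include <cstddef>
    #include <cstdint>

    int32_t DotProductWithScale(const int16_t* v1, const int16_t* v2,
                                size_t length, int scaling) {
      int32_t sum = 0;
      // Shifting each product keeps the 32-bit sum from overflowing
      // for long vectors.
      for (size_t i = 0; i < length; ++i)
        sum += (v1[i] * v2[i]) >> scaling;
      return sum;
    }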
diff --git a/webrtc/common_audio/signal_processing/downsample_fast.c b/webrtc/common_audio/signal_processing/downsample_fast.c
index 179c36a..726a888 100644
--- a/webrtc/common_audio/signal_processing/downsample_fast.c
+++ b/webrtc/common_audio/signal_processing/downsample_fast.c
@@ -13,20 +13,20 @@
 // TODO(Bjornv): Change the function parameter order to WebRTC code style.
 // C version of WebRtcSpl_DownsampleFast() for generic platforms.
 int WebRtcSpl_DownsampleFastC(const int16_t* data_in,
-                              int data_in_length,
+                              size_t data_in_length,
                               int16_t* data_out,
-                              int data_out_length,
+                              size_t data_out_length,
                               const int16_t* __restrict coefficients,
-                              int coefficients_length,
+                              size_t coefficients_length,
                               int factor,
-                              int delay) {
-  int i = 0;
-  int j = 0;
+                              size_t delay) {
+  size_t i = 0;
+  size_t j = 0;
   int32_t out_s32 = 0;
-  int endpos = delay + factor * (data_out_length - 1) + 1;
+  size_t endpos = delay + factor * (data_out_length - 1) + 1;
 
   // Return an error if any of the preconditions is not met.
-  if (data_out_length <= 0 || coefficients_length <= 0
+  if (data_out_length == 0 || coefficients_length == 0
                            || data_in_length < endpos) {
     return -1;
   }
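The endpos precondition above pins down exactly how much input the downsampler reads; spelled out (the numbers in the comment are illustrative):

    #include <cstddef>

    // The last input index touched is delay + factor * (data_out_length - 1),
    // so the input must hold at least endpos samples.
    size_t RequiredInputLength(size_t delay, int factor, size_t data_out_length) {
      return delay + factor * (data_out_length - 1) + 1;
    }
    // e.g. delay == 3, factor == 2, data_out_length == 80  ->  162 samples.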
diff --git a/webrtc/common_audio/signal_processing/downsample_fast_mips.c b/webrtc/common_audio/signal_processing/downsample_fast_mips.c
index dbde43d..ac39401 100644
--- a/webrtc/common_audio/signal_processing/downsample_fast_mips.c
+++ b/webrtc/common_audio/signal_processing/downsample_fast_mips.c
@@ -12,18 +12,18 @@
 
 // Version of WebRtcSpl_DownsampleFast() for MIPS platforms.
 int WebRtcSpl_DownsampleFast_mips(const int16_t* data_in,
-                                  int data_in_length,
+                                  size_t data_in_length,
                                   int16_t* data_out,
-                                  int data_out_length,
+                                  size_t data_out_length,
                                   const int16_t* __restrict coefficients,
-                                  int coefficients_length,
+                                  size_t coefficients_length,
                                   int factor,
-                                  int delay) {
+                                  size_t delay) {
   int i;
   int j;
   int k;
   int32_t out_s32 = 0;
-  int endpos = delay + factor * (data_out_length - 1) + 1;
+  size_t endpos = delay + factor * (data_out_length - 1) + 1;
 
   int32_t  tmp1, tmp2, tmp3, tmp4, factor_2;
   int16_t* p_coefficients;
@@ -36,7 +36,7 @@
 #endif  // #if !defined(MIPS_DSP_R1_LE)
 
   // Return an error if any of the preconditions is not met.
-  if (data_out_length <= 0 || coefficients_length <= 0
+  if (data_out_length == 0 || coefficients_length == 0
                            || data_in_length < endpos) {
     return -1;
   }
diff --git a/webrtc/common_audio/signal_processing/downsample_fast_neon.c b/webrtc/common_audio/signal_processing/downsample_fast_neon.c
index f775e69..58732da 100644
--- a/webrtc/common_audio/signal_processing/downsample_fast_neon.c
+++ b/webrtc/common_audio/signal_processing/downsample_fast_neon.c
@@ -15,22 +15,22 @@
 // NEON intrinsics version of WebRtcSpl_DownsampleFast()
 // for ARM 32-bit/64-bit platforms.
 int WebRtcSpl_DownsampleFastNeon(const int16_t* data_in,
-                                 int data_in_length,
+                                 size_t data_in_length,
                                  int16_t* data_out,
-                                 int data_out_length,
+                                 size_t data_out_length,
                                  const int16_t* __restrict coefficients,
-                                 int coefficients_length,
+                                 size_t coefficients_length,
                                  int factor,
-                                 int delay) {
-  int i = 0;
-  int j = 0;
+                                 size_t delay) {
+  size_t i = 0;
+  size_t j = 0;
   int32_t out_s32 = 0;
-  int endpos = delay + factor * (data_out_length - 1) + 1;
-  int res = data_out_length & 0x7;
-  int endpos1 = endpos - factor * res;
+  size_t endpos = delay + factor * (data_out_length - 1) + 1;
+  size_t res = data_out_length & 0x7;
+  size_t endpos1 = endpos - factor * res;
 
   // Return an error if any of the preconditions is not met.
-  if (data_out_length <= 0 || coefficients_length <= 0
+  if (data_out_length == 0 || coefficients_length == 0
                            || data_in_length < endpos) {
     return -1;
   }
diff --git a/webrtc/common_audio/signal_processing/energy.c b/webrtc/common_audio/signal_processing/energy.c
index 1d1ed62..e83f1a6 100644
--- a/webrtc/common_audio/signal_processing/energy.c
+++ b/webrtc/common_audio/signal_processing/energy.c
@@ -17,13 +17,15 @@
 
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
-int32_t WebRtcSpl_Energy(int16_t* vector, int vector_length, int* scale_factor)
+int32_t WebRtcSpl_Energy(int16_t* vector,
+                         size_t vector_length,
+                         int* scale_factor)
 {
     int32_t en = 0;
-    int i;
+    size_t i;
     int scaling =
         WebRtcSpl_GetScalingSquare(vector, vector_length, vector_length);
-    int looptimes = vector_length;
+    size_t looptimes = vector_length;
     int16_t *vectorptr = vector;
 
     for (i = 0; i < looptimes; i++)
diff --git a/webrtc/common_audio/signal_processing/filter_ar.c b/webrtc/common_audio/signal_processing/filter_ar.c
index 2a1805c..dfbc4c2 100644
--- a/webrtc/common_audio/signal_processing/filter_ar.c
+++ b/webrtc/common_audio/signal_processing/filter_ar.c
@@ -17,21 +17,21 @@
 
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
-int WebRtcSpl_FilterAR(const int16_t* a,
-                       int a_length,
-                       const int16_t* x,
-                       int x_length,
-                       int16_t* state,
-                       int state_length,
-                       int16_t* state_low,
-                       int state_low_length,
-                       int16_t* filtered,
-                       int16_t* filtered_low,
-                       int filtered_low_length)
+size_t WebRtcSpl_FilterAR(const int16_t* a,
+                          size_t a_length,
+                          const int16_t* x,
+                          size_t x_length,
+                          int16_t* state,
+                          size_t state_length,
+                          int16_t* state_low,
+                          size_t state_low_length,
+                          int16_t* filtered,
+                          int16_t* filtered_low,
+                          size_t filtered_low_length)
 {
     int32_t o;
     int32_t oLOW;
-    int i, j, stop;
+    size_t i, j, stop;
     const int16_t* x_ptr = &x[0];
     int16_t* filteredFINAL_ptr = filtered;
     int16_t* filteredFINAL_LOW_ptr = filtered_low;
diff --git a/webrtc/common_audio/signal_processing/filter_ar_fast_q12.c b/webrtc/common_audio/signal_processing/filter_ar_fast_q12.c
index cfd82ca..70001a0 100644
--- a/webrtc/common_audio/signal_processing/filter_ar_fast_q12.c
+++ b/webrtc/common_audio/signal_processing/filter_ar_fast_q12.c
@@ -16,10 +16,10 @@
 void WebRtcSpl_FilterARFastQ12(const int16_t* data_in,
                                int16_t* data_out,
                                const int16_t* __restrict coefficients,
-                               int coefficients_length,
-                               int data_length) {
-  int i = 0;
-  int j = 0;
+                               size_t coefficients_length,
+                               size_t data_length) {
+  size_t i = 0;
+  size_t j = 0;
 
   assert(data_length > 0);
   assert(coefficients_length > 1);
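
One nuance of these asserts now that |data_length| is unsigned: the empty case is still caught, but a negative value from a buggy caller wraps to a huge size_t before the assert can see it. A stand-alone sketch of that failure mode (hypothetical, not from this patch):

#include <assert.h>
#include <stddef.h>

int main(void) {
  int bad_length = -1;
  size_t data_length = (size_t)bad_length;  /* wraps to SIZE_MAX */
  assert(data_length > 0);                  /* passes, silently */
  return 0;
}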
diff --git a/webrtc/common_audio/signal_processing/filter_ar_fast_q12_armv7.S b/webrtc/common_audio/signal_processing/filter_ar_fast_q12_armv7.S
index b72396e..76c8eee 100644
--- a/webrtc/common_audio/signal_processing/filter_ar_fast_q12_armv7.S
+++ b/webrtc/common_audio/signal_processing/filter_ar_fast_q12_armv7.S
@@ -155,10 +155,10 @@
 @void WebRtcSpl_FilterARFastQ12(int16_t* data_in,
 @                               int16_t* data_out,
 @                               int16_t* __restrict coefficients,
-@                               int coefficients_length,
-@                               int data_length) {
-@  int i = 0;
-@  int j = 0;
+@                               size_t coefficients_length,
+@                               size_t data_length) {
+@  size_t i = 0;
+@  size_t j = 0;
 @
 @  assert(data_length > 0);
 @  assert(coefficients_length > 1);
diff --git a/webrtc/common_audio/signal_processing/filter_ar_fast_q12_mips.c b/webrtc/common_audio/signal_processing/filter_ar_fast_q12_mips.c
index e77e1f5..0384701 100644
--- a/webrtc/common_audio/signal_processing/filter_ar_fast_q12_mips.c
+++ b/webrtc/common_audio/signal_processing/filter_ar_fast_q12_mips.c
@@ -14,8 +14,8 @@
 void WebRtcSpl_FilterARFastQ12(const int16_t* data_in,
                                int16_t* data_out,
                                const int16_t* __restrict coefficients,
-                               int coefficients_length,
-                               int data_length) {
+                               size_t coefficients_length,
+                               size_t data_length) {
   int r0, r1, r2, r3;
   int coef0, offset;
   int i, j, k;
diff --git a/webrtc/common_audio/signal_processing/filter_ma_fast_q12.c b/webrtc/common_audio/signal_processing/filter_ma_fast_q12.c
index afec839..f4d9a3d 100644
--- a/webrtc/common_audio/signal_processing/filter_ma_fast_q12.c
+++ b/webrtc/common_audio/signal_processing/filter_ma_fast_q12.c
@@ -20,10 +20,10 @@
 void WebRtcSpl_FilterMAFastQ12(const int16_t* in_ptr,
                                int16_t* out_ptr,
                                const int16_t* B,
-                               int16_t B_length,
-                               int16_t length)
+                               size_t B_length,
+                               size_t length)
 {
-    int i, j;
+    size_t i, j;
     for (i = 0; i < length; i++)
     {
         int32_t o = 0;
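
For reference, a plain-C sketch of the MA (FIR) loop behind this signature, with the new size_t counters; the Q12 rounding and the requirement that the input be preceded by |B_length| - 1 history samples are assumptions, not taken verbatim from this file:

#include <stddef.h>
#include <stdint.h>

/* Assumes in[] is preceded by B_length - 1 samples of history. */
static void filter_ma_q12_sketch(const int16_t* in, int16_t* out,
                                 const int16_t* B, size_t B_length,
                                 size_t length) {
  for (size_t i = 0; i < length; i++) {
    int32_t o = 0;
    for (size_t j = 0; j < B_length; j++)
      o += (int32_t)B[j] * *(in + i - j);  /* walks back through history */
    out[i] = (int16_t)((o + (1 << 11)) >> 12);  /* assumed Q12 rounding */
  }
}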
diff --git a/webrtc/common_audio/signal_processing/get_hanning_window.c b/webrtc/common_audio/signal_processing/get_hanning_window.c
index 519b665..d83ac21 100644
--- a/webrtc/common_audio/signal_processing/get_hanning_window.c
+++ b/webrtc/common_audio/signal_processing/get_hanning_window.c
@@ -53,15 +53,15 @@
 16354,  16362,  16369,  16374,  16378,  16382,  16383,  16384
 };
 
-void WebRtcSpl_GetHanningWindow(int16_t *v, int16_t size)
+void WebRtcSpl_GetHanningWindow(int16_t *v, size_t size)
 {
-    int jj;
+    size_t jj;
     int16_t *vptr1;
 
     int32_t index;
     int32_t factor = ((int32_t)0x40000000);
 
-    factor = WebRtcSpl_DivW32W16(factor, size);
+    factor = WebRtcSpl_DivW32W16(factor, (int16_t)size);
     if (size < 513)
         index = (int32_t)-0x200000;
     else
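
The explicit (int16_t) cast narrows the now-unsigned |size| for WebRtcSpl_DivW32W16, which silently assumes window sizes stay within int16_t range (the |size| < 513 branch suggests they are small in practice). A defensive sketch making that assumption explicit (hypothetical wrapper):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

extern void WebRtcSpl_GetHanningWindow(int16_t* window, size_t size);

void get_window_checked(int16_t* window, size_t size) {
  assert(size > 0 && size <= INT16_MAX);  /* keep the narrowing cast honest */
  WebRtcSpl_GetHanningWindow(window, size);
}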
diff --git a/webrtc/common_audio/signal_processing/get_scaling_square.c b/webrtc/common_audio/signal_processing/get_scaling_square.c
index 3b9171d..82e3c8b 100644
--- a/webrtc/common_audio/signal_processing/get_scaling_square.c
+++ b/webrtc/common_audio/signal_processing/get_scaling_square.c
@@ -18,16 +18,16 @@
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
 int16_t WebRtcSpl_GetScalingSquare(int16_t* in_vector,
-                                   int in_vector_length,
-                                   int times)
+                                   size_t in_vector_length,
+                                   size_t times)
 {
     int16_t nbits = WebRtcSpl_GetSizeInBits((uint32_t)times);
-    int i;
+    size_t i;
     int16_t smax = -1;
     int16_t sabs;
     int16_t *sptr = in_vector;
     int16_t t;
-    int looptimes = in_vector_length;
+    size_t looptimes = in_vector_length;
 
     for (i = looptimes; i > 0; i--)
     {
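
The countdown loop above shows the idiomatic reverse iteration for an unsigned counter: test |i| > 0 and access element |i| - 1 (or walk a pointer); testing i >= 0 would be vacuously true for size_t. A minimal sketch:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  int16_t v[4] = {10, 20, 30, 40};
  for (size_t i = sizeof(v) / sizeof(v[0]); i > 0; i--) {
    /* i runs 4..1; the element index is i - 1. */
    printf("v[%zu] = %d\n", i - 1, v[i - 1]);
  }
  return 0;
}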
diff --git a/webrtc/common_audio/signal_processing/ilbc_specific_functions.c b/webrtc/common_audio/signal_processing/ilbc_specific_functions.c
index dae25a4..301a922 100644
--- a/webrtc/common_audio/signal_processing/ilbc_specific_functions.c
+++ b/webrtc/common_audio/signal_processing/ilbc_specific_functions.c
@@ -23,10 +23,10 @@
 
 void WebRtcSpl_ReverseOrderMultArrayElements(int16_t *out, const int16_t *in,
                                              const int16_t *win,
-                                             int16_t vector_length,
+                                             size_t vector_length,
                                              int16_t right_shifts)
 {
-    int i;
+    size_t i;
     int16_t *outptr = out;
     const int16_t *inptr = in;
     const int16_t *winptr = win;
@@ -37,10 +37,10 @@
 }
 
 void WebRtcSpl_ElementwiseVectorMult(int16_t *out, const int16_t *in,
-                                     const int16_t *win, int16_t vector_length,
+                                     const int16_t *win, size_t vector_length,
                                      int16_t right_shifts)
 {
-    int i;
+    size_t i;
     int16_t *outptr = out;
     const int16_t *inptr = in;
     const int16_t *winptr = win;
@@ -51,10 +51,10 @@
 }
 
 void WebRtcSpl_AddVectorsAndShift(int16_t *out, const int16_t *in1,
-                                  const int16_t *in2, int16_t vector_length,
+                                  const int16_t *in2, size_t vector_length,
                                   int16_t right_shifts)
 {
-    int i;
+    size_t i;
     int16_t *outptr = out;
     const int16_t *in1ptr = in1;
     const int16_t *in2ptr = in2;
@@ -66,9 +66,10 @@
 
 void WebRtcSpl_AddAffineVectorToVector(int16_t *out, int16_t *in,
                                        int16_t gain, int32_t add_constant,
-                                       int16_t right_shifts, int vector_length)
+                                       int16_t right_shifts,
+                                       size_t vector_length)
 {
-    int i;
+    size_t i;
 
     for (i = 0; i < vector_length; i++)
     {
@@ -78,9 +79,9 @@
 
 void WebRtcSpl_AffineTransformVector(int16_t *out, int16_t *in,
                                      int16_t gain, int32_t add_constant,
-                                     int16_t right_shifts, int vector_length)
+                                     int16_t right_shifts, size_t vector_length)
 {
-    int i;
+    size_t i;
 
     for (i = 0; i < vector_length; i++)
     {
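
The unit tests later in this patch pin down the semantics of these helpers; for example, WebRtcSpl_AffineTransformVector computes out[k] = (in[k] * gain + add_constant) >> right_shifts. A stand-alone sketch with the size_t length:

#include <stddef.h>
#include <stdint.h>

static void affine_transform_sketch(int16_t* out, const int16_t* in,
                                    int16_t gain, int32_t add_constant,
                                    int16_t right_shifts,
                                    size_t vector_length) {
  for (size_t i = 0; i < vector_length; i++)
    out[i] = (int16_t)(((int32_t)in[i] * gain + add_constant) >> right_shifts);
}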
diff --git a/webrtc/common_audio/signal_processing/include/signal_processing_library.h b/webrtc/common_audio/signal_processing/include/signal_processing_library.h
index 24656b7..37c21e7 100644
--- a/webrtc/common_audio/signal_processing/include/signal_processing_library.h
+++ b/webrtc/common_audio/signal_processing/include/signal_processing_library.h
@@ -115,28 +115,28 @@
 void WebRtcSpl_Init();
 
 int16_t WebRtcSpl_GetScalingSquare(int16_t* in_vector,
-                                   int in_vector_length,
-                                   int times);
+                                   size_t in_vector_length,
+                                   size_t times);
 
 // Copy and set operations. Implementation in copy_set_operations.c.
 // Descriptions at bottom of file.
 void WebRtcSpl_MemSetW16(int16_t* vector,
                          int16_t set_value,
-                         int vector_length);
+                         size_t vector_length);
 void WebRtcSpl_MemSetW32(int32_t* vector,
                          int32_t set_value,
-                         int vector_length);
+                         size_t vector_length);
 void WebRtcSpl_MemCpyReversedOrder(int16_t* out_vector,
                                    int16_t* in_vector,
-                                   int vector_length);
+                                   size_t vector_length);
 void WebRtcSpl_CopyFromEndW16(const int16_t* in_vector,
-                              int in_vector_length,
-                              int samples,
+                              size_t in_vector_length,
+                              size_t samples,
                               int16_t* out_vector);
 void WebRtcSpl_ZerosArrayW16(int16_t* vector,
-                             int vector_length);
+                             size_t vector_length);
 void WebRtcSpl_ZerosArrayW32(int32_t* vector,
-                             int vector_length);
+                             size_t vector_length);
 // End: Copy and set operations.
 
 
@@ -150,15 +150,15 @@
 //      - length : Number of samples in vector.
 //
 // Return value  : Maximum absolute value in vector;
-//                 or -1, if (vector == NULL || length <= 0).
-typedef int16_t (*MaxAbsValueW16)(const int16_t* vector, int length);
+//                 or -1, if (vector == NULL || length == 0).
+typedef int16_t (*MaxAbsValueW16)(const int16_t* vector, size_t length);
 extern MaxAbsValueW16 WebRtcSpl_MaxAbsValueW16;
-int16_t WebRtcSpl_MaxAbsValueW16C(const int16_t* vector, int length);
+int16_t WebRtcSpl_MaxAbsValueW16C(const int16_t* vector, size_t length);
 #if (defined WEBRTC_DETECT_NEON) || (defined WEBRTC_HAS_NEON)
-int16_t WebRtcSpl_MaxAbsValueW16Neon(const int16_t* vector, int length);
+int16_t WebRtcSpl_MaxAbsValueW16Neon(const int16_t* vector, size_t length);
 #endif
 #if defined(MIPS32_LE)
-int16_t WebRtcSpl_MaxAbsValueW16_mips(const int16_t* vector, int length);
+int16_t WebRtcSpl_MaxAbsValueW16_mips(const int16_t* vector, size_t length);
 #endif
 
 // Returns the largest absolute value in a signed 32-bit vector.
@@ -168,15 +168,15 @@
 //      - length : Number of samples in vector.
 //
 // Return value  : Maximum absolute value in vector;
-//                 or -1, if (vector == NULL || length <= 0).
-typedef int32_t (*MaxAbsValueW32)(const int32_t* vector, int length);
+//                 or -1, if (vector == NULL || length == 0).
+typedef int32_t (*MaxAbsValueW32)(const int32_t* vector, size_t length);
 extern MaxAbsValueW32 WebRtcSpl_MaxAbsValueW32;
-int32_t WebRtcSpl_MaxAbsValueW32C(const int32_t* vector, int length);
+int32_t WebRtcSpl_MaxAbsValueW32C(const int32_t* vector, size_t length);
 #if (defined WEBRTC_DETECT_NEON) || (defined WEBRTC_HAS_NEON)
-int32_t WebRtcSpl_MaxAbsValueW32Neon(const int32_t* vector, int length);
+int32_t WebRtcSpl_MaxAbsValueW32Neon(const int32_t* vector, size_t length);
 #endif
 #if defined(MIPS_DSP_R1_LE)
-int32_t WebRtcSpl_MaxAbsValueW32_mips(const int32_t* vector, int length);
+int32_t WebRtcSpl_MaxAbsValueW32_mips(const int32_t* vector, size_t length);
 #endif
 
 // Returns the maximum value of a 16-bit vector.
@@ -186,17 +186,17 @@
 //      - length : Number of samples in vector.
 //
 // Return value  : Maximum sample value in |vector|.
-//                 If (vector == NULL || length <= 0) WEBRTC_SPL_WORD16_MIN
+//                 If (vector == NULL || length == 0) WEBRTC_SPL_WORD16_MIN
 //                 is returned. Note that WEBRTC_SPL_WORD16_MIN is a feasible
 //                 value and we can't catch errors purely based on it.
-typedef int16_t (*MaxValueW16)(const int16_t* vector, int length);
+typedef int16_t (*MaxValueW16)(const int16_t* vector, size_t length);
 extern MaxValueW16 WebRtcSpl_MaxValueW16;
-int16_t WebRtcSpl_MaxValueW16C(const int16_t* vector, int length);
+int16_t WebRtcSpl_MaxValueW16C(const int16_t* vector, size_t length);
 #if (defined WEBRTC_DETECT_NEON) || (defined WEBRTC_HAS_NEON)
-int16_t WebRtcSpl_MaxValueW16Neon(const int16_t* vector, int length);
+int16_t WebRtcSpl_MaxValueW16Neon(const int16_t* vector, size_t length);
 #endif
 #if defined(MIPS32_LE)
-int16_t WebRtcSpl_MaxValueW16_mips(const int16_t* vector, int length);
+int16_t WebRtcSpl_MaxValueW16_mips(const int16_t* vector, size_t length);
 #endif
 
 // Returns the maximum value of a 32-bit vector.
@@ -206,17 +206,17 @@
 //      - length : Number of samples in vector.
 //
 // Return value  : Maximum sample value in |vector|.
-//                 If (vector == NULL || length <= 0) WEBRTC_SPL_WORD32_MIN
+//                 If (vector == NULL || length == 0) WEBRTC_SPL_WORD32_MIN
 //                 is returned. Note that WEBRTC_SPL_WORD32_MIN is a feasible
 //                 value and we can't catch errors purely based on it.
-typedef int32_t (*MaxValueW32)(const int32_t* vector, int length);
+typedef int32_t (*MaxValueW32)(const int32_t* vector, size_t length);
 extern MaxValueW32 WebRtcSpl_MaxValueW32;
-int32_t WebRtcSpl_MaxValueW32C(const int32_t* vector, int length);
+int32_t WebRtcSpl_MaxValueW32C(const int32_t* vector, size_t length);
 #if (defined WEBRTC_DETECT_NEON) || (defined WEBRTC_HAS_NEON)
-int32_t WebRtcSpl_MaxValueW32Neon(const int32_t* vector, int length);
+int32_t WebRtcSpl_MaxValueW32Neon(const int32_t* vector, size_t length);
 #endif
 #if defined(MIPS32_LE)
-int32_t WebRtcSpl_MaxValueW32_mips(const int32_t* vector, int length);
+int32_t WebRtcSpl_MaxValueW32_mips(const int32_t* vector, size_t length);
 #endif
 
 // Returns the minimum value of a 16-bit vector.
@@ -226,17 +226,17 @@
 //      - length : Number of samples in vector.
 //
 // Return value  : Minimum sample value in |vector|.
-//                 If (vector == NULL || length <= 0) WEBRTC_SPL_WORD16_MAX
+//                 If (vector == NULL || length == 0) WEBRTC_SPL_WORD16_MAX
 //                 is returned. Note that WEBRTC_SPL_WORD16_MAX is a feasible
 //                 value and we can't catch errors purely based on it.
-typedef int16_t (*MinValueW16)(const int16_t* vector, int length);
+typedef int16_t (*MinValueW16)(const int16_t* vector, size_t length);
 extern MinValueW16 WebRtcSpl_MinValueW16;
-int16_t WebRtcSpl_MinValueW16C(const int16_t* vector, int length);
+int16_t WebRtcSpl_MinValueW16C(const int16_t* vector, size_t length);
 #if (defined WEBRTC_DETECT_NEON) || (defined WEBRTC_HAS_NEON)
-int16_t WebRtcSpl_MinValueW16Neon(const int16_t* vector, int length);
+int16_t WebRtcSpl_MinValueW16Neon(const int16_t* vector, size_t length);
 #endif
 #if defined(MIPS32_LE)
-int16_t WebRtcSpl_MinValueW16_mips(const int16_t* vector, int length);
+int16_t WebRtcSpl_MinValueW16_mips(const int16_t* vector, size_t length);
 #endif
 
 // Returns the minimum value of a 32-bit vector.
@@ -246,17 +246,17 @@
 //      - length : Number of samples in vector.
 //
 // Return value  : Minimum sample value in |vector|.
-//                 If (vector == NULL || length <= 0) WEBRTC_SPL_WORD32_MAX
+//                 If (vector == NULL || length == 0) WEBRTC_SPL_WORD32_MAX
 //                 is returned. Note that WEBRTC_SPL_WORD32_MAX is a feasible
 //                 value and we can't catch errors purely based on it.
-typedef int32_t (*MinValueW32)(const int32_t* vector, int length);
+typedef int32_t (*MinValueW32)(const int32_t* vector, size_t length);
 extern MinValueW32 WebRtcSpl_MinValueW32;
-int32_t WebRtcSpl_MinValueW32C(const int32_t* vector, int length);
+int32_t WebRtcSpl_MinValueW32C(const int32_t* vector, size_t length);
 #if (defined WEBRTC_DETECT_NEON) || (defined WEBRTC_HAS_NEON)
-int32_t WebRtcSpl_MinValueW32Neon(const int32_t* vector, int length);
+int32_t WebRtcSpl_MinValueW32Neon(const int32_t* vector, size_t length);
 #endif
 #if defined(MIPS32_LE)
-int32_t WebRtcSpl_MinValueW32_mips(const int32_t* vector, int length);
+int32_t WebRtcSpl_MinValueW32_mips(const int32_t* vector, size_t length);
 #endif
 
 // Returns the vector index to the largest absolute value of a 16-bit vector.
@@ -266,11 +266,11 @@
 //      - length : Number of samples in vector.
 //
 // Return value  : Index to the maximum absolute value in vector, or -1,
-//                 if (vector == NULL || length <= 0).
+//                 if (vector == NULL || length == 0).
 //                 If there are multiple equal maxima, return the index of the
 //                 first. -32768 will always have precedence over 32767 (despite
 //                 -32768 presenting an int16 absolute value of 32767).
-int WebRtcSpl_MaxAbsIndexW16(const int16_t* vector, int length);
+int WebRtcSpl_MaxAbsIndexW16(const int16_t* vector, size_t length);
 
 // Returns the vector index to the maximum sample value of a 16-bit vector.
 //
@@ -280,8 +280,8 @@
 //
 // Return value  : Index to the maximum value in vector (if multiple
 //                 indexes have the maximum, return the first);
-//                 or -1, if (vector == NULL || length <= 0).
-int WebRtcSpl_MaxIndexW16(const int16_t* vector, int length);
+//                 or -1, if (vector == NULL || length == 0).
+int WebRtcSpl_MaxIndexW16(const int16_t* vector, size_t length);
 
 // Returns the vector index to the maximum sample value of a 32-bit vector.
 //
@@ -291,8 +291,8 @@
 //
 // Return value  : Index to the maximum value in vector (if multiple
 //                 indexes have the maximum, return the first);
-//                 or -1, if (vector == NULL || length <= 0).
-int WebRtcSpl_MaxIndexW32(const int32_t* vector, int length);
+//                 or -1, if (vector == NULL || length == 0).
+int WebRtcSpl_MaxIndexW32(const int32_t* vector, size_t length);
 
 // Returns the vector index to the minimum sample value of a 16-bit vector.
 //
@@ -302,8 +302,8 @@
 //
 // Return value  : Index to the minimum value in vector (if multiple
 //                 indexes have the minimum, return the first);
-//                 or -1, if (vector == NULL || length <= 0).
-int WebRtcSpl_MinIndexW16(const int16_t* vector, int length);
+//                 or -1, if (vector == NULL || length == 0).
+int WebRtcSpl_MinIndexW16(const int16_t* vector, size_t length);
 
 // Returns the vector index to the minimum sample value of a 32-bit vector.
 //
@@ -313,8 +313,8 @@
 //
 // Return value  : Index to the minimum value in vector (if multiple
 //                 indexes have the minimum, return the first);
-//                 or -1, if (vector == NULL || length <= 0).
-int WebRtcSpl_MinIndexW32(const int32_t* vector, int length);
+//                 or -1, if (vector == NULL || length == 0).
+int WebRtcSpl_MinIndexW32(const int32_t* vector, size_t length);
 
 // End: Minimum and maximum operations.
 
@@ -322,33 +322,33 @@
 // Vector scaling operations. Implementation in vector_scaling_operations.c.
 // Description at bottom of file.
 void WebRtcSpl_VectorBitShiftW16(int16_t* out_vector,
-                                 int16_t vector_length,
+                                 size_t vector_length,
                                  const int16_t* in_vector,
                                  int16_t right_shifts);
 void WebRtcSpl_VectorBitShiftW32(int32_t* out_vector,
-                                 int16_t vector_length,
+                                 size_t vector_length,
                                  const int32_t* in_vector,
                                  int16_t right_shifts);
 void WebRtcSpl_VectorBitShiftW32ToW16(int16_t* out_vector,
-                                      int vector_length,
+                                      size_t vector_length,
                                       const int32_t* in_vector,
                                       int right_shifts);
 void WebRtcSpl_ScaleVector(const int16_t* in_vector,
                            int16_t* out_vector,
                            int16_t gain,
-                           int16_t vector_length,
+                           size_t vector_length,
                            int16_t right_shifts);
 void WebRtcSpl_ScaleVectorWithSat(const int16_t* in_vector,
                                   int16_t* out_vector,
                                   int16_t gain,
-                                  int16_t vector_length,
+                                  size_t vector_length,
                                   int16_t right_shifts);
 void WebRtcSpl_ScaleAndAddVectors(const int16_t* in_vector1,
                                   int16_t gain1, int right_shifts1,
                                   const int16_t* in_vector2,
                                   int16_t gain2, int right_shifts2,
                                   int16_t* out_vector,
-                                  int vector_length);
+                                  size_t vector_length);
 
 // The functions (with related pointer) perform the vector operation:
 //   out_vector[k] = ((scale1 * in_vector1[k]) + (scale2 * in_vector2[k])
@@ -374,7 +374,7 @@
                                            int16_t in_vector2_scale,
                                            int right_shifts,
                                            int16_t* out_vector,
-                                           int length);
+                                           size_t length);
 extern ScaleAndAddVectorsWithRound WebRtcSpl_ScaleAndAddVectorsWithRound;
 int WebRtcSpl_ScaleAndAddVectorsWithRoundC(const int16_t* in_vector1,
                                            int16_t in_vector1_scale,
@@ -382,7 +382,7 @@
                                            int16_t in_vector2_scale,
                                            int right_shifts,
                                            int16_t* out_vector,
-                                           int length);
+                                           size_t length);
 #if defined(MIPS_DSP_R1_LE)
 int WebRtcSpl_ScaleAndAddVectorsWithRound_mips(const int16_t* in_vector1,
                                                int16_t in_vector1_scale,
@@ -390,7 +390,7 @@
                                                int16_t in_vector2_scale,
                                                int right_shifts,
                                                int16_t* out_vector,
-                                               int length);
+                                               size_t length);
 #endif
 // End: Vector scaling operations.
 
@@ -399,30 +399,30 @@
 void WebRtcSpl_ReverseOrderMultArrayElements(int16_t* out_vector,
                                              const int16_t* in_vector,
                                              const int16_t* window,
-                                             int16_t vector_length,
+                                             size_t vector_length,
                                              int16_t right_shifts);
 void WebRtcSpl_ElementwiseVectorMult(int16_t* out_vector,
                                      const int16_t* in_vector,
                                      const int16_t* window,
-                                     int16_t vector_length,
+                                     size_t vector_length,
                                      int16_t right_shifts);
 void WebRtcSpl_AddVectorsAndShift(int16_t* out_vector,
                                   const int16_t* in_vector1,
                                   const int16_t* in_vector2,
-                                  int16_t vector_length,
+                                  size_t vector_length,
                                   int16_t right_shifts);
 void WebRtcSpl_AddAffineVectorToVector(int16_t* out_vector,
                                        int16_t* in_vector,
                                        int16_t gain,
                                        int32_t add_constant,
                                        int16_t right_shifts,
-                                       int vector_length);
+                                       size_t vector_length);
 void WebRtcSpl_AffineTransformVector(int16_t* out_vector,
                                      int16_t* in_vector,
                                      int16_t gain,
                                      int32_t add_constant,
                                      int16_t right_shifts,
-                                     int vector_length);
+                                     size_t vector_length);
 // End: iLBC specific functions.
 
 // Signal processing operations.
@@ -447,8 +447,8 @@
 //      - -1, if |order| > |in_vector_length|;
 //      - Number of samples in |result|, i.e. (order+1), otherwise.
 int WebRtcSpl_AutoCorrelation(const int16_t* in_vector,
-                              int in_vector_length,
-                              int order,
+                              size_t in_vector_length,
+                              size_t order,
                               int32_t* result,
                               int* scale);
 
@@ -467,7 +467,7 @@
 int16_t WebRtcSpl_LevinsonDurbin(const int32_t* auto_corr,
                                  int16_t* lpc_coef,
                                  int16_t* refl_coef,
-                                 int16_t order);
+                                 size_t order);
 
 // Converts reflection coefficients |refl_coef| to LPC coefficients |lpc_coef|.
 // This version is a 16 bit operation.
@@ -540,24 +540,24 @@
 typedef void (*CrossCorrelation)(int32_t* cross_correlation,
                                  const int16_t* seq1,
                                  const int16_t* seq2,
-                                 int16_t dim_seq,
-                                 int16_t dim_cross_correlation,
+                                 size_t dim_seq,
+                                 size_t dim_cross_correlation,
                                  int right_shifts,
                                  int step_seq2);
 extern CrossCorrelation WebRtcSpl_CrossCorrelation;
 void WebRtcSpl_CrossCorrelationC(int32_t* cross_correlation,
                                  const int16_t* seq1,
                                  const int16_t* seq2,
-                                 int16_t dim_seq,
-                                 int16_t dim_cross_correlation,
+                                 size_t dim_seq,
+                                 size_t dim_cross_correlation,
                                  int right_shifts,
                                  int step_seq2);
 #if (defined WEBRTC_DETECT_NEON) || (defined WEBRTC_HAS_NEON)
 void WebRtcSpl_CrossCorrelationNeon(int32_t* cross_correlation,
                                     const int16_t* seq1,
                                     const int16_t* seq2,
-                                    int16_t dim_seq,
-                                    int16_t dim_cross_correlation,
+                                    size_t dim_seq,
+                                    size_t dim_cross_correlation,
                                     int right_shifts,
                                     int step_seq2);
 #endif
@@ -565,8 +565,8 @@
 void WebRtcSpl_CrossCorrelation_mips(int32_t* cross_correlation,
                                      const int16_t* seq1,
                                      const int16_t* seq2,
-                                     int16_t dim_seq,
-                                     int16_t dim_cross_correlation,
+                                     size_t dim_seq,
+                                     size_t dim_cross_correlation,
                                      int right_shifts,
                                      int step_seq2);
 #endif
@@ -579,7 +579,7 @@
 //
 // Output:
 //      - window    : Hanning vector in Q14.
-void WebRtcSpl_GetHanningWindow(int16_t* window, int16_t size);
+void WebRtcSpl_GetHanningWindow(int16_t* window, size_t size);
 
 // Calculates y[k] = sqrt(1 - x[k]^2) for each element of the input vector
 // |in_vector|. Input and output values are in Q15.
@@ -591,7 +591,7 @@
 // Output:
 //      - out_vector    : Output values in Q15
 void WebRtcSpl_SqrtOfOneMinusXSquared(int16_t* in_vector,
-                                      int vector_length,
+                                      size_t vector_length,
                                       int16_t* out_vector);
 // End: Signal processing operations.
 
@@ -617,7 +617,9 @@
 int32_t WebRtcSpl_DivW32HiLow(int32_t num, int16_t den_hi, int16_t den_low);
 // End: Divisions.
 
-int32_t WebRtcSpl_Energy(int16_t* vector, int vector_length, int* scale_factor);
+int32_t WebRtcSpl_Energy(int16_t* vector,
+                         size_t vector_length,
+                         int* scale_factor);
 
 // Calculates the dot product between two (int16_t) vectors.
 //
@@ -632,21 +634,21 @@
 // Return value         : The dot product in Q(-scaling)
 int32_t WebRtcSpl_DotProductWithScale(const int16_t* vector1,
                                       const int16_t* vector2,
-                                      int length,
+                                      size_t length,
                                       int scaling);
 
 // Filter operations.
-int WebRtcSpl_FilterAR(const int16_t* ar_coef,
-                       int ar_coef_length,
-                       const int16_t* in_vector,
-                       int in_vector_length,
-                       int16_t* filter_state,
-                       int filter_state_length,
-                       int16_t* filter_state_low,
-                       int filter_state_low_length,
-                       int16_t* out_vector,
-                       int16_t* out_vector_low,
-                       int out_vector_low_length);
+size_t WebRtcSpl_FilterAR(const int16_t* ar_coef,
+                          size_t ar_coef_length,
+                          const int16_t* in_vector,
+                          size_t in_vector_length,
+                          int16_t* filter_state,
+                          size_t filter_state_length,
+                          int16_t* filter_state_low,
+                          size_t filter_state_low_length,
+                          int16_t* out_vector,
+                          int16_t* out_vector_low,
+                          size_t out_vector_low_length);
 
 // WebRtcSpl_FilterMAFastQ12(...)
 //
@@ -665,8 +667,8 @@
 void WebRtcSpl_FilterMAFastQ12(const int16_t* in_vector,
                                int16_t* out_vector,
                                const int16_t* ma_coef,
-                               int16_t ma_coef_length,
-                               int16_t vector_length);
+                               size_t ma_coef_length,
+                               size_t vector_length);
 
 // Performs AR filtering on a vector in Q12
 // Input:
@@ -681,8 +683,8 @@
 void WebRtcSpl_FilterARFastQ12(const int16_t* data_in,
                                int16_t* data_out,
                                const int16_t* __restrict coefficients,
-                               int coefficients_length,
-                               int data_length);
+                               size_t coefficients_length,
+                               size_t data_length);
 
 // The functions (with related pointer) perform an MA down-sampling filter
 // on a vector.
@@ -701,41 +703,41 @@
 //      - data_out           : Filtered samples
 // Return value              : 0 if OK, -1 if |in_vector| is too short
 typedef int (*DownsampleFast)(const int16_t* data_in,
-                              int data_in_length,
+                              size_t data_in_length,
                               int16_t* data_out,
-                              int data_out_length,
+                              size_t data_out_length,
                               const int16_t* __restrict coefficients,
-                              int coefficients_length,
+                              size_t coefficients_length,
                               int factor,
-                              int delay);
+                              size_t delay);
 extern DownsampleFast WebRtcSpl_DownsampleFast;
 int WebRtcSpl_DownsampleFastC(const int16_t* data_in,
-                              int data_in_length,
+                              size_t data_in_length,
                               int16_t* data_out,
-                              int data_out_length,
+                              size_t data_out_length,
                               const int16_t* __restrict coefficients,
-                              int coefficients_length,
+                              size_t coefficients_length,
                               int factor,
-                              int delay);
+                              size_t delay);
 #if (defined WEBRTC_DETECT_NEON) || (defined WEBRTC_HAS_NEON)
 int WebRtcSpl_DownsampleFastNeon(const int16_t* data_in,
-                                 int data_in_length,
+                                 size_t data_in_length,
                                  int16_t* data_out,
-                                 int data_out_length,
+                                 size_t data_out_length,
                                  const int16_t* __restrict coefficients,
-                                 int coefficients_length,
+                                 size_t coefficients_length,
                                  int factor,
-                                 int delay);
+                                 size_t delay);
 #endif
 #if defined(MIPS32_LE)
 int WebRtcSpl_DownsampleFast_mips(const int16_t* data_in,
-                                  int data_in_length,
+                                  size_t data_in_length,
                                   int16_t* data_out,
-                                  int data_out_length,
+                                  size_t data_out_length,
                                   const int16_t* __restrict coefficients,
-                                  int coefficients_length,
+                                  size_t coefficients_length,
                                   int factor,
-                                  int delay);
+                                  size_t delay);
 #endif
 
 // End: Filter operations.
@@ -844,11 +846,11 @@
  *
  ******************************************************************/
 
-void WebRtcSpl_Resample48khzTo32khz(const int32_t* In, int32_t* Out, int32_t K);
+void WebRtcSpl_Resample48khzTo32khz(const int32_t* In, int32_t* Out, size_t K);
 
-void WebRtcSpl_Resample32khzTo24khz(const int32_t* In, int32_t* Out, int32_t K);
+void WebRtcSpl_Resample32khzTo24khz(const int32_t* In, int32_t* Out, size_t K);
 
-void WebRtcSpl_Resample44khzTo32khz(const int32_t* In, int32_t* Out, int32_t K);
+void WebRtcSpl_Resample44khzTo32khz(const int32_t* In, int32_t* Out, size_t K);
 
 /*******************************************************************
  * resample_48khz.c
@@ -918,24 +920,24 @@
  *
  ******************************************************************/
 
-void WebRtcSpl_DownsampleBy2(const int16_t* in, int len,
+void WebRtcSpl_DownsampleBy2(const int16_t* in, size_t len,
                              int16_t* out, int32_t* filtState);
 
-void WebRtcSpl_UpsampleBy2(const int16_t* in, int len,
+void WebRtcSpl_UpsampleBy2(const int16_t* in, size_t len,
                            int16_t* out, int32_t* filtState);
 
 /************************************************************
  * END OF RESAMPLING FUNCTIONS
  ************************************************************/
 void WebRtcSpl_AnalysisQMF(const int16_t* in_data,
-                           int in_data_length,
+                           size_t in_data_length,
                            int16_t* low_band,
                            int16_t* high_band,
                            int32_t* filter_state1,
                            int32_t* filter_state2);
 void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
                             const int16_t* high_band,
-                            int band_length,
+                            size_t band_length,
                             int16_t* out_data,
                             int32_t* filter_state1,
                             int32_t* filter_state2);
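
The net caller-side effect of this header rewrite is that lengths derived from sizeof or container sizes flow through as size_t with no narrowing conversions. A hedged sketch (hypothetical caller):

#include <stddef.h>
#include <stdint.h>

extern int16_t WebRtcSpl_MaxAbsValueW16C(const int16_t* vector, size_t length);

int16_t frame_peak(const int16_t* frame, size_t samples) {
  /* Before this change, `samples` would typically be narrowed to int here,
   * with an implicit (and unchecked) assumption that it fit. */
  return WebRtcSpl_MaxAbsValueW16C(frame, samples);
}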
diff --git a/webrtc/common_audio/signal_processing/levinson_durbin.c b/webrtc/common_audio/signal_processing/levinson_durbin.c
index 536bd0b..d46e551 100644
--- a/webrtc/common_audio/signal_processing/levinson_durbin.c
+++ b/webrtc/common_audio/signal_processing/levinson_durbin.c
@@ -20,9 +20,9 @@
 #define SPL_LEVINSON_MAXORDER 20
 
 int16_t WebRtcSpl_LevinsonDurbin(const int32_t* R, int16_t* A, int16_t* K,
-                                 int16_t order)
+                                 size_t order)
 {
-    int16_t i, j;
+    size_t i, j;
     // Auto-correlation coefficients in high precision
     int16_t R_hi[SPL_LEVINSON_MAXORDER + 1], R_low[SPL_LEVINSON_MAXORDER + 1];
     // LPC coefficients in high precision
diff --git a/webrtc/common_audio/signal_processing/min_max_operations.c b/webrtc/common_audio/signal_processing/min_max_operations.c
index f6de072..22ce765 100644
--- a/webrtc/common_audio/signal_processing/min_max_operations.c
+++ b/webrtc/common_audio/signal_processing/min_max_operations.c
@@ -33,10 +33,11 @@
 // TODO(kma): Move the next six functions into min_max_operations_c.c.
 
 // Maximum absolute value of word16 vector. C version for generic platforms.
-int16_t WebRtcSpl_MaxAbsValueW16C(const int16_t* vector, int length) {
-  int i = 0, absolute = 0, maximum = 0;
+int16_t WebRtcSpl_MaxAbsValueW16C(const int16_t* vector, size_t length) {
+  size_t i = 0;
+  int absolute = 0, maximum = 0;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return -1;
   }
 
@@ -57,14 +58,14 @@
 }
 
 // Maximum absolute value of word32 vector. C version for generic platforms.
-int32_t WebRtcSpl_MaxAbsValueW32C(const int32_t* vector, int length) {
+int32_t WebRtcSpl_MaxAbsValueW32C(const int32_t* vector, size_t length) {
   // Use uint32_t for the local variables, to accommodate the return value
   // of abs(0x80000000), which is 0x80000000.
 
   uint32_t absolute = 0, maximum = 0;
-  int i = 0;
+  size_t i = 0;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return -1;
   }
 
@@ -81,11 +82,11 @@
 }
 
 // Maximum value of word16 vector. C version for generic platforms.
-int16_t WebRtcSpl_MaxValueW16C(const int16_t* vector, int length) {
+int16_t WebRtcSpl_MaxValueW16C(const int16_t* vector, size_t length) {
   int16_t maximum = WEBRTC_SPL_WORD16_MIN;
-  int i = 0;
+  size_t i = 0;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return maximum;
   }
 
@@ -97,11 +98,11 @@
 }
 
 // Maximum value of word32 vector. C version for generic platforms.
-int32_t WebRtcSpl_MaxValueW32C(const int32_t* vector, int length) {
+int32_t WebRtcSpl_MaxValueW32C(const int32_t* vector, size_t length) {
   int32_t maximum = WEBRTC_SPL_WORD32_MIN;
-  int i = 0;
+  size_t i = 0;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return maximum;
   }
 
@@ -113,11 +114,11 @@
 }
 
 // Minimum value of word16 vector. C version for generic platforms.
-int16_t WebRtcSpl_MinValueW16C(const int16_t* vector, int length) {
+int16_t WebRtcSpl_MinValueW16C(const int16_t* vector, size_t length) {
   int16_t minimum = WEBRTC_SPL_WORD16_MAX;
-  int i = 0;
+  size_t i = 0;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return minimum;
   }
 
@@ -129,11 +130,11 @@
 }
 
 // Minimum value of word32 vector. C version for generic platforms.
-int32_t WebRtcSpl_MinValueW32C(const int32_t* vector, int length) {
+int32_t WebRtcSpl_MinValueW32C(const int32_t* vector, size_t length) {
   int32_t minimum = WEBRTC_SPL_WORD32_MAX;
-  int i = 0;
+  size_t i = 0;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return minimum;
   }
 
@@ -145,12 +146,13 @@
 }
 
 // Index of maximum absolute value in a word16 vector.
-int WebRtcSpl_MaxAbsIndexW16(const int16_t* vector, int length) {
+int WebRtcSpl_MaxAbsIndexW16(const int16_t* vector, size_t length) {
   // Use type int for the absolute-value locals, to accommodate abs(-32768).
 
-  int i = 0, absolute = 0, maximum = 0, index = 0;
+  size_t i = 0, index = 0;
+  int absolute = 0, maximum = 0;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return -1;
   }
 
@@ -163,15 +165,15 @@
     }
   }
 
-  return index;
+  return (int)index;
 }
 
 // Index of maximum value in a word16 vector.
-int WebRtcSpl_MaxIndexW16(const int16_t* vector, int length) {
-  int i = 0, index = 0;
+int WebRtcSpl_MaxIndexW16(const int16_t* vector, size_t length) {
+  size_t i = 0, index = 0;
   int16_t maximum = WEBRTC_SPL_WORD16_MIN;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return -1;
   }
 
@@ -182,15 +184,15 @@
     }
   }
 
-  return index;
+  return (int)index;
 }
 
 // Index of maximum value in a word32 vector.
-int WebRtcSpl_MaxIndexW32(const int32_t* vector, int length) {
-  int i = 0, index = 0;
+int WebRtcSpl_MaxIndexW32(const int32_t* vector, size_t length) {
+  size_t i = 0, index = 0;
   int32_t maximum = WEBRTC_SPL_WORD32_MIN;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return -1;
   }
 
@@ -201,15 +203,15 @@
     }
   }
 
-  return index;
+  return (int)index;
 }
 
 // Index of minimum value in a word16 vector.
-int WebRtcSpl_MinIndexW16(const int16_t* vector, int length) {
-  int i = 0, index = 0;
+int WebRtcSpl_MinIndexW16(const int16_t* vector, size_t length) {
+  size_t i = 0, index = 0;
   int16_t minimum = WEBRTC_SPL_WORD16_MAX;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return -1;
   }
 
@@ -220,15 +222,15 @@
     }
   }
 
-  return index;
+  return (int)index;
 }
 
 // Index of minimum value in a word32 vector.
-int WebRtcSpl_MinIndexW32(const int32_t* vector, int length) {
-  int i = 0, index = 0;
+int WebRtcSpl_MinIndexW32(const int32_t* vector, size_t length) {
+  size_t i = 0, index = 0;
   int32_t minimum = WEBRTC_SPL_WORD32_MAX;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return -1;
   }
 
@@ -239,5 +241,5 @@
     }
   }
 
-  return index;
+  return (int)index;
 }
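
The index helpers deliberately keep int as the return type so that -1 remains usable as the error sentinel, which is why the internal size_t index is cast back on return; the trade-off is that indexable lengths are capped at INT_MAX. A caller sketch showing the sentinel still round-trips (hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

extern int WebRtcSpl_MaxIndexW16(const int16_t* vector, size_t length);

void print_peak(const int16_t* v, size_t len) {
  int idx = WebRtcSpl_MaxIndexW16(v, len);
  if (idx < 0) {
    printf("empty or null vector\n");  /* len == 0 or v == NULL */
  } else {
    printf("peak at index %d: %d\n", idx, v[idx]);
  }
}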
diff --git a/webrtc/common_audio/signal_processing/min_max_operations_mips.c b/webrtc/common_audio/signal_processing/min_max_operations_mips.c
index 5fd8600..a7d3251 100644
--- a/webrtc/common_audio/signal_processing/min_max_operations_mips.c
+++ b/webrtc/common_audio/signal_processing/min_max_operations_mips.c
@@ -19,12 +19,12 @@
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
 // Maximum absolute value of word16 vector.
-int16_t WebRtcSpl_MaxAbsValueW16_mips(const int16_t* vector, int length) {
+int16_t WebRtcSpl_MaxAbsValueW16_mips(const int16_t* vector, size_t length) {
   int32_t totMax = 0;
   int32_t tmp32_0, tmp32_1, tmp32_2, tmp32_3;
-  int i, loop_size;
+  size_t i, loop_size;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return -1;
   }
 #if defined(MIPS_DSP_R1)
@@ -222,14 +222,14 @@
 
 #if defined(MIPS_DSP_R1_LE)
 // Maximum absolute value of word32 vector. Version for MIPS platform.
-int32_t WebRtcSpl_MaxAbsValueW32_mips(const int32_t* vector, int length) {
+int32_t WebRtcSpl_MaxAbsValueW32_mips(const int32_t* vector, size_t length) {
   // Use uint32_t for the local variables, to accommodate the return value
   // of abs(0x80000000), which is 0x80000000.
 
   uint32_t absolute = 0, maximum = 0;
   int tmp1 = 0, max_value = 0x7fffffff;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return -1;
   }
 
@@ -260,12 +260,12 @@
 #endif  // #if defined(MIPS_DSP_R1_LE)
 
 // Maximum value of word16 vector. Version for MIPS platform.
-int16_t WebRtcSpl_MaxValueW16_mips(const int16_t* vector, int length) {
+int16_t WebRtcSpl_MaxValueW16_mips(const int16_t* vector, size_t length) {
   int16_t maximum = WEBRTC_SPL_WORD16_MIN;
   int tmp1;
   int16_t value;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return maximum;
   }
 
@@ -291,11 +291,11 @@
 }
 
 // Maximum value of word32 vector. Version for MIPS platform.
-int32_t WebRtcSpl_MaxValueW32_mips(const int32_t* vector, int length) {
+int32_t WebRtcSpl_MaxValueW32_mips(const int32_t* vector, size_t length) {
   int32_t maximum = WEBRTC_SPL_WORD32_MIN;
   int tmp1, value;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return maximum;
   }
 
@@ -322,12 +322,12 @@
 }
 
 // Minimum value of word16 vector. Version for MIPS platform.
-int16_t WebRtcSpl_MinValueW16_mips(const int16_t* vector, int length) {
+int16_t WebRtcSpl_MinValueW16_mips(const int16_t* vector, size_t length) {
   int16_t minimum = WEBRTC_SPL_WORD16_MAX;
   int tmp1;
   int16_t value;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return minimum;
   }
 
@@ -354,11 +354,11 @@
 }
 
 // Minimum value of word32 vector. Version for MIPS platform.
-int32_t WebRtcSpl_MinValueW32_mips(const int32_t* vector, int length) {
+int32_t WebRtcSpl_MinValueW32_mips(const int32_t* vector, size_t length) {
   int32_t minimum = WEBRTC_SPL_WORD32_MAX;
   int tmp1, value;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return minimum;
   }
 
diff --git a/webrtc/common_audio/signal_processing/min_max_operations_neon.c b/webrtc/common_audio/signal_processing/min_max_operations_neon.c
index e4b3041..ee8bef1 100644
--- a/webrtc/common_audio/signal_processing/min_max_operations_neon.c
+++ b/webrtc/common_audio/signal_processing/min_max_operations_neon.c
@@ -14,15 +14,15 @@
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
 // Maximum absolute value of word16 vector. NEON version for ARM platforms.
-int16_t WebRtcSpl_MaxAbsValueW16Neon(const int16_t* vector, int length) {
+int16_t WebRtcSpl_MaxAbsValueW16Neon(const int16_t* vector, size_t length) {
   int absolute = 0, maximum = 0;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return -1;
   }
 
   const int16_t* p_start = vector;
-  int rest = length & 7;
+  size_t rest = length & 7;
   const int16_t* p_end = vector + length - rest;
 
   int16x8_t v;
@@ -69,15 +69,15 @@
 
 // Maximum absolute value of word32 vector. NEON intrinsics version for
 // ARM 32-bit/64-bit platforms.
-int32_t WebRtcSpl_MaxAbsValueW32Neon(const int32_t* vector, int length) {
+int32_t WebRtcSpl_MaxAbsValueW32Neon(const int32_t* vector, size_t length) {
   // Use uint32_t for the local variables, to accommodate the return value
   // of abs(0x80000000), which is 0x80000000.
 
   uint32_t absolute = 0, maximum = 0;
-  int i = 0;
-  int residual = length & 0x7;
+  size_t i = 0;
+  size_t residual = length & 0x7;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return -1;
   }
 
@@ -126,12 +126,12 @@
 
 // Maximum value of word16 vector. NEON intrinsics version for
 // ARM 32-bit/64-bit platforms.
-int16_t WebRtcSpl_MaxValueW16Neon(const int16_t* vector, int length) {
+int16_t WebRtcSpl_MaxValueW16Neon(const int16_t* vector, size_t length) {
   int16_t maximum = WEBRTC_SPL_WORD16_MIN;
-  int i = 0;
-  int residual = length & 0x7;
+  size_t i = 0;
+  size_t residual = length & 0x7;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return maximum;
   }
 
@@ -166,12 +166,12 @@
 
 // Maximum value of word32 vector. NEON intrinsics version for
 // ARM 32-bit/64-bit platforms.
-int32_t WebRtcSpl_MaxValueW32Neon(const int32_t* vector, int length) {
+int32_t WebRtcSpl_MaxValueW32Neon(const int32_t* vector, size_t length) {
   int32_t maximum = WEBRTC_SPL_WORD32_MIN;
-  int i = 0;
-  int residual = length & 0x7;
+  size_t i = 0;
+  size_t residual = length & 0x7;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return maximum;
   }
 
@@ -210,12 +210,12 @@
 
 // Minimum value of word16 vector. NEON intrinsics version for
 // ARM 32-bit/64-bit platforms.
-int16_t WebRtcSpl_MinValueW16Neon(const int16_t* vector, int length) {
+int16_t WebRtcSpl_MinValueW16Neon(const int16_t* vector, size_t length) {
   int16_t minimum = WEBRTC_SPL_WORD16_MAX;
-  int i = 0;
-  int residual = length & 0x7;
+  size_t i = 0;
+  size_t residual = length & 0x7;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return minimum;
   }
 
@@ -250,12 +250,12 @@
 
 // Minimum value of word32 vector. NEON intrinsics version for
 // ARM 32-bit/64-bit platforms.
-int32_t WebRtcSpl_MinValueW32Neon(const int32_t* vector, int length) {
+int32_t WebRtcSpl_MinValueW32Neon(const int32_t* vector, size_t length) {
   int32_t minimum = WEBRTC_SPL_WORD32_MAX;
-  int i = 0;
-  int residual = length & 0x7;
+  size_t i = 0;
+  size_t residual = length & 0x7;
 
-  if (vector == NULL || length <= 0) {
+  if (vector == NULL || length == 0) {
     return minimum;
   }
 
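
Throughout this file, residual = |length| & 0x7 (now a size_t) splits the work into an eight-wide main loop and a scalar tail. A scalar stand-in for that split (a sketch, not the NEON code itself):

#include <stddef.h>
#include <stdint.h>

int16_t max_value_split_sketch(const int16_t* v, size_t length) {
  int16_t maximum = INT16_MIN;
  size_t residual = length & 0x7;  /* 0..7 tail samples */
  size_t i = 0;
  for (; i < length - residual; i += 8) {  /* stands in for the 8-lane body */
    for (size_t j = 0; j < 8; j++)
      if (v[i + j] > maximum) maximum = v[i + j];
  }
  for (; i < length; i++)  /* scalar tail */
    if (v[i] > maximum) maximum = v[i];
  return maximum;
}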
diff --git a/webrtc/common_audio/signal_processing/resample_by_2.c b/webrtc/common_audio/signal_processing/resample_by_2.c
index 9c0784e..dcba82e 100644
--- a/webrtc/common_audio/signal_processing/resample_by_2.c
+++ b/webrtc/common_audio/signal_processing/resample_by_2.c
@@ -67,10 +67,10 @@
 
 // decimator
 #if !defined(MIPS32_LE)
-void WebRtcSpl_DownsampleBy2(const int16_t* in, int len,
+void WebRtcSpl_DownsampleBy2(const int16_t* in, size_t len,
                              int16_t* out, int32_t* filtState) {
   int32_t tmp1, tmp2, diff, in32, out32;
-  int i;
+  size_t i;
 
   register int32_t state0 = filtState[0];
   register int32_t state1 = filtState[1];
@@ -125,10 +125,10 @@
 #endif  // #if defined(MIPS32_LE)
 
 
-void WebRtcSpl_UpsampleBy2(const int16_t* in, int len,
+void WebRtcSpl_UpsampleBy2(const int16_t* in, size_t len,
                            int16_t* out, int32_t* filtState) {
   int32_t tmp1, tmp2, diff, in32, out32;
-  int i;
+  size_t i;
 
   register int32_t state0 = filtState[0];
   register int32_t state1 = filtState[1];
diff --git a/webrtc/common_audio/signal_processing/resample_by_2_mips.c b/webrtc/common_audio/signal_processing/resample_by_2_mips.c
index 6ffce55..ec5fc8b 100644
--- a/webrtc/common_audio/signal_processing/resample_by_2_mips.c
+++ b/webrtc/common_audio/signal_processing/resample_by_2_mips.c
@@ -29,11 +29,11 @@
 
 // decimator
 void WebRtcSpl_DownsampleBy2(const int16_t* in,
-                             int len,
+                             size_t len,
                              int16_t* out,
                              int32_t* filtState) {
   int32_t out32;
-  int i, len1;
+  size_t i, len1;
 
   register int32_t state0 = filtState[0];
   register int32_t state1 = filtState[1];
diff --git a/webrtc/common_audio/signal_processing/resample_fractional.c b/webrtc/common_audio/signal_processing/resample_fractional.c
index b8fb22b..6409fba 100644
--- a/webrtc/common_audio/signal_processing/resample_fractional.c
+++ b/webrtc/common_audio/signal_processing/resample_fractional.c
@@ -41,7 +41,7 @@
 // output: int32_t (shifted 15 positions to the left, + offset 16384) :: size 2 * K
 //      K: number of blocks
 
-void WebRtcSpl_Resample48khzTo32khz(const int32_t *In, int32_t *Out, int32_t K)
+void WebRtcSpl_Resample48khzTo32khz(const int32_t *In, int32_t *Out, size_t K)
 {
     /////////////////////////////////////////////////////////////
     // Filter operation:
@@ -49,7 +49,7 @@
     // Perform resampling (3 input samples -> 2 output samples);
     // process in sub blocks of size 3 samples.
     int32_t tmp;
-    int32_t m;
+    size_t m;
 
     for (m = 0; m < K; m++)
     {
@@ -86,14 +86,14 @@
 // output: int32_t (shifted 15 positions to the left, + offset 16384) :: size 3 * K
 //      K: number of blocks
 
-void WebRtcSpl_Resample32khzTo24khz(const int32_t *In, int32_t *Out, int32_t K)
+void WebRtcSpl_Resample32khzTo24khz(const int32_t *In, int32_t *Out, size_t K)
 {
     /////////////////////////////////////////////////////////////
     // Filter operation:
     //
     // Perform resampling (4 input samples -> 3 output samples);
     // process in sub blocks of size 4 samples.
-    int32_t m;
+    size_t m;
     int32_t tmp;
 
     for (m = 0; m < K; m++)
@@ -194,7 +194,7 @@
 // output: int32_t (shifted 15 positions to the left, + offset 16384) :: size  8 * K
 //      K: number of blocks
 
-void WebRtcSpl_Resample44khzTo32khz(const int32_t *In, int32_t *Out, int32_t K)
+void WebRtcSpl_Resample44khzTo32khz(const int32_t *In, int32_t *Out, size_t K)
 {
     /////////////////////////////////////////////////////////////
     // Filter operation:
@@ -202,7 +202,7 @@
     // Perform resampling (11 input samples -> 8 output samples);
     // process in sub blocks of size 11 samples.
     int32_t tmp;
-    int32_t m;
+    size_t m;
 
     for (m = 0; m < K; m++)
     {
diff --git a/webrtc/common_audio/signal_processing/signal_processing_unittest.cc b/webrtc/common_audio/signal_processing/signal_processing_unittest.cc
index cc05dfa..a593c4d 100644
--- a/webrtc/common_audio/signal_processing/signal_processing_unittest.cc
+++ b/webrtc/common_audio/signal_processing/signal_processing_unittest.cc
@@ -11,7 +11,7 @@
 #include "testing/gtest/include/gtest/gtest.h"
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
-static const int kVector16Size = 9;
+static const size_t kVector16Size = 9;
 static const int16_t vector16[kVector16Size] = {1, -15511, 4323, 1963,
   WEBRTC_SPL_WORD16_MAX, 0, WEBRTC_SPL_WORD16_MIN + 5, -3333, 345};
 
@@ -157,7 +157,7 @@
 }
 
 TEST_F(SplTest, BasicArrayOperationsTest) {
-    const int kVectorSize = 4;
+    const size_t kVectorSize = 4;
     int B[] = {4, 12, 133, 1100};
     int16_t b16[kVectorSize];
     int32_t b32[kVectorSize];
@@ -166,27 +166,27 @@
     int32_t bTmp32[kVectorSize];
 
     WebRtcSpl_MemSetW16(b16, 3, kVectorSize);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ(3, b16[kk]);
     }
     WebRtcSpl_ZerosArrayW16(b16, kVectorSize);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ(0, b16[kk]);
     }
     WebRtcSpl_MemSetW32(b32, 3, kVectorSize);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ(3, b32[kk]);
     }
     WebRtcSpl_ZerosArrayW32(b32, kVectorSize);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ(0, b32[kk]);
     }
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         bTmp16[kk] = (int16_t)kk;
         bTmp32[kk] = (int32_t)kk;
     }
     WEBRTC_SPL_MEMCPY_W16(b16, bTmp16, kVectorSize);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ(b16[kk], bTmp16[kk]);
     }
 //    WEBRTC_SPL_MEMCPY_W32(b32, bTmp32, kVectorSize);
@@ -194,36 +194,36 @@
 //        EXPECT_EQ(b32[kk], bTmp32[kk]);
 //    }
     WebRtcSpl_CopyFromEndW16(b16, kVectorSize, 2, bTmp16);
-    for (int kk = 0; kk < 2; ++kk) {
-        EXPECT_EQ(kk+2, bTmp16[kk]);
+    for (size_t kk = 0; kk < 2; ++kk) {
+        EXPECT_EQ(static_cast<int16_t>(kk+2), bTmp16[kk]);
     }
 
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         b32[kk] = B[kk];
         b16[kk] = (int16_t)B[kk];
     }
     WebRtcSpl_VectorBitShiftW32ToW16(bTmp16, kVectorSize, b32, 1);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ((B[kk]>>1), bTmp16[kk]);
     }
     WebRtcSpl_VectorBitShiftW16(bTmp16, kVectorSize, b16, 1);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ((B[kk]>>1), bTmp16[kk]);
     }
     WebRtcSpl_VectorBitShiftW32(bTmp32, kVectorSize, b32, 1);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ((B[kk]>>1), bTmp32[kk]);
     }
 
     WebRtcSpl_MemCpyReversedOrder(&bTmp16[3], b16, kVectorSize);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ(b16[3-kk], bTmp16[kk]);
     }
 }
 
 TEST_F(SplTest, ExeptionsHandlingMinMaxOperationsTest) {
   // Test how the functions handle exceptional cases.
-  const int kVectorSize = 2;
+  const size_t kVectorSize = 2;
   int16_t vector16[kVectorSize] = {0};
   int32_t vector32[kVectorSize] = {0};
 
@@ -252,7 +252,7 @@
 }
 
 TEST_F(SplTest, MinMaxOperationsTest) {
-  const int kVectorSize = 17;
+  const size_t kVectorSize = 17;
 
   // Vectors to test the cases where minimum values have to be caught
   // outside of the unrolled loops in ARM-Neon.
@@ -267,8 +267,10 @@
             WebRtcSpl_MinValueW16(vector16, kVectorSize));
   EXPECT_EQ(WEBRTC_SPL_WORD32_MIN,
             WebRtcSpl_MinValueW32(vector32, kVectorSize));
-  EXPECT_EQ(kVectorSize - 1, WebRtcSpl_MinIndexW16(vector16, kVectorSize));
-  EXPECT_EQ(kVectorSize - 1, WebRtcSpl_MinIndexW32(vector32, kVectorSize));
+  EXPECT_EQ(static_cast<int>(kVectorSize - 1),
+            WebRtcSpl_MinIndexW16(vector16, kVectorSize));
+  EXPECT_EQ(static_cast<int>(kVectorSize - 1),
+            WebRtcSpl_MinIndexW32(vector32, kVectorSize));
 
   // Test the cases where maximum values have to be caught
   // outside of the unrolled loops in ARM-Neon.
@@ -283,9 +285,12 @@
             WebRtcSpl_MaxAbsValueW32(vector32, kVectorSize));
   EXPECT_EQ(WEBRTC_SPL_WORD32_MAX,
             WebRtcSpl_MaxValueW32(vector32, kVectorSize));
-  EXPECT_EQ(kVectorSize - 1, WebRtcSpl_MaxAbsIndexW16(vector16, kVectorSize));
-  EXPECT_EQ(kVectorSize - 1, WebRtcSpl_MaxIndexW16(vector16, kVectorSize));
-  EXPECT_EQ(kVectorSize - 1, WebRtcSpl_MaxIndexW32(vector32, kVectorSize));
+  EXPECT_EQ(static_cast<int>(kVectorSize - 1),
+            WebRtcSpl_MaxAbsIndexW16(vector16, kVectorSize));
+  EXPECT_EQ(static_cast<int>(kVectorSize - 1),
+            WebRtcSpl_MaxIndexW16(vector16, kVectorSize));
+  EXPECT_EQ(static_cast<int>(kVectorSize - 1),
+            WebRtcSpl_MaxIndexW32(vector32, kVectorSize));
 
   // Test the cases where multiple maximum and minimum values are present.
   vector16[1] = WEBRTC_SPL_WORD16_MAX;
@@ -315,59 +320,59 @@
 }
 
 TEST_F(SplTest, VectorOperationsTest) {
-    const int kVectorSize = 4;
+    const size_t kVectorSize = 4;
     int B[] = {4, 12, 133, 1100};
     int16_t a16[kVectorSize];
     int16_t b16[kVectorSize];
     int16_t bTmp16[kVectorSize];
 
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         a16[kk] = B[kk];
         b16[kk] = B[kk];
     }
 
     WebRtcSpl_AffineTransformVector(bTmp16, b16, 3, 7, 2, kVectorSize);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ((B[kk]*3+7)>>2, bTmp16[kk]);
     }
     WebRtcSpl_ScaleAndAddVectorsWithRound(b16, 3, b16, 2, 2, bTmp16, kVectorSize);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ((B[kk]*3+B[kk]*2+2)>>2, bTmp16[kk]);
     }
 
     WebRtcSpl_AddAffineVectorToVector(bTmp16, b16, 3, 7, 2, kVectorSize);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ(((B[kk]*3+B[kk]*2+2)>>2)+((b16[kk]*3+7)>>2), bTmp16[kk]);
     }
 
     WebRtcSpl_ScaleVector(b16, bTmp16, 13, kVectorSize, 2);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ((b16[kk]*13)>>2, bTmp16[kk]);
     }
     WebRtcSpl_ScaleVectorWithSat(b16, bTmp16, 13, kVectorSize, 2);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ((b16[kk]*13)>>2, bTmp16[kk]);
     }
     WebRtcSpl_ScaleAndAddVectors(a16, 13, 2, b16, 7, 2, bTmp16, kVectorSize);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ(((a16[kk]*13)>>2)+((b16[kk]*7)>>2), bTmp16[kk]);
     }
 
     WebRtcSpl_AddVectorsAndShift(bTmp16, a16, b16, kVectorSize, 2);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ(B[kk] >> 1, bTmp16[kk]);
     }
     WebRtcSpl_ReverseOrderMultArrayElements(bTmp16, a16, &b16[3], kVectorSize, 2);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ((a16[kk]*b16[3-kk])>>2, bTmp16[kk]);
     }
     WebRtcSpl_ElementwiseVectorMult(bTmp16, a16, b16, kVectorSize, 6);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ((a16[kk]*b16[kk])>>6, bTmp16[kk]);
     }
 
     WebRtcSpl_SqrtOfOneMinusXSquared(b16, kVectorSize, bTmp16);
-    for (int kk = 0; kk < kVectorSize - 1; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize - 1; ++kk) {
         EXPECT_EQ(32767, bTmp16[kk]);
     }
     EXPECT_EQ(32749, bTmp16[kVectorSize - 1]);
@@ -376,7 +381,7 @@
 }
 
 TEST_F(SplTest, EstimatorsTest) {
-  const int16_t kOrder = 2;
+  const size_t kOrder = 2;
   const int32_t unstable_filter[] = { 4, 12, 133, 1100 };
   const int32_t stable_filter[] = { 1100, 133, 12, 4 };
   int16_t lpc[kOrder + 2] = { 0 };
@@ -386,15 +391,15 @@
 
   EXPECT_EQ(0, WebRtcSpl_LevinsonDurbin(unstable_filter, lpc, refl, kOrder));
   EXPECT_EQ(1, WebRtcSpl_LevinsonDurbin(stable_filter, lpc, refl, kOrder));
-  for (int i = 0; i < kOrder + 2; ++i) {
+  for (size_t i = 0; i < kOrder + 2; ++i) {
     EXPECT_EQ(lpc_result[i], lpc[i]);
     EXPECT_EQ(refl_result[i], refl[i]);
   }
 }
 
 TEST_F(SplTest, FilterTest) {
-    const int kVectorSize = 4;
-    const int kFilterOrder = 3;
+    const size_t kVectorSize = 4;
+    const size_t kFilterOrder = 3;
     int16_t A[] = {1, 2, 33, 100};
     int16_t A5[] = {1, 2, 33, 100, -5};
     int16_t B[] = {4, 12, 133, 110};
@@ -407,7 +412,7 @@
     WebRtcSpl_ZerosArrayW16(bState, kVectorSize);
     WebRtcSpl_ZerosArrayW16(bStateLow, kVectorSize);
 
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         data_in[kk] = A[kk];
         data_out[kk] = 0;
     }
@@ -460,10 +465,10 @@
 
 TEST_F(SplTest, CrossCorrelationTest) {
   // Note the relation between the function arguments specified by the API.
-  const int kCrossCorrelationDimension = 3;
+  const size_t kCrossCorrelationDimension = 3;
   const int kShift = 2;
   const int kStep = 1;
-  const int kSeqDimension = 6;
+  const size_t kSeqDimension = 6;
 
   const int16_t kVector16[kVector16Size] = {1, 4323, 1963,
     WEBRTC_SPL_WORD16_MAX, WEBRTC_SPL_WORD16_MIN + 5, -3333, -876, 8483, 142};
@@ -484,7 +489,7 @@
     expected = kExpectedNeon;
   }
 #endif
-  for (int i = 0; i < kCrossCorrelationDimension; ++i) {
+  for (size_t i = 0; i < kCrossCorrelationDimension; ++i) {
     EXPECT_EQ(expected[i], vector32[i]);
   }
 }
@@ -497,17 +502,17 @@
 
   EXPECT_EQ(-1, WebRtcSpl_AutoCorrelation(vector16, kVector16Size,
                                           kVector16Size + 1, vector32, &scale));
-  EXPECT_EQ(kVector16Size,
+  EXPECT_EQ(static_cast<int>(kVector16Size),
             WebRtcSpl_AutoCorrelation(vector16, kVector16Size,
                                       kVector16Size - 1, vector32, &scale));
   EXPECT_EQ(3, scale);
-  for (int i = 0; i < kVector16Size; ++i) {
+  for (size_t i = 0; i < kVector16Size; ++i) {
     EXPECT_EQ(expected[i], vector32[i]);
   }
 }
 
 TEST_F(SplTest, SignalProcessingTest) {
-    const int kVectorSize = 4;
+    const size_t kVectorSize = 4;
     int A[] = {1, 2, 33, 100};
     const int16_t kHanning[4] = { 2399, 8192, 13985, 16384 };
     int16_t b16[kVectorSize];
@@ -516,7 +521,7 @@
 
     int bScale = 0;
 
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         b16[kk] = A[kk];
     }
 
@@ -535,11 +540,11 @@
 ////    }
 
     WebRtcSpl_GetHanningWindow(bTmp16, kVectorSize);
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         EXPECT_EQ(kHanning[kk], bTmp16[kk]);
     }
 
-    for (int kk = 0; kk < kVectorSize; ++kk) {
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
         b16[kk] = A[kk];
     }
     EXPECT_EQ(11094, WebRtcSpl_Energy(b16, kVectorSize, &bScale));
@@ -569,7 +574,7 @@
 TEST_F(SplTest, Resample48WithSaturationTest) {
   // The test resamples 3*kBlockSize number of samples to 2*kBlockSize number
   // of samples.
-  const int kBlockSize = 16;
+  const size_t kBlockSize = 16;
 
   // Saturated input vector of 48 samples.
   const int32_t kVectorSaturated[3 * kBlockSize + 7] = {
@@ -600,11 +605,11 @@
 
   // Comparing output values against references. The values at positions
   // 12-15 are skipped to account for the filter lag.
-  for (int i = 0; i < 12; ++i) {
+  for (size_t i = 0; i < 12; ++i) {
     EXPECT_EQ(kRefValue32kHz1, out_vector[i]);
     EXPECT_EQ(kRefValue16kHz1, out_vector_w16[i]);
   }
-  for (int i = 16; i < 2 * kBlockSize; ++i) {
+  for (size_t i = 16; i < 2 * kBlockSize; ++i) {
     EXPECT_EQ(kRefValue32kHz2, out_vector[i]);
     EXPECT_EQ(kRefValue16kHz2, out_vector_w16[i]);
   }
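
Many of the unittest edits above exist only to keep EXPECT_EQ from comparing signed and unsigned operands once the sizes become size_t: the index-returning SPL functions still return int, hence the static_cast<int> on the expected side. A minimal sketch of the underlying issue without gtest (MinIndex() is a hypothetical stand-in for something like WebRtcSpl_MinIndexW16()):

```cpp
#include <cstddef>

int MinIndex() { return 16; }  // Hypothetical stand-in for an SPL function
                               // that still returns an int index.

bool Check() {
  const size_t kVectorSize = 17;
  // `kVectorSize - 1 == MinIndex()` would compare unsigned against signed and
  // trip -Wsign-compare (fatal under -Werror). Casting the size_t side is
  // safe here because the value is tiny.
  return static_cast<int>(kVectorSize - 1) == MinIndex();
}
```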
diff --git a/webrtc/common_audio/signal_processing/splitting_filter.c b/webrtc/common_audio/signal_processing/splitting_filter.c
index 7ae281c..36fcf35 100644
--- a/webrtc/common_audio/signal_processing/splitting_filter.c
+++ b/webrtc/common_audio/signal_processing/splitting_filter.c
@@ -45,7 +45,7 @@
 //                            |data_length|
 //
 
-void WebRtcSpl_AllPassQMF(int32_t* in_data, int data_length,
+void WebRtcSpl_AllPassQMF(int32_t* in_data, size_t data_length,
                           int32_t* out_data, const uint16_t* filter_coefficients,
                           int32_t* filter_state)
 {
@@ -65,7 +65,7 @@
     // filter operation takes the |in_data| (which is the output from the previous cascade
     // filter) and stores the output in |out_data|.
     // Note that the input vector values are changed during the process.
-    int k;
+    size_t k;
     int32_t diff;
     // First all-pass cascade; filter from in_data to out_data.
 
@@ -124,18 +124,18 @@
     filter_state[5] = out_data[data_length - 1]; // y[N-1], becomes y[-1] next time
 }
 
-void WebRtcSpl_AnalysisQMF(const int16_t* in_data, int in_data_length,
+void WebRtcSpl_AnalysisQMF(const int16_t* in_data, size_t in_data_length,
                            int16_t* low_band, int16_t* high_band,
                            int32_t* filter_state1, int32_t* filter_state2)
 {
-    int16_t i;
+    size_t i;
     int16_t k;
     int32_t tmp;
     int32_t half_in1[kMaxBandFrameLength];
     int32_t half_in2[kMaxBandFrameLength];
     int32_t filter1[kMaxBandFrameLength];
     int32_t filter2[kMaxBandFrameLength];
-    const int band_length = in_data_length / 2;
+    const size_t band_length = in_data_length / 2;
     assert(in_data_length % 2 == 0);
     assert(band_length <= kMaxBandFrameLength);
 
@@ -165,7 +165,7 @@
 }
 
 void WebRtcSpl_SynthesisQMF(const int16_t* low_band, const int16_t* high_band,
-                            int band_length, int16_t* out_data,
+                            size_t band_length, int16_t* out_data,
                             int32_t* filter_state1, int32_t* filter_state2)
 {
     int32_t tmp;
@@ -173,7 +173,7 @@
     int32_t half_in2[kMaxBandFrameLength];
     int32_t filter1[kMaxBandFrameLength];
     int32_t filter2[kMaxBandFrameLength];
-    int16_t i;
+    size_t i;
     int16_t k;
     assert(band_length <= kMaxBandFrameLength);
 
diff --git a/webrtc/common_audio/signal_processing/sqrt_of_one_minus_x_squared.c b/webrtc/common_audio/signal_processing/sqrt_of_one_minus_x_squared.c
index f9570f3..ff78b52 100644
--- a/webrtc/common_audio/signal_processing/sqrt_of_one_minus_x_squared.c
+++ b/webrtc/common_audio/signal_processing/sqrt_of_one_minus_x_squared.c
@@ -17,11 +17,11 @@
 
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
-void WebRtcSpl_SqrtOfOneMinusXSquared(int16_t *xQ15, int vector_length,
+void WebRtcSpl_SqrtOfOneMinusXSquared(int16_t *xQ15, size_t vector_length,
                                       int16_t *yQ15)
 {
     int32_t sq;
-    int m;
+    size_t m;
     int16_t tmp;
 
     for (m = 0; m < vector_length; m++)
diff --git a/webrtc/common_audio/signal_processing/vector_scaling_operations.c b/webrtc/common_audio/signal_processing/vector_scaling_operations.c
index 9ae7480..fdefd06 100644
--- a/webrtc/common_audio/signal_processing/vector_scaling_operations.c
+++ b/webrtc/common_audio/signal_processing/vector_scaling_operations.c
@@ -22,10 +22,10 @@
 
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
-void WebRtcSpl_VectorBitShiftW16(int16_t *res, int16_t length,
+void WebRtcSpl_VectorBitShiftW16(int16_t *res, size_t length,
                                  const int16_t *in, int16_t right_shifts)
 {
-    int i;
+    size_t i;
 
     if (right_shifts > 0)
     {
@@ -43,11 +43,11 @@
 }
 
 void WebRtcSpl_VectorBitShiftW32(int32_t *out_vector,
-                                 int16_t vector_length,
+                                 size_t vector_length,
                                  const int32_t *in_vector,
                                  int16_t right_shifts)
 {
-    int i;
+    size_t i;
 
     if (right_shifts > 0)
     {
@@ -64,9 +64,9 @@
     }
 }
 
-void WebRtcSpl_VectorBitShiftW32ToW16(int16_t* out, int length,
+void WebRtcSpl_VectorBitShiftW32ToW16(int16_t* out, size_t length,
                                       const int32_t* in, int right_shifts) {
-  int i;
+  size_t i;
   int32_t tmp_w32;
 
   if (right_shifts >= 0) {
@@ -84,11 +84,11 @@
 }
 
 void WebRtcSpl_ScaleVector(const int16_t *in_vector, int16_t *out_vector,
-                           int16_t gain, int16_t in_vector_length,
+                           int16_t gain, size_t in_vector_length,
                            int16_t right_shifts)
 {
     // Performs vector operation: out_vector = (gain*in_vector)>>right_shifts
-    int i;
+    size_t i;
     const int16_t *inptr;
     int16_t *outptr;
 
@@ -102,11 +102,11 @@
 }
 
 void WebRtcSpl_ScaleVectorWithSat(const int16_t *in_vector, int16_t *out_vector,
-                                 int16_t gain, int16_t in_vector_length,
+                                 int16_t gain, size_t in_vector_length,
                                  int16_t right_shifts)
 {
     // Performs vector operation: out_vector = (gain*in_vector)>>right_shifts
-    int i;
+    size_t i;
     const int16_t *inptr;
     int16_t *outptr;
 
@@ -120,10 +120,10 @@
 
 void WebRtcSpl_ScaleAndAddVectors(const int16_t *in1, int16_t gain1, int shift1,
                                   const int16_t *in2, int16_t gain2, int shift2,
-                                  int16_t *out, int vector_length)
+                                  int16_t *out, size_t vector_length)
 {
     // Performs vector operation: out = (gain1*in1)>>shift1 + (gain2*in2)>>shift2
-    int i;
+    size_t i;
     const int16_t *in1ptr;
     const int16_t *in2ptr;
     int16_t *outptr;
@@ -146,12 +146,12 @@
                                            int16_t in_vector2_scale,
                                            int right_shifts,
                                            int16_t* out_vector,
-                                           int length) {
-  int i = 0;
+                                           size_t length) {
+  size_t i = 0;
   int round_value = (1 << right_shifts) >> 1;
 
   if (in_vector1 == NULL || in_vector2 == NULL || out_vector == NULL ||
-      length <= 0 || right_shifts < 0) {
+      length == 0 || right_shifts < 0) {
     return -1;
   }
 
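The guard change from `length <= 0` to `length == 0` above is forced by the unsigned type: for a size_t the two are equivalent, and compilers flag the `<= 0` form as a likely logic slip. The sharper edge is at call sites, where a negative length no longer fails the check but wraps instead. A small illustration (the printed value assumes an LP64 target):

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  int bad_length = -1;
  // Implicit conversion to size_t wraps the negative value to SIZE_MAX, so a
  // `length == 0` guard will not catch it; callers must not pass negatives.
  size_t length = static_cast<size_t>(bad_length);
  std::printf("%zu\n", length);  // 18446744073709551615 on LP64 targets.
  return length == 0;  // False: the old `<= 0` check would not fire either.
}
```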
diff --git a/webrtc/common_audio/signal_processing/vector_scaling_operations_mips.c b/webrtc/common_audio/signal_processing/vector_scaling_operations_mips.c
index 5ddcd2d..dd73eea 100644
--- a/webrtc/common_audio/signal_processing/vector_scaling_operations_mips.c
+++ b/webrtc/common_audio/signal_processing/vector_scaling_operations_mips.c
@@ -22,15 +22,16 @@
                                                int16_t in_vector2_scale,
                                                int right_shifts,
                                                int16_t* out_vector,
-                                               int length) {
+                                               size_t length) {
   int16_t r0 = 0, r1 = 0;
   int16_t *in1 = (int16_t*)in_vector1;
   int16_t *in2 = (int16_t*)in_vector2;
   int16_t *out = out_vector;
-  int i = 0, value32 = 0;
+  size_t i = 0;
+  int value32 = 0;
 
   if (in_vector1 == NULL || in_vector2 == NULL || out_vector == NULL ||
-      length <= 0 || right_shifts < 0) {
+      length == 0 || right_shifts < 0) {
     return -1;
   }
   for (i = 0; i < length; i++) {
diff --git a/webrtc/common_audio/sparse_fir_filter.cc b/webrtc/common_audio/sparse_fir_filter.cc
index ce98846..28bc013 100644
--- a/webrtc/common_audio/sparse_fir_filter.cc
+++ b/webrtc/common_audio/sparse_fir_filter.cc
@@ -15,24 +15,24 @@
 namespace webrtc {
 
 SparseFIRFilter::SparseFIRFilter(const float* nonzero_coeffs,
-                                 int num_nonzero_coeffs,
-                                 int sparsity,
-                                 int offset)
+                                 size_t num_nonzero_coeffs,
+                                 size_t sparsity,
+                                 size_t offset)
     : sparsity_(sparsity),
       offset_(offset),
       nonzero_coeffs_(nonzero_coeffs, nonzero_coeffs + num_nonzero_coeffs),
       state_(sparsity_ * (num_nonzero_coeffs - 1) + offset_, 0.f) {
-  CHECK_GE(num_nonzero_coeffs, 1);
-  CHECK_GE(sparsity, 1);
+  CHECK_GE(num_nonzero_coeffs, 1u);
+  CHECK_GE(sparsity, 1u);
 }
 
-void SparseFIRFilter::Filter(const float* in, int length, float* out) {
+void SparseFIRFilter::Filter(const float* in, size_t length, float* out) {
   // Convolves the input signal |in| with the filter kernel |nonzero_coeffs_|
   // taking into account the previous state.
-  for (int i = 0; i < length; ++i) {
+  for (size_t i = 0; i < length; ++i) {
     out[i] = 0.f;
     size_t j;
-    for (j = 0; i >= static_cast<int>(j) * sparsity_ + offset_ &&
+    for (j = 0; i >= j * sparsity_ + offset_ &&
                 j < nonzero_coeffs_.size(); ++j) {
       out[i] += in[i - j * sparsity_ - offset_] * nonzero_coeffs_[j];
     }
@@ -44,7 +44,7 @@
 
   // Update current state.
   if (state_.size() > 0u) {
-    if (length >= static_cast<int>(state_.size())) {
+    if (length >= state_.size()) {
       std::memcpy(&state_[0],
                   &in[length - state_.size()],
                   state_.size() * sizeof(*in));
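
With |i|, |sparsity_| and |offset_| all unsigned, the inner-loop guard in SparseFIRFilter::Filter() above needs no casts, and it doubles as the bounds check that keeps `i - j * sparsity_ - offset_` from wrapping. A stateless sketch of that core (the real filter also carries history in state_ between calls):

```cpp
#include <cstddef>
#include <vector>

// Sparse FIR: coeffs[j] sits at effective tap j * sparsity + offset; all
// other taps are zero. Assumes zero initial state, unlike the real class.
void SparseFilter(const float* in, size_t length, float* out,
                  const std::vector<float>& coeffs,
                  size_t sparsity, size_t offset) {
  for (size_t i = 0; i < length; ++i) {
    out[i] = 0.f;
    // The first clause guarantees i - j * sparsity - offset >= 0 before the
    // unsigned subtraction is evaluated, so nothing can wrap.
    for (size_t j = 0; i >= j * sparsity + offset && j < coeffs.size(); ++j) {
      out[i] += in[i - j * sparsity - offset] * coeffs[j];
    }
  }
}
```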
diff --git a/webrtc/common_audio/sparse_fir_filter.h b/webrtc/common_audio/sparse_fir_filter.h
index 4526ad7..9322afc 100644
--- a/webrtc/common_audio/sparse_fir_filter.h
+++ b/webrtc/common_audio/sparse_fir_filter.h
@@ -30,17 +30,17 @@
   // B = [0 coeffs[0] 0 0 coeffs[1] 0 0 coeffs[2] ... ]
   // All initial state values will be zeros.
   SparseFIRFilter(const float* nonzero_coeffs,
-                  int num_nonzero_coeffs,
-                  int sparsity,
-                  int offset);
+                  size_t num_nonzero_coeffs,
+                  size_t sparsity,
+                  size_t offset);
 
   // Filters the |in| data supplied.
   // |out| must be previously allocated and it must be at least of |length|.
-  void Filter(const float* in, int length, float* out);
+  void Filter(const float* in, size_t length, float* out);
 
  private:
-  const int sparsity_;
-  const int offset_;
+  const size_t sparsity_;
+  const size_t offset_;
   const std::vector<float> nonzero_coeffs_;
   std::vector<float> state_;
 
diff --git a/webrtc/common_audio/sparse_fir_filter_unittest.cc b/webrtc/common_audio/sparse_fir_filter_unittest.cc
index 28e3fbb..82a53a5 100644
--- a/webrtc/common_audio/sparse_fir_filter_unittest.cc
+++ b/webrtc/common_audio/sparse_fir_filter_unittest.cc
@@ -31,9 +31,9 @@
 
 TEST(SparseFIRFilterTest, FilterAsIdentity) {
   const float kCoeff = 1.f;
-  const int kNumCoeff = 1;
-  const int kSparsity = 3;
-  const int kOffset = 0;
+  const size_t kNumCoeff = 1;
+  const size_t kSparsity = 3;
+  const size_t kOffset = 0;
   float output[arraysize(kInput)];
   SparseFIRFilter filter(&kCoeff, kNumCoeff, kSparsity, kOffset);
   filter.Filter(kInput, arraysize(kInput), output);
@@ -42,10 +42,10 @@
 
 TEST(SparseFIRFilterTest, SameOutputForScalarCoefficientAndDifferentSparsity) {
   const float kCoeff = 2.f;
-  const int kNumCoeff = 1;
-  const int kLowSparsity = 1;
-  const int kHighSparsity = 7;
-  const int kOffset = 0;
+  const size_t kNumCoeff = 1;
+  const size_t kLowSparsity = 1;
+  const size_t kHighSparsity = 7;
+  const size_t kOffset = 0;
   float low_sparsity_output[arraysize(kInput)];
   float high_sparsity_output[arraysize(kInput)];
   SparseFIRFilter low_sparsity_filter(&kCoeff,
@@ -63,9 +63,9 @@
 
 TEST(SparseFIRFilterTest, FilterUsedAsScalarMultiplication) {
   const float kCoeff = 5.f;
-  const int kNumCoeff = 1;
-  const int kSparsity = 5;
-  const int kOffset = 0;
+  const size_t kNumCoeff = 1;
+  const size_t kSparsity = 5;
+  const size_t kOffset = 0;
   float output[arraysize(kInput)];
   SparseFIRFilter filter(&kCoeff, kNumCoeff, kSparsity, kOffset);
   filter.Filter(kInput, arraysize(kInput), output);
@@ -77,9 +77,9 @@
 
 TEST(SparseFIRFilterTest, FilterUsedAsInputShifting) {
   const float kCoeff = 1.f;
-  const int kNumCoeff = 1;
-  const int kSparsity = 1;
-  const int kOffset = 4;
+  const size_t kNumCoeff = 1;
+  const size_t kSparsity = 1;
+  const size_t kOffset = 4;
   float output[arraysize(kInput)];
   SparseFIRFilter filter(&kCoeff, kNumCoeff, kSparsity, kOffset);
   filter.Filter(kInput, arraysize(kInput), output);
@@ -91,8 +91,8 @@
 }
 
 TEST(SparseFIRFilterTest, FilterUsedAsArbitraryWeighting) {
-  const int kSparsity = 2;
-  const int kOffset = 1;
+  const size_t kSparsity = 2;
+  const size_t kOffset = 1;
   float output[arraysize(kInput)];
   SparseFIRFilter filter(kCoeffs, arraysize(kCoeffs), kSparsity, kOffset);
   filter.Filter(kInput, arraysize(kInput), output);
@@ -104,8 +104,8 @@
 }
 
 TEST(SparseFIRFilterTest, FilterInLengthLesserOrEqualToCoefficientsLength) {
-  const int kSparsity = 1;
-  const int kOffset = 0;
+  const size_t kSparsity = 1;
+  const size_t kOffset = 0;
   float output[arraysize(kInput)];
   SparseFIRFilter filter(kCoeffs, arraysize(kCoeffs), kSparsity, kOffset);
   filter.Filter(kInput, 2, output);
@@ -114,8 +114,8 @@
 }
 
 TEST(SparseFIRFilterTest, MultipleFilterCalls) {
-  const int kSparsity = 1;
-  const int kOffset = 0;
+  const size_t kSparsity = 1;
+  const size_t kOffset = 0;
   float output[arraysize(kInput)];
   SparseFIRFilter filter(kCoeffs, arraysize(kCoeffs), kSparsity, kOffset);
   filter.Filter(kInput, 2, output);
@@ -141,8 +141,8 @@
 }
 
 TEST(SparseFIRFilterTest, VerifySampleBasedVsBlockBasedFiltering) {
-  const int kSparsity = 3;
-  const int kOffset = 1;
+  const size_t kSparsity = 3;
+  const size_t kOffset = 1;
   float output_block_based[arraysize(kInput)];
   SparseFIRFilter filter_block(kCoeffs,
                                arraysize(kCoeffs),
@@ -160,8 +160,8 @@
 }
 
 TEST(SparseFIRFilterTest, SimpleHighPassFilter) {
-  const int kSparsity = 2;
-  const int kOffset = 2;
+  const size_t kSparsity = 2;
+  const size_t kOffset = 2;
   const float kHPCoeffs[] = {1.f, -1.f};
   const float kConstantInput[] =
       {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f};
@@ -177,8 +177,8 @@
 }
 
 TEST(SparseFIRFilterTest, SimpleLowPassFilter) {
-  const int kSparsity = 2;
-  const int kOffset = 2;
+  const size_t kSparsity = 2;
+  const size_t kOffset = 2;
   const float kLPCoeffs[] = {1.f, 1.f};
   const float kHighFrequencyInput[] =
       {1.f, 1.f, -1.f, -1.f, 1.f, 1.f, -1.f, -1.f, 1.f, 1.f};
@@ -194,8 +194,8 @@
 }
 
 TEST(SparseFIRFilterTest, SameOutputWhenSwappedCoefficientsAndInput) {
-  const int kSparsity = 1;
-  const int kOffset = 0;
+  const size_t kSparsity = 1;
+  const size_t kOffset = 0;
   float output[arraysize(kCoeffs)];
   float output_swapped[arraysize(kCoeffs)];
   SparseFIRFilter filter(kCoeffs, arraysize(kCoeffs), kSparsity, kOffset);
@@ -210,8 +210,8 @@
 }
 
 TEST(SparseFIRFilterTest, SameOutputAsFIRFilterWhenSparsityOneAndOffsetZero) {
-  const int kSparsity = 1;
-  const int kOffset = 0;
+  const size_t kSparsity = 1;
+  const size_t kOffset = 0;
   float output[arraysize(kInput)];
   float sparse_output[arraysize(kInput)];
   rtc::scoped_ptr<FIRFilter> filter(FIRFilter::Create(kCoeffs,
diff --git a/webrtc/common_audio/vad/include/webrtc_vad.h b/webrtc/common_audio/vad/include/webrtc_vad.h
index 51f7b8d..91308ee 100644
--- a/webrtc/common_audio/vad/include/webrtc_vad.h
+++ b/webrtc/common_audio/vad/include/webrtc_vad.h
@@ -16,6 +16,8 @@
 #ifndef WEBRTC_COMMON_AUDIO_VAD_INCLUDE_WEBRTC_VAD_H_  // NOLINT
 #define WEBRTC_COMMON_AUDIO_VAD_INCLUDE_WEBRTC_VAD_H_
 
+#include <stddef.h>
+
 #include "webrtc/typedefs.h"
 
 typedef struct WebRtcVadInst VadInst;
@@ -66,7 +68,7 @@
 //                        0 - (Non-active Voice),
 //                       -1 - (Error)
 int WebRtcVad_Process(VadInst* handle, int fs, const int16_t* audio_frame,
-                      int frame_length);
+                      size_t frame_length);
 
 // Checks for valid combinations of |rate| and |frame_length|. We support 10,
 // 20 and 30 ms frames and the rates 8000, 16000 and 32000 Hz.
@@ -75,7 +77,7 @@
 // - frame_length [i] : Speech frame buffer length in number of samples.
 //
 // returns            : 0 - (valid combination), -1 - (invalid combination)
-int WebRtcVad_ValidRateAndFrameLength(int rate, int frame_length);
+int WebRtcVad_ValidRateAndFrameLength(int rate, size_t frame_length);
 
 #ifdef __cplusplus
 }
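
The valid frame lengths documented in the header above are simply 10/20/30 ms worth of samples at the given rate, the same `rate / 1000 * ms` product (now cast to size_t) computed in WebRtcVad_ValidRateAndFrameLength() further down. A sketch of the arithmetic:

```cpp
#include <cstddef>

// Samples per frame for a rate in Hz and a duration in ms; mirrors the
// computation in WebRtcVad_ValidRateAndFrameLength() below.
size_t FrameLengthSamples(int rate_hz, int duration_ms) {
  return static_cast<size_t>(rate_hz / 1000 * duration_ms);
}
// e.g. FrameLengthSamples(8000, 10) == 80, FrameLengthSamples(16000, 20) ==
// 320, FrameLengthSamples(32000, 30) == 960 -- sizes the VAD accepts.
```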
diff --git a/webrtc/common_audio/vad/vad.cc b/webrtc/common_audio/vad/vad.cc
index fd09c3f..764d024 100644
--- a/webrtc/common_audio/vad/vad.cc
+++ b/webrtc/common_audio/vad/vad.cc
@@ -28,8 +28,7 @@
 enum Vad::Activity Vad::VoiceActivity(const int16_t* audio,
                                       size_t num_samples,
                                       int sample_rate_hz) {
-  int ret = WebRtcVad_Process(
-      handle_, sample_rate_hz, audio, static_cast<int>(num_samples));
+  int ret = WebRtcVad_Process(handle_, sample_rate_hz, audio, num_samples);
   switch (ret) {
     case 0:
       return kPassive;
diff --git a/webrtc/common_audio/vad/vad_core.c b/webrtc/common_audio/vad/vad_core.c
index d246a4d..51797ee 100644
--- a/webrtc/common_audio/vad/vad_core.c
+++ b/webrtc/common_audio/vad/vad_core.c
@@ -122,7 +122,7 @@
 //
 // - returns              : the VAD decision (0 - noise, 1 - speech).
 static int16_t GmmProbability(VadInstT* self, int16_t* features,
-                              int16_t total_power, int frame_length) {
+                              int16_t total_power, size_t frame_length) {
   int channel, k;
   int16_t feature_minimum;
   int16_t h0, h1;
@@ -596,16 +596,16 @@
 // probability for both speech and background noise.
 
 int WebRtcVad_CalcVad48khz(VadInstT* inst, const int16_t* speech_frame,
-                           int frame_length) {
+                           size_t frame_length) {
   int vad;
-  int i;
+  size_t i;
   int16_t speech_nb[240];  // 30 ms in 8 kHz.
   // |tmp_mem| is a temporary memory used by resample function, length is
   // frame length in 10 ms (480 samples) + 256 extra.
   int32_t tmp_mem[480 + 256] = { 0 };
-  const int kFrameLen10ms48khz = 480;
-  const int kFrameLen10ms8khz = 80;
-  int num_10ms_frames = frame_length / kFrameLen10ms48khz;
+  const size_t kFrameLen10ms48khz = 480;
+  const size_t kFrameLen10ms8khz = 80;
+  size_t num_10ms_frames = frame_length / kFrameLen10ms48khz;
 
   for (i = 0; i < num_10ms_frames; i++) {
     WebRtcSpl_Resample48khzTo8khz(speech_frame,
@@ -621,9 +621,10 @@
 }
 
 int WebRtcVad_CalcVad32khz(VadInstT* inst, const int16_t* speech_frame,
-                           int frame_length)
+                           size_t frame_length)
 {
-    int len, vad;
+    size_t len;
+    int vad;
     int16_t speechWB[480]; // Downsampled speech frame: 960 samples (30ms in SWB)
     int16_t speechNB[240]; // Downsampled speech frame: 480 samples (30ms in WB)
 
@@ -643,9 +644,10 @@
 }
 
 int WebRtcVad_CalcVad16khz(VadInstT* inst, const int16_t* speech_frame,
-                           int frame_length)
+                           size_t frame_length)
 {
-    int len, vad;
+    size_t len;
+    int vad;
     int16_t speechNB[240]; // Downsampled speech frame: 480 samples (30ms in WB)
 
     // Wideband: Downsample signal before doing VAD
@@ -659,7 +661,7 @@
 }
 
 int WebRtcVad_CalcVad8khz(VadInstT* inst, const int16_t* speech_frame,
-                          int frame_length)
+                          size_t frame_length)
 {
     int16_t feature_vector[kNumChannels], total_power;
 
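WebRtcVad_CalcVad48khz() above processes its input in 10 ms slices: each 480-sample slice at 48 kHz is resampled down to 80 samples at 8 kHz before the narrowband VAD runs. The loop shape, with a crude stand-in for WebRtcSpl_Resample48khzTo8khz() and its scratch memory:

```cpp
#include <cstddef>
#include <cstdint>

// Crude stand-in for WebRtcSpl_Resample48khzTo8khz(): plain 6x decimation
// with no anti-alias filtering, just to keep the sketch self-contained.
static void Resample480To80(const int16_t* in, int16_t* out) {
  for (size_t j = 0; j < 80; ++j) {
    out[j] = in[j * 6];
  }
}

void DownsampleTo8khz(const int16_t* speech_frame, size_t frame_length,
                      int16_t* speech_nb) {
  const size_t kFrameLen10ms48khz = 480;
  const size_t kFrameLen10ms8khz = 80;
  const size_t num_10ms_frames = frame_length / kFrameLen10ms48khz;
  for (size_t i = 0; i < num_10ms_frames; ++i) {
    Resample480To80(&speech_frame[i * kFrameLen10ms48khz],
                    &speech_nb[i * kFrameLen10ms8khz]);
  }
}
```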
diff --git a/webrtc/common_audio/vad/vad_core.h b/webrtc/common_audio/vad/vad_core.h
index 202963d..b38c515 100644
--- a/webrtc/common_audio/vad/vad_core.h
+++ b/webrtc/common_audio/vad/vad_core.h
@@ -104,12 +104,12 @@
  *                        1-6 - Active speech
  */
 int WebRtcVad_CalcVad48khz(VadInstT* inst, const int16_t* speech_frame,
-                           int frame_length);
+                           size_t frame_length);
 int WebRtcVad_CalcVad32khz(VadInstT* inst, const int16_t* speech_frame,
-                           int frame_length);
+                           size_t frame_length);
 int WebRtcVad_CalcVad16khz(VadInstT* inst, const int16_t* speech_frame,
-                           int frame_length);
+                           size_t frame_length);
 int WebRtcVad_CalcVad8khz(VadInstT* inst, const int16_t* speech_frame,
-                          int frame_length);
+                          size_t frame_length);
 
 #endif  // WEBRTC_COMMON_AUDIO_VAD_VAD_CORE_H_
diff --git a/webrtc/common_audio/vad/vad_core_unittest.cc b/webrtc/common_audio/vad/vad_core_unittest.cc
index 00f5841..ee69484 100644
--- a/webrtc/common_audio/vad/vad_core_unittest.cc
+++ b/webrtc/common_audio/vad/vad_core_unittest.cc
@@ -82,7 +82,7 @@
 
   // Construct a speech signal that will trigger the VAD in all modes. It is
   // known that (i * i) will wrap around, but that doesn't matter in this case.
-  for (int16_t i = 0; i < kMaxFrameLength; ++i) {
+  for (size_t i = 0; i < kMaxFrameLength; ++i) {
     speech[i] = static_cast<int16_t>(i * i);
   }
   for (size_t j = 0; j < kFrameLengthsSize; ++j) {
diff --git a/webrtc/common_audio/vad/vad_filterbank.c b/webrtc/common_audio/vad/vad_filterbank.c
index 4094f91..8b9df93 100644
--- a/webrtc/common_audio/vad/vad_filterbank.c
+++ b/webrtc/common_audio/vad/vad_filterbank.c
@@ -38,9 +38,9 @@
 // - filter_state [i/o] : State of the filter.
 // - data_out     [o]   : Output audio data in the frequency interval
 //                        80 - 250 Hz.
-static void HighPassFilter(const int16_t* data_in, int data_length,
+static void HighPassFilter(const int16_t* data_in, size_t data_length,
                            int16_t* filter_state, int16_t* data_out) {
-  int i;
+  size_t i;
   const int16_t* in_ptr = data_in;
   int16_t* out_ptr = data_out;
   int32_t tmp32 = 0;
@@ -80,7 +80,7 @@
 // - filter_coefficient [i]   : Given in Q15.
 // - filter_state       [i/o] : State of the filter given in Q(-1).
 // - data_out           [o]   : Output audio signal given in Q(-1).
-static void AllPassFilter(const int16_t* data_in, int data_length,
+static void AllPassFilter(const int16_t* data_in, size_t data_length,
                           int16_t filter_coefficient, int16_t* filter_state,
                           int16_t* data_out) {
   // The filter can only cause overflow (in the w16 output variable)
@@ -89,7 +89,7 @@
   // First 6 taps of the impulse response:
   // 0.6399 0.5905 -0.3779 0.2418 -0.1547 0.0990
 
-  int i;
+  size_t i;
   int16_t tmp16 = 0;
   int32_t tmp32 = 0;
   int32_t state32 = ((int32_t) (*filter_state) << 16);  // Q15
@@ -117,11 +117,11 @@
 //                        The length is |data_length| / 2.
 // - lp_data_out  [o]   : Output audio data of the lower half of the spectrum.
 //                        The length is |data_length| / 2.
-static void SplitFilter(const int16_t* data_in, int data_length,
+static void SplitFilter(const int16_t* data_in, size_t data_length,
                         int16_t* upper_state, int16_t* lower_state,
                         int16_t* hp_data_out, int16_t* lp_data_out) {
-  int i;
-  int half_length = data_length >> 1;  // Downsampling by 2.
+  size_t i;
+  size_t half_length = data_length >> 1;  // Downsampling by 2.
   int16_t tmp_out;
 
   // All-pass filtering upper branch.
@@ -151,7 +151,7 @@
 //                        NOTE: |total_energy| is only updated if
 //                        |total_energy| <= |kMinEnergy|.
 // - log_energy   [o]   : 10 * log10("energy of |data_in|") given in Q4.
-static void LogOfEnergy(const int16_t* data_in, int data_length,
+static void LogOfEnergy(const int16_t* data_in, size_t data_length,
                         int16_t offset, int16_t* total_energy,
                         int16_t* log_energy) {
   // |tot_rshifts| accumulates the number of right shifts performed on |energy|.
@@ -243,7 +243,7 @@
 }
 
 int16_t WebRtcVad_CalculateFeatures(VadInstT* self, const int16_t* data_in,
-                                    int data_length, int16_t* features) {
+                                    size_t data_length, int16_t* features) {
   int16_t total_energy = 0;
   // We expect |data_length| to be 80, 160 or 240 samples, which corresponds to
   // 10, 20 or 30 ms at 8 kHz. Therefore, the intermediate downsampled data will
@@ -251,9 +251,9 @@
   // the second split.
   int16_t hp_120[120], lp_120[120];
   int16_t hp_60[60], lp_60[60];
-  const int half_data_length = data_length >> 1;
-  int length = half_data_length;  // |data_length| / 2, corresponds to
-                                  // bandwidth = 2000 Hz after downsampling.
+  const size_t half_data_length = data_length >> 1;
+  size_t length = half_data_length;  // |data_length| / 2, corresponds to
+                                     // bandwidth = 2000 Hz after downsampling.
 
   // Initialize variables for the first SplitFilter().
   int frequency_band = 0;
@@ -261,7 +261,6 @@
   int16_t* hp_out_ptr = hp_120;  // [2000 - 4000] Hz.
   int16_t* lp_out_ptr = lp_120;  // [0 - 2000] Hz.
 
-  assert(data_length >= 0);
   assert(data_length <= 240);
   assert(4 < kNumChannels - 1);  // Checking maximum |frequency_band|.
 
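The length bookkeeping in WebRtcVad_CalculateFeatures() above halves with every SplitFilter() pass: a 240-sample 30 ms frame becomes 120 samples after the first split and 60 after the second, which is exactly what the hp_120/lp_120 and hp_60/lp_60 buffers are sized for. A sketch of that halving, assuming the frame lengths stated in the comments:

```cpp
#include <cstddef>

// Buffer length remaining after |num_splits| QMF splits; each split halves
// both the bandwidth and the number of samples.
size_t LengthAfterSplits(size_t data_length, int num_splits) {
  size_t length = data_length;
  for (int s = 0; s < num_splits; ++s) {
    length >>= 1;  // Downsampling by 2, as in SplitFilter().
  }
  return length;
}
// LengthAfterSplits(240, 1) == 120 and LengthAfterSplits(240, 2) == 60.
```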
diff --git a/webrtc/common_audio/vad/vad_filterbank.h b/webrtc/common_audio/vad/vad_filterbank.h
index e9195e5..42bf3fc 100644
--- a/webrtc/common_audio/vad/vad_filterbank.h
+++ b/webrtc/common_audio/vad/vad_filterbank.h
@@ -39,6 +39,6 @@
 // - returns            : Total energy of the signal (NOTE! This value is not
 //                        exact. It is only used in a comparison.)
 int16_t WebRtcVad_CalculateFeatures(VadInstT* self, const int16_t* data_in,
-                                    int data_length, int16_t* features);
+                                    size_t data_length, int16_t* features);
 
 #endif  // WEBRTC_COMMON_AUDIO_VAD_VAD_FILTERBANK_H_
diff --git a/webrtc/common_audio/vad/vad_filterbank_unittest.cc b/webrtc/common_audio/vad/vad_filterbank_unittest.cc
index 4232cbc..11b503a 100644
--- a/webrtc/common_audio/vad/vad_filterbank_unittest.cc
+++ b/webrtc/common_audio/vad/vad_filterbank_unittest.cc
@@ -38,7 +38,7 @@
   // Construct a speech signal that will trigger the VAD in all modes. It is
   // known that (i * i) will wrap around, but that doesn't matter in this case.
   int16_t speech[kMaxFrameLength];
-  for (int16_t i = 0; i < kMaxFrameLength; ++i) {
+  for (size_t i = 0; i < kMaxFrameLength; ++i) {
     speech[i] = static_cast<int16_t>(i * i);
   }
 
@@ -73,7 +73,7 @@
 
   // Verify that all ones in gives kOffsetVector out. Any other constant input
   // will have a small impact in the sub bands.
-  for (int16_t i = 0; i < kMaxFrameLength; ++i) {
+  for (size_t i = 0; i < kMaxFrameLength; ++i) {
     speech[i] = 1;
   }
   for (size_t j = 0; j < kFrameLengthsSize; ++j) {
diff --git a/webrtc/common_audio/vad/vad_sp.c b/webrtc/common_audio/vad/vad_sp.c
index 217ef26..a54be17 100644
--- a/webrtc/common_audio/vad/vad_sp.c
+++ b/webrtc/common_audio/vad/vad_sp.c
@@ -27,12 +27,13 @@
 void WebRtcVad_Downsampling(const int16_t* signal_in,
                             int16_t* signal_out,
                             int32_t* filter_state,
-                            int in_length) {
+                            size_t in_length) {
   int16_t tmp16_1 = 0, tmp16_2 = 0;
   int32_t tmp32_1 = filter_state[0];
   int32_t tmp32_2 = filter_state[1];
-  int n = 0;
-  int half_length = (in_length >> 1);  // Downsampling by 2 gives half length.
+  size_t n = 0;
+  // Downsampling by 2 gives half length.
+  size_t half_length = (in_length >> 1);
 
   // Filter coefficients in Q13, filter state in Q0.
   for (n = 0; n < half_length; n++) {
diff --git a/webrtc/common_audio/vad/vad_sp.h b/webrtc/common_audio/vad/vad_sp.h
index b5e6259..4d2b02a 100644
--- a/webrtc/common_audio/vad/vad_sp.h
+++ b/webrtc/common_audio/vad/vad_sp.h
@@ -33,7 +33,7 @@
 void WebRtcVad_Downsampling(const int16_t* signal_in,
                             int16_t* signal_out,
                             int32_t* filter_state,
-                            int in_length);
+                            size_t in_length);
 
 // Updates and returns the smoothed feature minimum. As minimum we use the
 // median of the five smallest feature values in a 100 frames long window.
diff --git a/webrtc/common_audio/vad/vad_sp_unittest.cc b/webrtc/common_audio/vad/vad_sp_unittest.cc
index 9127bf3..6d5e2a6 100644
--- a/webrtc/common_audio/vad/vad_sp_unittest.cc
+++ b/webrtc/common_audio/vad/vad_sp_unittest.cc
@@ -23,7 +23,7 @@
 
 TEST_F(VadTest, vad_sp) {
   VadInstT* self = reinterpret_cast<VadInstT*>(malloc(sizeof(VadInstT)));
-  const int kMaxFrameLenSp = 960;  // Maximum frame length in this unittest.
+  const size_t kMaxFrameLenSp = 960;  // Maximum frame length in this unittest.
   int16_t zeros[kMaxFrameLenSp] = { 0 };
   int32_t state[2] = { 0 };
   int16_t data_in[kMaxFrameLenSp];
@@ -40,14 +40,14 @@
 
   // Construct a speech signal that will trigger the VAD in all modes. It is
   // known that (i * i) will wrap around, but that doesn't matter in this case.
-  for (int16_t i = 0; i < kMaxFrameLenSp; ++i) {
+  for (size_t i = 0; i < kMaxFrameLenSp; ++i) {
     data_in[i] = static_cast<int16_t>(i * i);
   }
   // Input values all zeros, expect all zeros out.
   WebRtcVad_Downsampling(zeros, data_out, state, kMaxFrameLenSp);
   EXPECT_EQ(0, state[0]);
   EXPECT_EQ(0, state[1]);
-  for (int16_t i = 0; i < kMaxFrameLenSp / 2; ++i) {
+  for (size_t i = 0; i < kMaxFrameLenSp / 2; ++i) {
     EXPECT_EQ(0, data_out[i]);
   }
   // Make a simple non-zero data test.
diff --git a/webrtc/common_audio/vad/vad_unittest.cc b/webrtc/common_audio/vad/vad_unittest.cc
index 48eddbf..ecc4734 100644
--- a/webrtc/common_audio/vad/vad_unittest.cc
+++ b/webrtc/common_audio/vad/vad_unittest.cc
@@ -27,7 +27,7 @@
 void VadTest::TearDown() {}
 
 // Returns true if the rate and frame length combination is valid.
-bool VadTest::ValidRatesAndFrameLengths(int rate, int frame_length) {
+bool VadTest::ValidRatesAndFrameLengths(int rate, size_t frame_length) {
   if (rate == 8000) {
     if (frame_length == 80 || frame_length == 160 || frame_length == 240) {
       return true;
@@ -65,7 +65,7 @@
   // Construct a speech signal that will trigger the VAD in all modes. It is
   // known that (i * i) will wrap around, but that doesn't matter in this case.
   int16_t speech[kMaxFrameLength];
-  for (int16_t i = 0; i < kMaxFrameLength; i++) {
+  for (size_t i = 0; i < kMaxFrameLength; i++) {
     speech[i] = static_cast<int16_t>(i * i);
   }
 
@@ -134,8 +134,8 @@
     -8000, -4000, 0, 4000, 8000, 8001, 15999, 16000, 32000, 48000, 48001, 96000
   };
 
-  const int kFrameLengths[] = {
-    -10, 0, 80, 81, 159, 160, 240, 320, 480, 640, 960, 1440, 2000
+  const size_t kFrameLengths[] = {
+    0, 80, 81, 159, 160, 240, 320, 480, 640, 960, 1440, 2000
   };
 
   for (size_t i = 0; i < arraysize(kRates); i++) {
diff --git a/webrtc/common_audio/vad/vad_unittest.h b/webrtc/common_audio/vad/vad_unittest.h
index 5fb726d..3efe61b 100644
--- a/webrtc/common_audio/vad/vad_unittest.h
+++ b/webrtc/common_audio/vad/vad_unittest.h
@@ -28,8 +28,8 @@
 const size_t kRatesSize = sizeof(kRates) / sizeof(*kRates);
 
 // Frame lengths we support.
-const int kMaxFrameLength = 1440;
-const int kFrameLengths[] = { 80, 120, 160, 240, 320, 480, 640, 960,
+const size_t kMaxFrameLength = 1440;
+const size_t kFrameLengths[] = { 80, 120, 160, 240, 320, 480, 640, 960,
     kMaxFrameLength };
 const size_t kFrameLengthsSize = sizeof(kFrameLengths) / sizeof(*kFrameLengths);
 
@@ -42,7 +42,7 @@
   virtual void TearDown();
 
   // Returns true if the rate and frame length combination is valid.
-  bool ValidRatesAndFrameLengths(int rate, int frame_length);
+  bool ValidRatesAndFrameLengths(int rate, size_t frame_length);
 };
 
 #endif  // WEBRTC_COMMON_AUDIO_VAD_VAD_UNITTEST_H
diff --git a/webrtc/common_audio/vad/webrtc_vad.c b/webrtc/common_audio/vad/webrtc_vad.c
index 5af1b5b..80c8f3c 100644
--- a/webrtc/common_audio/vad/webrtc_vad.c
+++ b/webrtc/common_audio/vad/webrtc_vad.c
@@ -56,7 +56,7 @@
 }
 
 int WebRtcVad_Process(VadInst* handle, int fs, const int16_t* audio_frame,
-                      int frame_length) {
+                      size_t frame_length) {
   int vad = -1;
   VadInstT* self = (VadInstT*) handle;
 
@@ -90,11 +90,11 @@
   return vad;
 }
 
-int WebRtcVad_ValidRateAndFrameLength(int rate, int frame_length) {
+int WebRtcVad_ValidRateAndFrameLength(int rate, size_t frame_length) {
   int return_value = -1;
   size_t i;
   int valid_length_ms;
-  int valid_length;
+  size_t valid_length;
 
   // We only allow 10, 20 or 30 ms frames. Loop through valid frame rates and
   // see if we have a matching pair.
@@ -102,7 +102,7 @@
     if (kValidRates[i] == rate) {
       for (valid_length_ms = 10; valid_length_ms <= kMaxFrameLengthMs;
           valid_length_ms += 10) {
-        valid_length = (kValidRates[i] / 1000 * valid_length_ms);
+        valid_length = (size_t)(kValidRates[i] / 1000 * valid_length_ms);
         if (frame_length == valid_length) {
           return_value = 0;
           break;
diff --git a/webrtc/common_audio/window_generator.cc b/webrtc/common_audio/window_generator.cc
index 1d61368..ae6cbc9 100644
--- a/webrtc/common_audio/window_generator.cc
+++ b/webrtc/common_audio/window_generator.cc
@@ -46,20 +46,20 @@
   }
 }
 
-void WindowGenerator::KaiserBesselDerived(float alpha, int length,
+void WindowGenerator::KaiserBesselDerived(float alpha, size_t length,
                                           float* window) {
-  CHECK_GT(length, 1);
+  CHECK_GT(length, 1U);
   CHECK(window != nullptr);
 
-  const int half = (length + 1) / 2;
+  const size_t half = (length + 1) / 2;
   float sum = 0.0f;
 
-  for (int i = 0; i <= half; ++i) {
+  for (size_t i = 0; i <= half; ++i) {
     complex<float> r = (4.0f * i) / length - 1.0f;
     sum += I0(static_cast<float>(M_PI) * alpha * sqrt(1.0f - r * r)).real();
     window[i] = sum;
   }
-  for (int i = length - 1; i >= half; --i) {
+  for (size_t i = length - 1; i >= half; --i) {
     window[length - i - 1] = sqrtf(window[length - i - 1] / sum);
     window[i] = window[length - i - 1];
   }
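
One subtlety in KaiserBesselDerived() above: the second loop now runs a size_t index downwards, and `for (size_t i = length - 1; i >= half; --i)` is only wrap-safe because half = (length + 1) / 2 is at least 1 once CHECK_GT(length, 1U) has passed. Were the lower bound ever 0, the usual wrap-safe idiom is:

```cpp
#include <cstddef>

// Wrap-safe descending loop over [0, n): the decrement happens inside the
// condition, so an unsigned i never underflows past zero.
float SumDescending(const float* v, size_t n) {
  float sum = 0.f;
  for (size_t i = n; i-- > 0;) {
    sum += v[i];  // Visits i = n - 1, n - 2, ..., 0.
  }
  return sum;
}
```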
diff --git a/webrtc/common_audio/window_generator.h b/webrtc/common_audio/window_generator.h
index ee0acad..f28542c 100644
--- a/webrtc/common_audio/window_generator.h
+++ b/webrtc/common_audio/window_generator.h
@@ -11,6 +11,8 @@
 #ifndef WEBRTC_COMMON_AUDIO_WINDOW_GENERATOR_H_
 #define WEBRTC_COMMON_AUDIO_WINDOW_GENERATOR_H_
 
+#include <stddef.h>
+
 #include "webrtc/base/constructormagic.h"
 
 namespace webrtc {
@@ -19,7 +21,7 @@
 class WindowGenerator {
  public:
   static void Hanning(int length, float* window);
-  static void KaiserBesselDerived(float alpha, int length, float* window);
+  static void KaiserBesselDerived(float alpha, size_t length, float* window);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(WindowGenerator);
diff --git a/webrtc/common_types.h b/webrtc/common_types.h
index 39ece41..693d36d 100644
--- a/webrtc/common_types.h
+++ b/webrtc/common_types.h
@@ -381,7 +381,7 @@
     // max packet waiting time in the jitter buffer (ms)
     int maxWaitingTimeMs;
     // added samples in off mode due to packet loss
-    int addedSamples;
+    size_t addedSamples;
 };
 
 // Statistics for calls to AudioCodingModule::PlayoutData10Ms().
diff --git a/webrtc/modules/audio_coding/codecs/audio_decoder.cc b/webrtc/modules/audio_coding/codecs/audio_decoder.cc
index 0a4a6a9..08d101c 100644
--- a/webrtc/modules/audio_coding/codecs/audio_decoder.cc
+++ b/webrtc/modules/audio_coding/codecs/audio_decoder.cc
@@ -56,7 +56,9 @@
 
 bool AudioDecoder::HasDecodePlc() const { return false; }
 
-int AudioDecoder::DecodePlc(int num_frames, int16_t* decoded) { return 0; }
+size_t AudioDecoder::DecodePlc(size_t num_frames, int16_t* decoded) {
+  return 0;
+}
 
 int AudioDecoder::IncomingPacket(const uint8_t* payload,
                                  size_t payload_len,
diff --git a/webrtc/modules/audio_coding/codecs/audio_decoder.h b/webrtc/modules/audio_coding/codecs/audio_decoder.h
index 8947e81..480b1aa 100644
--- a/webrtc/modules/audio_coding/codecs/audio_decoder.h
+++ b/webrtc/modules/audio_coding/codecs/audio_decoder.h
@@ -62,7 +62,7 @@
 
   // Calls the packet-loss concealment of the decoder to update the state after
   // one or several lost packets.
-  virtual int DecodePlc(int num_frames, int16_t* decoded);
+  virtual size_t DecodePlc(size_t num_frames, int16_t* decoded);
 
   // Initializes the decoder.
   virtual int Init() = 0;
diff --git a/webrtc/modules/audio_coding/codecs/audio_encoder.h b/webrtc/modules/audio_coding/codecs/audio_encoder.h
index fe6fd87..0a40316 100644
--- a/webrtc/modules/audio_coding/codecs/audio_encoder.h
+++ b/webrtc/modules/audio_coding/codecs/audio_encoder.h
@@ -90,11 +90,11 @@
   // the encoder may vary the number of 10 ms frames from packet to packet, but
   // it must decide the length of the next packet no later than when outputting
   // the preceding packet.
-  virtual int Num10MsFramesInNextPacket() const = 0;
+  virtual size_t Num10MsFramesInNextPacket() const = 0;
 
   // Returns the maximum value that can be returned by
   // Num10MsFramesInNextPacket().
-  virtual int Max10MsFramesInAPacket() const = 0;
+  virtual size_t Max10MsFramesInAPacket() const = 0;
 
   // Returns the current target bitrate in bits/s. The value -1 means that the
   // codec adapts the target automatically, and a current target cannot be
diff --git a/webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h b/webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h
index 553d8ad..c1184e1 100644
--- a/webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h
+++ b/webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h
@@ -74,11 +74,11 @@
     CriticalSectionScoped cs(encoder_lock_.get());
     return encoder_->RtpTimestampRateHz();
   }
-  int Num10MsFramesInNextPacket() const override {
+  size_t Num10MsFramesInNextPacket() const override {
     CriticalSectionScoped cs(encoder_lock_.get());
     return encoder_->Num10MsFramesInNextPacket();
   }
-  int Max10MsFramesInAPacket() const override {
+  size_t Max10MsFramesInAPacket() const override {
     CriticalSectionScoped cs(encoder_lock_.get());
     return encoder_->Max10MsFramesInAPacket();
   }
diff --git a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
index d2acaa1..279616e 100644
--- a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
+++ b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
@@ -38,7 +38,8 @@
     return false;
   if (num_channels != speech_encoder->NumChannels())
     return false;
-  if (sid_frame_interval_ms < speech_encoder->Max10MsFramesInAPacket() * 10)
+  if (sid_frame_interval_ms <
+      static_cast<int>(speech_encoder->Max10MsFramesInAPacket() * 10))
     return false;
   if (num_cng_coefficients > WEBRTC_CNG_MAX_LPC_ORDER ||
       num_cng_coefficients <= 0)
@@ -89,11 +90,11 @@
   return std::max(max_encoded_bytes_active, max_encoded_bytes_passive);
 }
 
-int AudioEncoderCng::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderCng::Num10MsFramesInNextPacket() const {
   return speech_encoder_->Num10MsFramesInNextPacket();
 }
 
-int AudioEncoderCng::Max10MsFramesInAPacket() const {
+size_t AudioEncoderCng::Max10MsFramesInAPacket() const {
   return speech_encoder_->Max10MsFramesInAPacket();
 }
 
@@ -124,11 +125,11 @@
   for (size_t i = 0; i < samples_per_10ms_frame; ++i) {
     speech_buffer_.push_back(audio[i]);
   }
-  const int frames_to_encode = speech_encoder_->Num10MsFramesInNextPacket();
-  if (rtp_timestamps_.size() < static_cast<size_t>(frames_to_encode)) {
+  const size_t frames_to_encode = speech_encoder_->Num10MsFramesInNextPacket();
+  if (rtp_timestamps_.size() < frames_to_encode) {
     return EncodedInfo();
   }
-  CHECK_LE(frames_to_encode * 10, kMaxFrameSizeMs)
+  CHECK_LE(static_cast<int>(frames_to_encode * 10), kMaxFrameSizeMs)
       << "Frame size cannot be larger than " << kMaxFrameSizeMs
       << " ms when using VAD/CNG.";
 
@@ -136,12 +137,12 @@
   // following split sizes:
   // 10 ms = 10 + 0 ms; 20 ms = 20 + 0 ms; 30 ms = 30 + 0 ms;
   // 40 ms = 20 + 20 ms; 50 ms = 30 + 20 ms; 60 ms = 30 + 30 ms.
-  int blocks_in_first_vad_call =
+  size_t blocks_in_first_vad_call =
       (frames_to_encode > 3 ? 3 : frames_to_encode);
   if (frames_to_encode == 4)
     blocks_in_first_vad_call = 2;
   CHECK_GE(frames_to_encode, blocks_in_first_vad_call);
-  const int blocks_in_second_vad_call =
+  const size_t blocks_in_second_vad_call =
       frames_to_encode - blocks_in_first_vad_call;
 
   // Check if all of the buffer is passive speech. Start with checking the first
@@ -183,7 +184,7 @@
 }
 
 AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
-    int frames_to_encode,
+    size_t frames_to_encode,
     size_t max_encoded_bytes,
     uint8_t* encoded) {
   bool force_sid = last_frame_active_;
@@ -191,15 +192,19 @@
   const size_t samples_per_10ms_frame = SamplesPer10msFrame();
   CHECK_GE(max_encoded_bytes, frames_to_encode * samples_per_10ms_frame);
   AudioEncoder::EncodedInfo info;
-  for (int i = 0; i < frames_to_encode; ++i) {
-    int16_t encoded_bytes_tmp = 0;
+  for (size_t i = 0; i < frames_to_encode; ++i) {
+    // It's important not to pass &info.encoded_bytes directly to
+    // WebRtcCng_Encode(), since later loop iterations may return zero in that
+    // value, in which case we don't want to overwrite any value from an earlier
+    // iteration.
+    size_t encoded_bytes_tmp = 0;
     CHECK_GE(WebRtcCng_Encode(cng_inst_.get(),
                               &speech_buffer_[i * samples_per_10ms_frame],
-                              static_cast<int16_t>(samples_per_10ms_frame),
+                              samples_per_10ms_frame,
                               encoded, &encoded_bytes_tmp, force_sid), 0);
     if (encoded_bytes_tmp > 0) {
       CHECK(!output_produced);
-      info.encoded_bytes = static_cast<size_t>(encoded_bytes_tmp);
+      info.encoded_bytes = encoded_bytes_tmp;
       output_produced = true;
       force_sid = false;
     }
@@ -212,12 +217,12 @@
 }
 
 AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(
-    int frames_to_encode,
+    size_t frames_to_encode,
     size_t max_encoded_bytes,
     uint8_t* encoded) {
   const size_t samples_per_10ms_frame = SamplesPer10msFrame();
   AudioEncoder::EncodedInfo info;
-  for (int i = 0; i < frames_to_encode; ++i) {
+  for (size_t i = 0; i < frames_to_encode; ++i) {
     info = speech_encoder_->Encode(
         rtp_timestamps_.front(), &speech_buffer_[i * samples_per_10ms_frame],
         samples_per_10ms_frame, max_encoded_bytes, encoded);
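
The VAD-call split logic above (10-60 ms of buffered audio handled in at most two VAD calls of at most 30 ms each, with 40 ms split 20+20 rather than 30+10) factors out to a small function; a sketch matching the comment's table:

```cpp
#include <cstddef>
#include <utility>

// Returns the {first, second} VAD-call sizes, in 10 ms blocks, for a buffer
// of |frames_to_encode| blocks.
std::pair<size_t, size_t> SplitVadBlocks(size_t frames_to_encode) {
  size_t first = frames_to_encode > 3 ? 3 : frames_to_encode;
  if (frames_to_encode == 4)
    first = 2;  // 40 ms -> 20 + 20 ms.
  return {first, frames_to_encode - first};
}
// SplitVadBlocks(1..6) yields {1,0} {2,0} {3,0} {2,2} {3,2} {3,3}, matching
// the 10/20/30/40/50/60 ms table in the comment above.
```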
diff --git a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
index d16dd3b..a2ab6e8 100644
--- a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
@@ -59,14 +59,14 @@
   void CreateCng() {
     // The config_ parameters may be changed by the TEST_Fs up until CreateCng()
     // is called, thus we cannot use the values until now.
-    num_audio_samples_10ms_ = 10 * sample_rate_hz_ / 1000;
+    num_audio_samples_10ms_ = static_cast<size_t>(10 * sample_rate_hz_ / 1000);
     ASSERT_LE(num_audio_samples_10ms_, kMaxNumSamples);
     EXPECT_CALL(mock_encoder_, SampleRateHz())
         .WillRepeatedly(Return(sample_rate_hz_));
     // Max10MsFramesInAPacket() is just used to verify that the SID frame period
     // is not too small. The return value does not matter that much, as long as
     // it is smaller than 10.
-    EXPECT_CALL(mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(1));
+    EXPECT_CALL(mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(1u));
     EXPECT_CALL(mock_encoder_, MaxEncodedBytes())
         .WillRepeatedly(Return(kMockMaxEncodedBytes));
     cng_.reset(new AudioEncoderCng(config_));
@@ -83,10 +83,10 @@
   // Expect |num_calls| calls to the encoder, all successful. The last call
   // claims to have encoded |kMockMaxEncodedBytes| bytes, and all the preceding
   // ones 0 bytes.
-  void ExpectEncodeCalls(int num_calls) {
+  void ExpectEncodeCalls(size_t num_calls) {
     InSequence s;
     AudioEncoder::EncodedInfo info;
-    for (int j = 0; j < num_calls - 1; ++j) {
+    for (size_t j = 0; j < num_calls - 1; ++j) {
       EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
           .WillOnce(Return(info));
     }
@@ -98,7 +98,7 @@
   // Verifies that the cng_ object waits until it has collected
   // |blocks_per_frame| blocks of audio, and then dispatches all of them to
   // the underlying codec (speech or cng).
-  void CheckBlockGrouping(int blocks_per_frame, bool active_speech) {
+  void CheckBlockGrouping(size_t blocks_per_frame, bool active_speech) {
     EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
         .WillRepeatedly(Return(blocks_per_frame));
     CreateCng();
@@ -107,7 +107,7 @@
 
     // Don't expect any calls to the encoder yet.
     EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
-    for (int i = 0; i < blocks_per_frame - 1; ++i) {
+    for (size_t i = 0; i < blocks_per_frame - 1; ++i) {
       Encode();
       EXPECT_EQ(0u, encoded_info_.encoded_bytes);
     }
@@ -127,14 +127,15 @@
   void CheckVadInputSize(int input_frame_size_ms,
                          int expected_first_block_size_ms,
                          int expected_second_block_size_ms) {
-    const int blocks_per_frame = input_frame_size_ms / 10;
+    const size_t blocks_per_frame =
+        static_cast<size_t>(input_frame_size_ms / 10);
 
     EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
         .WillRepeatedly(Return(blocks_per_frame));
 
     // Expect nothing to happen before the last block is sent to cng_.
     EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _)).Times(0);
-    for (int i = 0; i < blocks_per_frame - 1; ++i) {
+    for (size_t i = 0; i < blocks_per_frame - 1; ++i) {
       Encode();
     }
 
@@ -163,7 +164,7 @@
                                Vad::Activity second_type) {
     // Set the speech encoder frame size to 60 ms, to ensure that the VAD will
     // be called twice.
-    const int blocks_per_frame = 6;
+    const size_t blocks_per_frame = 6;
     EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
         .WillRepeatedly(Return(blocks_per_frame));
     InSequence s;
@@ -175,7 +176,7 @@
           .WillOnce(Return(second_type));
     }
     encoded_info_.payload_type = 0;
-    for (int i = 0; i < blocks_per_frame; ++i) {
+    for (size_t i = 0; i < blocks_per_frame; ++i) {
       Encode();
     }
     return encoded_info_.payload_type != kCngPayloadType;
@@ -199,8 +200,8 @@
 
 TEST_F(AudioEncoderCngTest, CheckFrameSizePropagation) {
   CreateCng();
-  EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(17));
-  EXPECT_EQ(17, cng_->Num10MsFramesInNextPacket());
+  EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(17U));
+  EXPECT_EQ(17U, cng_->Num10MsFramesInNextPacket());
 }
 
 TEST_F(AudioEncoderCngTest, CheckChangeBitratePropagation) {
@@ -217,7 +218,7 @@
 
 TEST_F(AudioEncoderCngTest, EncodeCallsVad) {
   EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
-      .WillRepeatedly(Return(1));
+      .WillRepeatedly(Return(1U));
   CreateCng();
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
       .WillOnce(Return(Vad::kPassive));
@@ -249,7 +250,7 @@
 }
 
 TEST_F(AudioEncoderCngTest, EncodePassive) {
-  const int kBlocksPerFrame = 3;
+  const size_t kBlocksPerFrame = 3;
   EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
       .WillRepeatedly(Return(kBlocksPerFrame));
   CreateCng();
@@ -258,7 +259,7 @@
   // Expect no calls at all to the speech encoder mock.
   EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
   uint32_t expected_timestamp = timestamp_;
-  for (int i = 0; i < 100; ++i) {
+  for (size_t i = 0; i < 100; ++i) {
     Encode();
     // Check if it was time to call the cng encoder. This is done once every
     // |kBlocksPerFrame| calls.
@@ -339,7 +340,7 @@
 TEST_F(AudioEncoderCngTest, VerifyCngPayloadType) {
   CreateCng();
   EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
-  EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
+  EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1U));
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
       .WillOnce(Return(Vad::kPassive));
   encoded_info_.payload_type = 0;
@@ -352,7 +353,7 @@
 TEST_F(AudioEncoderCngTest, VerifySidFrameAfterSpeech) {
   CreateCng();
   EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
-      .WillRepeatedly(Return(1));
+      .WillRepeatedly(Return(1U));
   // Start with encoding noise.
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
       .Times(2)
@@ -443,7 +444,7 @@
 TEST_F(AudioEncoderCngDeathTest, EncoderFrameSizeTooLarge) {
   CreateCng();
   EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
-      .WillRepeatedly(Return(7));
+      .WillRepeatedly(Return(7U));
   for (int i = 0; i < 6; ++i)
     Encode();
   EXPECT_DEATH(Encode(),
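
The 17U/1u suffixes above are deliberate: with Num10MsFramesInNextPacket() and Max10MsFramesInAPacket() now returning size_t, unsigned literals keep gmock's Return() action and gtest's EXPECT_EQ from mixing signed and unsigned operands in warning-as-error builds. Note too that loops of the form "for (size_t j = 0; j < num_calls - 1; ++j)" assume num_calls >= 1, since size_t arithmetic wraps at zero; these tests only pass counts of 1 or more. A minimal self-contained sketch, assuming gmock/gtest (MockFrameSource is hypothetical, not the fixture above):

  #include <cstddef>
  #include "gmock/gmock.h"
  #include "gtest/gtest.h"

  class MockFrameSource {
   public:
    MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, size_t());
  };

  TEST(UnsignedLiteralSketch, KeepsBothSidesUnsigned) {
    MockFrameSource source;
    // Return(17u) matches the size_t return type, and EXPECT_EQ(17u, ...)
    // compares unsigned with unsigned, so -Wsign-compare stays quiet.
    EXPECT_CALL(source, Num10MsFramesInNextPacket())
        .WillOnce(testing::Return(17u));
    EXPECT_EQ(17u, source.Num10MsFramesInNextPacket());
  }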
diff --git a/webrtc/modules/audio_coding/codecs/cng/cng_unittest.cc b/webrtc/modules/audio_coding/codecs/cng/cng_unittest.cc
index 0d1c670..2409540 100644
--- a/webrtc/modules/audio_coding/codecs/cng/cng_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/cng/cng_unittest.cc
@@ -99,7 +99,7 @@
 
 TEST_F(CngTest, CngEncode) {
   uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
-  int16_t number_bytes;
+  size_t number_bytes;
 
   // Create encoder memory.
   EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -151,7 +151,7 @@
 // Encode Cng with too long input vector.
 TEST_F(CngTest, CngEncodeTooLong) {
   uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
-  int16_t number_bytes;
+  size_t number_bytes;
 
   // Create and init encoder memory.
   EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -170,7 +170,7 @@
 // Call encode without calling init.
 TEST_F(CngTest, CngEncodeNoInit) {
   uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
-  int16_t number_bytes;
+  size_t number_bytes;
 
   // Create encoder memory.
   EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -187,7 +187,7 @@
 // Update SID parameters, for both 9 and 16 parameters.
 TEST_F(CngTest, CngUpdateSid) {
   uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
-  int16_t number_bytes;
+  size_t number_bytes;
 
   // Create and initialize encoder and decoder memory.
   EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -224,7 +224,7 @@
 // Update SID parameters, with wrong parameters or without calling decode.
 TEST_F(CngTest, CngUpdateSidErroneous) {
   uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
-  int16_t number_bytes;
+  size_t number_bytes;
 
   // Create encoder and decoder memory.
   EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -261,7 +261,7 @@
 TEST_F(CngTest, CngGenerate) {
   uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
   int16_t out_data[640];
-  int16_t number_bytes;
+  size_t number_bytes;
 
   // Create and initialize encoder and decoder memory.
   EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -294,7 +294,7 @@
 // Test automatic SID.
 TEST_F(CngTest, CngAutoSid) {
   uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
-  int16_t number_bytes;
+  size_t number_bytes;
 
   // Create and initialize encoder and decoder memory.
   EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -321,7 +321,7 @@
 // Test automatic SID, with very short interval.
 TEST_F(CngTest, CngAutoSidShort) {
   uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
-  int16_t number_bytes;
+  size_t number_bytes;
 
   // Create and initialize encoder and decoder memory.
   EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
diff --git a/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h b/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
index 51d2feb..6a11366 100644
--- a/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
+++ b/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
@@ -50,8 +50,8 @@
   int NumChannels() const override;
   size_t MaxEncodedBytes() const override;
   int RtpTimestampRateHz() const override;
-  int Num10MsFramesInNextPacket() const override;
-  int Max10MsFramesInAPacket() const override;
+  size_t Num10MsFramesInNextPacket() const override;
+  size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
   void SetTargetBitrate(int bits_per_second) override;
   void SetProjectedPacketLossRate(double fraction) override;
@@ -67,10 +67,10 @@
     inline void operator()(CNG_enc_inst* ptr) const { WebRtcCng_FreeEnc(ptr); }
   };
 
-  EncodedInfo EncodePassive(int frames_to_encode,
+  EncodedInfo EncodePassive(size_t frames_to_encode,
                             size_t max_encoded_bytes,
                             uint8_t* encoded);
-  EncodedInfo EncodeActive(int frames_to_encode,
+  EncodedInfo EncodeActive(size_t frames_to_encode,
                            size_t max_encoded_bytes,
                            uint8_t* encoded);
   size_t SamplesPer10msFrame() const;
diff --git a/webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h b/webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h
index 1ec5d67..6c7e50b 100644
--- a/webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h
+++ b/webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h
@@ -104,8 +104,8 @@
  *                      -1 - Error
  */
 int WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
-                     int16_t nrOfSamples, uint8_t* SIDdata,
-                     int16_t* bytesOut, int16_t forceSID);
+                     size_t nrOfSamples, uint8_t* SIDdata,
+                     size_t* bytesOut, int16_t forceSID);
 
 /****************************************************************************
  * WebRtcCng_UpdateSid(...)
@@ -138,7 +138,7 @@
  *                      -1 - Error
  */
 int16_t WebRtcCng_Generate(CNG_dec_inst* cng_inst, int16_t* outData,
-                           int16_t nrOfSamples, int16_t new_period);
+                           size_t nrOfSamples, int16_t new_period);
 
 /*****************************************************************************
  * WebRtcCng_GetErrorCodeEnc/Dec(...)
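
A sketch of a call site under the new WebRtcCng_Encode signature; the 160-sample block (10 ms at 16 kHz) and the forceSID value are illustrative only, while CNG_enc_inst and WEBRTC_CNG_MAX_LPC_ORDER come from this header:

  #include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"

  void EncodeOneBlock(CNG_enc_inst* enc, int16_t* speech) {
    uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
    size_t bytes_out = 0;  // was int16_t before this change
    if (WebRtcCng_Encode(enc, speech, 160, sid_data, &bytes_out, 0) >= 0) {
      // bytes_out carries the SID payload size with no narrowing.
    }
  }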
diff --git a/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c b/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c
index 1f6974a..a0c166a 100644
--- a/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c
+++ b/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c
@@ -35,7 +35,7 @@
 } WebRtcCngDecoder;
 
 typedef struct WebRtcCngEncoder_ {
-  int16_t enc_nrOfCoefs;
+  size_t enc_nrOfCoefs;
   int enc_sampfreq;
   int16_t enc_interval;
   int16_t enc_msSinceSID;
@@ -228,8 +228,8 @@
  *                      -1 - Error
  */
 int WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
-                     int16_t nrOfSamples, uint8_t* SIDdata,
-                     int16_t* bytesOut, int16_t forceSID) {
+                     size_t nrOfSamples, uint8_t* SIDdata,
+                     size_t* bytesOut, int16_t forceSID) {
   WebRtcCngEncoder* inst = (WebRtcCngEncoder*) cng_inst;
 
   int16_t arCoefs[WEBRTC_CNG_MAX_LPC_ORDER + 1];
@@ -240,10 +240,11 @@
   int16_t ReflBetaComp = 13107;  /* 0.4 in q15. */
   int32_t outEnergy;
   int outShifts;
-  int i, stab;
+  size_t i;
+  int stab;
   int acorrScale;
-  int index;
-  int16_t ind, factor;
+  size_t index;
+  size_t ind, factor;
   int32_t* bptr;
   int32_t blo, bhi;
   int16_t negate;
@@ -281,7 +282,7 @@
       outShifts--;
     }
   }
-  outEnergy = WebRtcSpl_DivW32W16(outEnergy, factor);
+  outEnergy = WebRtcSpl_DivW32W16(outEnergy, (int16_t)factor);
 
   if (outEnergy > 1) {
     /* Create Hanning Window. */
@@ -390,7 +391,7 @@
 
     inst->enc_msSinceSID +=
         (int16_t)((1000 * nrOfSamples) / inst->enc_sampfreq);
-    return inst->enc_nrOfCoefs + 1;
+    return (int)(inst->enc_nrOfCoefs + 1);
   } else {
     inst->enc_msSinceSID +=
         (int16_t)((1000 * nrOfSamples) / inst->enc_sampfreq);
@@ -475,10 +476,10 @@
  *                       -1 - Error
  */
 int16_t WebRtcCng_Generate(CNG_dec_inst* cng_inst, int16_t* outData,
-                           int16_t nrOfSamples, int16_t new_period) {
+                           size_t nrOfSamples, int16_t new_period) {
   WebRtcCngDecoder* inst = (WebRtcCngDecoder*) cng_inst;
 
-  int i;
+  size_t i;
   int16_t excitation[WEBRTC_CNG_MAX_OUTSIZE_ORDER];
   int16_t low[WEBRTC_CNG_MAX_OUTSIZE_ORDER];
   int16_t lpPoly[WEBRTC_CNG_MAX_LPC_ORDER + 1];
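
The encoder body now counts and indexes in size_t and narrows explicitly only where legacy fixed-point helpers still take int16_t, as in the WebRtcSpl_DivW32W16 call above. A standalone sketch of that pattern (LegacyDivW32W16 is a hypothetical stand-in, not the real helper):

  #include <cstddef>
  #include <cstdint>

  // Hypothetical stand-in for a fixed-point helper, such as
  // WebRtcSpl_DivW32W16, whose divisor is still int16_t.
  static int32_t LegacyDivW32W16(int32_t num, int16_t den) {
    return num / den;
  }

  int32_t NormalizeEnergy(int32_t energy, size_t factor) {
    // |factor| is a small nonzero window count known to fit in int16_t;
    // the cast makes the narrowing visible instead of implicit.
    return LegacyDivW32W16(energy, static_cast<int16_t>(factor));
  }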
diff --git a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
index 905a715..ba5959d 100644
--- a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
+++ b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
@@ -37,7 +37,8 @@
     : sample_rate_hz_(sample_rate_hz),
       num_channels_(config.num_channels),
       payload_type_(config.payload_type),
-      num_10ms_frames_per_packet_(config.frame_size_ms / 10),
+      num_10ms_frames_per_packet_(
+          static_cast<size_t>(config.frame_size_ms / 10)),
       full_frame_samples_(NumSamplesPerFrame(config.num_channels,
                                              config.frame_size_ms,
                                              sample_rate_hz_)),
@@ -63,11 +64,11 @@
   return full_frame_samples_ * BytesPerSample();
 }
 
-int AudioEncoderPcm::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderPcm::Num10MsFramesInNextPacket() const {
   return num_10ms_frames_per_packet_;
 }
 
-int AudioEncoderPcm::Max10MsFramesInAPacket() const {
+size_t AudioEncoderPcm::Max10MsFramesInAPacket() const {
   return num_10ms_frames_per_packet_;
 }
 
@@ -95,27 +96,26 @@
   EncodedInfo info;
   info.encoded_timestamp = first_timestamp_in_buffer_;
   info.payload_type = payload_type_;
-  int16_t ret = EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
-  CHECK_GE(ret, 0);
-  info.encoded_bytes = static_cast<size_t>(ret);
+  info.encoded_bytes =
+      EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
   speech_buffer_.clear();
   return info;
 }
 
-int16_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
-                                     size_t input_len,
-                                     uint8_t* encoded) {
-  return WebRtcG711_EncodeA(audio, static_cast<int16_t>(input_len), encoded);
+size_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
+                                    size_t input_len,
+                                    uint8_t* encoded) {
+  return WebRtcG711_EncodeA(audio, input_len, encoded);
 }
 
 int AudioEncoderPcmA::BytesPerSample() const {
   return 1;
 }
 
-int16_t AudioEncoderPcmU::EncodeCall(const int16_t* audio,
-                                     size_t input_len,
-                                     uint8_t* encoded) {
-  return WebRtcG711_EncodeU(audio, static_cast<int16_t>(input_len), encoded);
+size_t AudioEncoderPcmU::EncodeCall(const int16_t* audio,
+                                    size_t input_len,
+                                    uint8_t* encoded) {
+  return WebRtcG711_EncodeU(audio, input_len, encoded);
 }
 
 int AudioEncoderPcmU::BytesPerSample() const {
diff --git a/webrtc/modules/audio_coding/codecs/g711/g711_interface.c b/webrtc/modules/audio_coding/codecs/g711/g711_interface.c
index b579520..5b96a9c 100644
--- a/webrtc/modules/audio_coding/codecs/g711/g711_interface.c
+++ b/webrtc/modules/audio_coding/codecs/g711/g711_interface.c
@@ -12,40 +12,40 @@
 #include "g711_interface.h"
 #include "webrtc/typedefs.h"
 
-int16_t WebRtcG711_EncodeA(const int16_t* speechIn,
-                           int16_t len,
-                           uint8_t* encoded) {
-  int n;
+size_t WebRtcG711_EncodeA(const int16_t* speechIn,
+                          size_t len,
+                          uint8_t* encoded) {
+  size_t n;
   for (n = 0; n < len; n++)
     encoded[n] = linear_to_alaw(speechIn[n]);
   return len;
 }
 
-int16_t WebRtcG711_EncodeU(const int16_t* speechIn,
-                           int16_t len,
-                           uint8_t* encoded) {
-  int n;
+size_t WebRtcG711_EncodeU(const int16_t* speechIn,
+                          size_t len,
+                          uint8_t* encoded) {
+  size_t n;
   for (n = 0; n < len; n++)
     encoded[n] = linear_to_ulaw(speechIn[n]);
   return len;
 }
 
-int16_t WebRtcG711_DecodeA(const uint8_t* encoded,
-                           int16_t len,
-                           int16_t* decoded,
-                           int16_t* speechType) {
-  int n;
+size_t WebRtcG711_DecodeA(const uint8_t* encoded,
+                          size_t len,
+                          int16_t* decoded,
+                          int16_t* speechType) {
+  size_t n;
   for (n = 0; n < len; n++)
     decoded[n] = alaw_to_linear(encoded[n]);
   *speechType = 1;
   return len;
 }
 
-int16_t WebRtcG711_DecodeU(const uint8_t* encoded,
-                           int16_t len,
-                           int16_t* decoded,
-                           int16_t* speechType) {
-  int n;
+size_t WebRtcG711_DecodeU(const uint8_t* encoded,
+                          size_t len,
+                          int16_t* decoded,
+                          int16_t* speechType) {
+  size_t n;
   for (n = 0; n < len; n++)
     decoded[n] = ulaw_to_linear(encoded[n]);
   *speechType = 1;
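
Because G.711 maps each input sample to exactly one output byte and cannot fail, the encode and decode routines now simply return len as size_t; that is also why testG711.cc below drops its error-code plumbing. A sketch of a caller relying on that contract:

  #include "webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h"

  size_t EncodeAlawBlock(const int16_t* speech, size_t len, uint8_t* out) {
    // The return value is always len ("Always equal to len input
    // parameter"), so no error branch is needed.
    return WebRtcG711_EncodeA(speech, len, out);
  }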
diff --git a/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h b/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
index c869037..7d45f3f 100644
--- a/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
+++ b/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
@@ -39,8 +39,8 @@
   int SampleRateHz() const override;
   int NumChannels() const override;
   size_t MaxEncodedBytes() const override;
-  int Num10MsFramesInNextPacket() const override;
-  int Max10MsFramesInAPacket() const override;
+  size_t Num10MsFramesInNextPacket() const override;
+  size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
   EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
                              const int16_t* audio,
@@ -50,9 +50,9 @@
  protected:
   AudioEncoderPcm(const Config& config, int sample_rate_hz);
 
-  virtual int16_t EncodeCall(const int16_t* audio,
-                             size_t input_len,
-                             uint8_t* encoded) = 0;
+  virtual size_t EncodeCall(const int16_t* audio,
+                            size_t input_len,
+                            uint8_t* encoded) = 0;
 
   virtual int BytesPerSample() const = 0;
 
@@ -60,7 +60,7 @@
   const int sample_rate_hz_;
   const int num_channels_;
   const int payload_type_;
-  const int num_10ms_frames_per_packet_;
+  const size_t num_10ms_frames_per_packet_;
   const size_t full_frame_samples_;
   std::vector<int16_t> speech_buffer_;
   uint32_t first_timestamp_in_buffer_;
@@ -76,9 +76,9 @@
       : AudioEncoderPcm(config, kSampleRateHz) {}
 
  protected:
-  int16_t EncodeCall(const int16_t* audio,
-                     size_t input_len,
-                     uint8_t* encoded) override;
+  size_t EncodeCall(const int16_t* audio,
+                    size_t input_len,
+                    uint8_t* encoded) override;
 
   int BytesPerSample() const override;
 
@@ -96,9 +96,9 @@
       : AudioEncoderPcm(config, kSampleRateHz) {}
 
  protected:
-  int16_t EncodeCall(const int16_t* audio,
-                     size_t input_len,
-                     uint8_t* encoded) override;
+  size_t EncodeCall(const int16_t* audio,
+                    size_t input_len,
+                    uint8_t* encoded) override;
 
   int BytesPerSample() const override;
 
diff --git a/webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h b/webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h
index 5c71e98..9d67222 100644
--- a/webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h
+++ b/webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h
@@ -38,9 +38,9 @@
  *                             Always equal to len input parameter.
  */
 
-int16_t WebRtcG711_EncodeA(const int16_t* speechIn,
-                           int16_t len,
-                           uint8_t* encoded);
+size_t WebRtcG711_EncodeA(const int16_t* speechIn,
+                          size_t len,
+                          uint8_t* encoded);
 
 /****************************************************************************
  * WebRtcG711_EncodeU(...)
@@ -59,9 +59,9 @@
  *                             Always equal to len input parameter.
  */
 
-int16_t WebRtcG711_EncodeU(const int16_t* speechIn,
-                           int16_t len,
-                           uint8_t* encoded);
+size_t WebRtcG711_EncodeU(const int16_t* speechIn,
+                          size_t len,
+                          uint8_t* encoded);
 
 /****************************************************************************
  * WebRtcG711_DecodeA(...)
@@ -82,10 +82,10 @@
  *                             -1 - Error
  */
 
-int16_t WebRtcG711_DecodeA(const uint8_t* encoded,
-                           int16_t len,
-                           int16_t* decoded,
-                           int16_t* speechType);
+size_t WebRtcG711_DecodeA(const uint8_t* encoded,
+                          size_t len,
+                          int16_t* decoded,
+                          int16_t* speechType);
 
 /****************************************************************************
  * WebRtcG711_DecodeU(...)
@@ -106,10 +106,10 @@
  *                             -1 - Error
  */
 
-int16_t WebRtcG711_DecodeU(const uint8_t* encoded,
-                           int16_t len,
-                           int16_t* decoded,
-                           int16_t* speechType);
+size_t WebRtcG711_DecodeU(const uint8_t* encoded,
+                          size_t len,
+                          int16_t* decoded,
+                          int16_t* speechType);
 
 /**********************************************************************
 * WebRtcG711_Version(...)
diff --git a/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc b/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc
index 49c671c..94248f7 100644
--- a/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc
+++ b/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc
@@ -24,8 +24,8 @@
 #define CLOCKS_PER_SEC_G711 1000
 
 /* function for reading audio data from PCM file */
-bool readframe(int16_t* data, FILE* inp, int length) {
-  short rlen = (short) fread(data, sizeof(int16_t), length, inp);
+bool readframe(int16_t* data, FILE* inp, size_t length) {
+  size_t rlen = fread(data, sizeof(int16_t), length, inp);
   if (rlen >= length)
     return false;
   memset(data + rlen, 0, (length - rlen) * sizeof(int16_t));
@@ -40,16 +40,14 @@
   int framecnt;
   bool endfile;
 
-  int16_t framelength = 80;
-
-  int err;
+  size_t framelength = 80;
 
   /* Runtime statistics */
   double starttime;
   double runtime;
   double length_file;
 
-  int16_t stream_len = 0;
+  size_t stream_len = 0;
   int16_t shortdata[480];
   int16_t decoded[480];
   uint8_t streamdata[1000];
@@ -80,11 +78,12 @@
   printf("-----------------------------------\n");
   printf("G.711 version: %s\n\n", versionNumber);
   /* Get frame length */
-  framelength = atoi(argv[1]);
-  if (framelength < 0) {
-    printf("  G.711: Invalid framelength %d.\n", framelength);
-    exit(1);
+  int framelength_int = atoi(argv[1]);
+  if (framelength_int < 0) {
+    printf("  G.711: Invalid framelength %d.\n", framelength_int);
+    exit(1);
   }
+  framelength = static_cast<size_t>(framelength_int);
 
   /* Get compression law */
   strcpy(law, argv[2]);
@@ -130,36 +129,29 @@
       if (argc == 6) {
         /* Write bits to file */
         if (fwrite(streamdata, sizeof(unsigned char), stream_len, bitp) !=
-            static_cast<size_t>(stream_len)) {
+            stream_len) {
           return -1;
         }
       }
-      err = WebRtcG711_DecodeA(streamdata, stream_len, decoded,
-                               speechType);
+      WebRtcG711_DecodeA(streamdata, stream_len, decoded, speechType);
     } else if (!strcmp(law, "u")) {
       /* u-law encoding */
       stream_len = WebRtcG711_EncodeU(shortdata, framelength, streamdata);
       if (argc == 6) {
         /* Write bits to file */
         if (fwrite(streamdata, sizeof(unsigned char), stream_len, bitp) !=
-            static_cast<size_t>(stream_len)) {
+            stream_len) {
           return -1;
         }
       }
-      err = WebRtcG711_DecodeU(streamdata, stream_len, decoded, speechType);
+      WebRtcG711_DecodeU(streamdata, stream_len, decoded, speechType);
     } else {
       printf("Wrong law mode\n");
       exit(1);
     }
-    if (stream_len < 0 || err < 0) {
-      /* exit if returned with error */
-      printf("Error in encoder/decoder\n");
-    } else {
-      /* Write coded speech to file */
-      if (fwrite(decoded, sizeof(short), framelength, outp) !=
-          static_cast<size_t>(framelength)) {
-        return -1;
-      }
+    /* Write coded speech to file */
+    if (fwrite(decoded, sizeof(short), framelength, outp) != framelength) {
+      return -1;
     }
   }
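
With stream_len as size_t, the result of fwrite (also size_t) compares directly, which is what lets the static_cast<size_t> conversions above disappear. A self-contained sketch of the check:

  #include <cstddef>
  #include <cstdint>
  #include <cstdio>

  // Returns 0 on success, -1 on a short write; no casts and no
  // signed/unsigned mismatch in the comparison.
  int WriteStream(const uint8_t* data, size_t stream_len, FILE* out) {
    if (fwrite(data, sizeof(uint8_t), stream_len, out) != stream_len)
      return -1;
    return 0;
  }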
 
diff --git a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
index a0d1720..9eb7a11 100644
--- a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
+++ b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
@@ -19,7 +19,7 @@
 
 namespace {
 
-const int kSampleRateHz = 16000;
+const size_t kSampleRateHz = 16000;
 
 }  // namespace
 
@@ -40,13 +40,14 @@
 AudioEncoderG722::AudioEncoderG722(const Config& config)
     : num_channels_(config.num_channels),
       payload_type_(config.payload_type),
-      num_10ms_frames_per_packet_(config.frame_size_ms / 10),
+      num_10ms_frames_per_packet_(
+          static_cast<size_t>(config.frame_size_ms / 10)),
       num_10ms_frames_buffered_(0),
       first_timestamp_in_buffer_(0),
       encoders_(new EncoderState[num_channels_]),
       interleave_buffer_(2 * num_channels_) {
   CHECK(config.IsOk());
-  const int samples_per_channel =
+  const size_t samples_per_channel =
       kSampleRateHz / 100 * num_10ms_frames_per_packet_;
   for (int i = 0; i < num_channels_; ++i) {
     encoders_[i].speech_buffer.reset(new int16_t[samples_per_channel]);
@@ -71,14 +72,14 @@
 }
 
 size_t AudioEncoderG722::MaxEncodedBytes() const {
-  return static_cast<size_t>(SamplesPerChannel() / 2 * num_channels_);
+  return SamplesPerChannel() / 2 * num_channels_;
 }
 
-int AudioEncoderG722::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderG722::Num10MsFramesInNextPacket() const {
   return num_10ms_frames_per_packet_;
 }
 
-int AudioEncoderG722::Max10MsFramesInAPacket() const {
+size_t AudioEncoderG722::Max10MsFramesInAPacket() const {
   return num_10ms_frames_per_packet_;
 }
 
@@ -98,8 +99,8 @@
     first_timestamp_in_buffer_ = rtp_timestamp;
 
   // Deinterleave samples and save them in each channel's buffer.
-  const int start = kSampleRateHz / 100 * num_10ms_frames_buffered_;
-  for (int i = 0; i < kSampleRateHz / 100; ++i)
+  const size_t start = kSampleRateHz / 100 * num_10ms_frames_buffered_;
+  for (size_t i = 0; i < kSampleRateHz / 100; ++i)
     for (int j = 0; j < num_channels_; ++j)
       encoders_[j].speech_buffer[start + i] = audio[i * num_channels_ + j];
 
@@ -111,19 +112,18 @@
   // Encode each channel separately.
   CHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
   num_10ms_frames_buffered_ = 0;
-  const int samples_per_channel = SamplesPerChannel();
+  const size_t samples_per_channel = SamplesPerChannel();
   for (int i = 0; i < num_channels_; ++i) {
-    const int encoded = WebRtcG722_Encode(
+    const size_t encoded = WebRtcG722_Encode(
         encoders_[i].encoder, encoders_[i].speech_buffer.get(),
         samples_per_channel, encoders_[i].encoded_buffer.data<uint8_t>());
-    CHECK_GE(encoded, 0);
     CHECK_EQ(encoded, samples_per_channel / 2);
   }
 
   // Interleave the encoded bytes of the different channels. Each separate
   // channel and the interleaved stream encodes two samples per byte, most
   // significant half first.
-  for (int i = 0; i < samples_per_channel / 2; ++i) {
+  for (size_t i = 0; i < samples_per_channel / 2; ++i) {
     for (int j = 0; j < num_channels_; ++j) {
       uint8_t two_samples = encoders_[j].encoded_buffer.data()[i];
       interleave_buffer_.data()[j] = two_samples >> 4;
@@ -140,7 +140,7 @@
   return info;
 }
 
-int AudioEncoderG722::SamplesPerChannel() const {
+size_t AudioEncoderG722::SamplesPerChannel() const {
   return kSampleRateHz / 100 * num_10ms_frames_per_packet_;
 }
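
G.722 packs two samples into each output byte, so the per-packet byte budget is half the samples per channel; computed end to end in size_t, the old static_cast in MaxEncodedBytes becomes unnecessary. A sketch as a free function (names hypothetical, mirroring but not reproducing the members):

  #include <cstddef>

  size_t MaxEncodedBytesG722(size_t samples_per_channel,
                             size_t num_channels) {
    return samples_per_channel / 2 * num_channels;
  }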
 
diff --git a/webrtc/modules/audio_coding/codecs/g722/g722_decode.c b/webrtc/modules/audio_coding/codecs/g722/g722_decode.c
index ee0eb89..8fdeec1 100644
--- a/webrtc/modules/audio_coding/codecs/g722/g722_decode.c
+++ b/webrtc/modules/audio_coding/codecs/g722/g722_decode.c
@@ -188,8 +188,8 @@
 }
 /*- End of function --------------------------------------------------------*/
 
-int WebRtc_g722_decode(G722DecoderState *s, int16_t amp[],
-                       const uint8_t g722_data[], int len)
+size_t WebRtc_g722_decode(G722DecoderState *s, int16_t amp[],
+                          const uint8_t g722_data[], size_t len)
 {
     static const int wl[8] = {-60, -30, 58, 172, 334, 538, 1198, 3042 };
     static const int rl42[16] = {0, 7, 6, 5, 4, 3, 2, 1,
@@ -258,9 +258,9 @@
     int wd2;
     int wd3;
     int code;
-    int outlen;
+    size_t outlen;
     int i;
-    int j;
+    size_t j;
 
     outlen = 0;
     rhigh = 0;
diff --git a/webrtc/modules/audio_coding/codecs/g722/g722_enc_dec.h b/webrtc/modules/audio_coding/codecs/g722/g722_enc_dec.h
index 5cd1b2d..7db4895 100644
--- a/webrtc/modules/audio_coding/codecs/g722/g722_enc_dec.h
+++ b/webrtc/modules/audio_coding/codecs/g722/g722_enc_dec.h
@@ -139,19 +139,19 @@
                                           int rate,
                                           int options);
 int WebRtc_g722_encode_release(G722EncoderState *s);
-int WebRtc_g722_encode(G722EncoderState *s,
-                       uint8_t g722_data[],
-                       const int16_t amp[],
-                       int len);
+size_t WebRtc_g722_encode(G722EncoderState *s,
+                          uint8_t g722_data[],
+                          const int16_t amp[],
+                          size_t len);
 
 G722DecoderState* WebRtc_g722_decode_init(G722DecoderState* s,
                                           int rate,
                                           int options);
 int WebRtc_g722_decode_release(G722DecoderState *s);
-int WebRtc_g722_decode(G722DecoderState *s,
-                       int16_t amp[],
-                       const uint8_t g722_data[],
-                       int len);
+size_t WebRtc_g722_decode(G722DecoderState *s,
+                          int16_t amp[],
+                          const uint8_t g722_data[],
+                          size_t len);
 
 #ifdef __cplusplus
 }
diff --git a/webrtc/modules/audio_coding/codecs/g722/g722_encode.c b/webrtc/modules/audio_coding/codecs/g722/g722_encode.c
index bed2d21..01ec127 100644
--- a/webrtc/modules/audio_coding/codecs/g722/g722_encode.c
+++ b/webrtc/modules/audio_coding/codecs/g722/g722_encode.c
@@ -202,8 +202,8 @@
 }
 #endif
 
-int WebRtc_g722_encode(G722EncoderState *s, uint8_t g722_data[],
-                       const int16_t amp[], int len)
+size_t WebRtc_g722_encode(G722EncoderState *s, uint8_t g722_data[],
+                          const int16_t amp[], size_t len)
 {
     static const int q6[32] =
     {
@@ -275,11 +275,11 @@
     int eh;
     int mih;
     int i;
-    int j;
+    size_t j;
     /* Low and high band PCM from the QMF */
     int xlow;
     int xhigh;
-    int g722_bytes;
+    size_t g722_bytes;
     /* Even and odd tap accumulators */
     int sumeven;
     int sumodd;
diff --git a/webrtc/modules/audio_coding/codecs/g722/g722_interface.c b/webrtc/modules/audio_coding/codecs/g722/g722_interface.c
index 1edf58d..f6b9842 100644
--- a/webrtc/modules/audio_coding/codecs/g722/g722_interface.c
+++ b/webrtc/modules/audio_coding/codecs/g722/g722_interface.c
@@ -45,10 +45,10 @@
     return WebRtc_g722_encode_release((G722EncoderState*) G722enc_inst);
 }
 
-int16_t WebRtcG722_Encode(G722EncInst *G722enc_inst,
-                          const int16_t* speechIn,
-                          int16_t len,
-                          uint8_t* encoded)
+size_t WebRtcG722_Encode(G722EncInst *G722enc_inst,
+                         const int16_t* speechIn,
+                         size_t len,
+                         uint8_t* encoded)
 {
     unsigned char *codechar = (unsigned char*) encoded;
     // Encode the input speech vector
@@ -85,11 +85,11 @@
     return WebRtc_g722_decode_release((G722DecoderState*) G722dec_inst);
 }
 
-int16_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
-                          const uint8_t *encoded,
-                          int16_t len,
-                          int16_t *decoded,
-                          int16_t *speechType)
+size_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
+                         const uint8_t *encoded,
+                         size_t len,
+                         int16_t *decoded,
+                         int16_t *speechType)
 {
     // Decode the G.722 encoder stream
     *speechType=G722_WEBRTC_SPEECH;
diff --git a/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h b/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
index 9b57fbe..1f36fac 100644
--- a/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
+++ b/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
@@ -37,8 +37,8 @@
   int NumChannels() const override;
   size_t MaxEncodedBytes() const override;
   int RtpTimestampRateHz() const override;
-  int Num10MsFramesInNextPacket() const override;
-  int Max10MsFramesInAPacket() const override;
+  size_t Num10MsFramesInNextPacket() const override;
+  size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
   EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
                              const int16_t* audio,
@@ -55,12 +55,12 @@
     ~EncoderState();
   };
 
-  int SamplesPerChannel() const;
+  size_t SamplesPerChannel() const;
 
   const int num_channels_;
   const int payload_type_;
-  const int num_10ms_frames_per_packet_;
-  int num_10ms_frames_buffered_;
+  const size_t num_10ms_frames_per_packet_;
+  size_t num_10ms_frames_buffered_;
   uint32_t first_timestamp_in_buffer_;
   const rtc::scoped_ptr<EncoderState[]> encoders_;
   rtc::Buffer interleave_buffer_;
diff --git a/webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h b/webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h
index 46ff3b0..fa4a48c 100644
--- a/webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h
+++ b/webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h
@@ -94,10 +94,10 @@
  * Return value               : Length (in bytes) of coded data
  */
 
-int16_t WebRtcG722_Encode(G722EncInst* G722enc_inst,
-                          const int16_t* speechIn,
-                          int16_t len,
-                          uint8_t* encoded);
+size_t WebRtcG722_Encode(G722EncInst* G722enc_inst,
+                         const int16_t* speechIn,
+                         size_t len,
+                         uint8_t* encoded);
 
 
 /****************************************************************************
@@ -162,15 +162,14 @@
  *      - speechType        : 1 normal, 2 CNG (Since G722 does not have its own
  *                            DTX/CNG scheme it should always return 1)
  *
- * Return value             : >0 - Samples in decoded vector
- *                            -1 - Error
+ * Return value             : Samples in decoded vector
  */
 
-int16_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
-                          const uint8_t* encoded,
-                          int16_t len,
-                          int16_t *decoded,
-                          int16_t *speechType);
+size_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
+                         const uint8_t* encoded,
+                         size_t len,
+                         int16_t *decoded,
+                         int16_t *speechType);
 
 /****************************************************************************
  * WebRtcG722_Version(...)
diff --git a/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc b/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc
index 6a6f03c..b473c13 100644
--- a/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc
+++ b/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc
@@ -29,9 +29,9 @@
 typedef struct WebRtcG722DecInst    G722DecInst;
 
 /* function for reading audio data from PCM file */
-bool readframe(int16_t *data, FILE *inp, int length)
+bool readframe(int16_t *data, FILE *inp, size_t length)
 {
-    short rlen = (short)fread(data, sizeof(int16_t), length, inp);
+    size_t rlen = fread(data, sizeof(int16_t), length, inp);
     if (rlen >= length)
       return false;
     memset(data + rlen, 0, (length - rlen) * sizeof(int16_t));
@@ -45,17 +45,16 @@
 
     int framecnt;
     bool endfile;
-    int16_t framelength = 160;
+    size_t framelength = 160;
     G722EncInst *G722enc_inst;
     G722DecInst *G722dec_inst;
-    int err;
 
     /* Runtime statistics */
     double starttime;
     double runtime = 0;
     double length_file;
 
-    int16_t stream_len = 0;
+    size_t stream_len = 0;
     int16_t shortdata[960];
     int16_t decoded[960];
     uint8_t streamdata[80 * 6];
@@ -78,11 +77,12 @@
     }
 
     /* Get frame length */
-    framelength = atoi(argv[1]);
-    if (framelength < 0) {
-        printf("  G.722: Invalid framelength %d.\n", framelength);
+    int framelength_int = atoi(argv[1]);
+    if (framelength_int < 0) {
+        printf("  G.722: Invalid framelength %d.\n", framelength_int);
         exit(1);
     }
+    framelength = static_cast<size_t>(framelength_int);
 
     /* Get Input and Output files */
     sscanf(argv[2], "%s", inname);
@@ -124,26 +124,21 @@
 
         /* G.722 encoding + decoding */
         stream_len = WebRtcG722_Encode((G722EncInst *)G722enc_inst, shortdata, framelength, streamdata);
-        err = WebRtcG722_Decode(G722dec_inst, streamdata, stream_len, decoded,
-                                speechType);
+        WebRtcG722_Decode(G722dec_inst, streamdata, stream_len, decoded,
+                          speechType);
 
         /* Stop clock after call to encoder and decoder */
         runtime += (double)((clock()/(double)CLOCKS_PER_SEC_G722)-starttime);
 
-        if (stream_len < 0 || err < 0) {
-            /* exit if returned with error */
-            printf("Error in encoder/decoder\n");
-        } else {
-          /* Write coded bits to file */
-          if (fwrite(streamdata, sizeof(short), stream_len / 2, outbitp) !=
-              static_cast<size_t>(stream_len / 2)) {
-            return -1;
-          }
-          /* Write coded speech to file */
-          if (fwrite(decoded, sizeof(short), framelength, outp) !=
-              static_cast<size_t>(framelength)) {
-            return -1;
-          }
+        /* Write coded bits to file */
+        if (fwrite(streamdata, sizeof(short), stream_len / 2, outbitp) !=
+            stream_len / 2) {
+          return -1;
+        }
+        /* Write coded speech to file */
+        if (fwrite(decoded, sizeof(short), framelength, outp) !=
+            framelength) {
+          return -1;
         }
     }
 
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/abs_quant.c b/webrtc/modules/audio_coding/codecs/ilbc/abs_quant.c
index 75fc970..263749a 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/abs_quant.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/abs_quant.c
@@ -36,7 +36,7 @@
     int16_t *weightDenum   /* (i) denominator of synthesis filter */
                             ) {
   int16_t *syntOut;
-  int16_t quantLen[2];
+  size_t quantLen[2];
 
   /* Stack based */
   int16_t syntOutBuf[LPC_FILTERORDER+STATE_SHORT_LEN_30MS];
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c b/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c
index d26fb5d..4b76453 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c
@@ -21,9 +21,9 @@
 #include "sort_sq.h"
 
 void WebRtcIlbcfix_AbsQuantLoop(int16_t *syntOutIN, int16_t *in_weightedIN,
-                                int16_t *weightDenumIN, int16_t *quantLenIN,
+                                int16_t *weightDenumIN, size_t *quantLenIN,
                                 int16_t *idxVecIN ) {
-  int k1, k2;
+  size_t k1, k2;
   int16_t index;
   int32_t toQW32;
   int32_t toQ32;
@@ -33,7 +33,7 @@
   int16_t *syntOut   = syntOutIN;
   int16_t *in_weighted  = in_weightedIN;
   int16_t *weightDenum  = weightDenumIN;
-  int16_t *quantLen  = quantLenIN;
+  size_t *quantLen  = quantLenIN;
   int16_t *idxVec   = idxVecIN;
 
   for(k1=0;k1<2;k1++) {
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h b/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h
index 50c6ffe..c8bf675 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h
@@ -27,7 +27,7 @@
  *---------------------------------------------------------------*/
 
 void WebRtcIlbcfix_AbsQuantLoop(int16_t *syntOutIN, int16_t *in_weightedIN,
-                                int16_t *weightDenumIN, int16_t *quantLenIN,
+                                int16_t *weightDenumIN, size_t *quantLenIN,
                                 int16_t *idxVecIN);
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
index 8dc9bdf..33aba38 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
+++ b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
@@ -24,15 +24,20 @@
 
 }  // namespace
 
+// static
+const size_t AudioEncoderIlbc::kMaxSamplesPerPacket;
+
 bool AudioEncoderIlbc::Config::IsOk() const {
   return (frame_size_ms == 20 || frame_size_ms == 30 || frame_size_ms == 40 ||
           frame_size_ms == 60) &&
-      (kSampleRateHz / 100 * (frame_size_ms / 10)) <= kMaxSamplesPerPacket;
+      static_cast<size_t>(kSampleRateHz / 100 * (frame_size_ms / 10)) <=
+          kMaxSamplesPerPacket;
 }
 
 AudioEncoderIlbc::AudioEncoderIlbc(const Config& config)
     : payload_type_(config.payload_type),
-      num_10ms_frames_per_packet_(config.frame_size_ms / 10),
+      num_10ms_frames_per_packet_(
+          static_cast<size_t>(config.frame_size_ms / 10)),
       num_10ms_frames_buffered_(0) {
   CHECK(config.IsOk());
   CHECK_EQ(0, WebRtcIlbcfix_EncoderCreate(&encoder_));
@@ -58,11 +63,11 @@
   return RequiredOutputSizeBytes();
 }
 
-int AudioEncoderIlbc::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderIlbc::Num10MsFramesInNextPacket() const {
   return num_10ms_frames_per_packet_;
 }
 
-int AudioEncoderIlbc::Max10MsFramesInAPacket() const {
+size_t AudioEncoderIlbc::Max10MsFramesInAPacket() const {
   return num_10ms_frames_per_packet_;
 }
 
@@ -111,7 +116,7 @@
       encoded);
   CHECK_GE(output_len, 0);
   EncodedInfo info;
-  info.encoded_bytes = output_len;
+  info.encoded_bytes = static_cast<size_t>(output_len);
   DCHECK_EQ(info.encoded_bytes, RequiredOutputSizeBytes());
   info.encoded_timestamp = first_timestamp_in_buffer_;
   info.payload_type = payload_type_;
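
The "// static" definition added near the top of this file is easy to miss: pre-C++17, a static const integral member initialized in the class body still needs an out-of-class definition if it is ever ODR-used, for example by binding it to a const reference inside a CHECK or EXPECT macro. A minimal sketch of the rule:

  #include <cstddef>

  struct Config {
    static const size_t kMaxSamplesPerPacket = 480;  // value in header
  };
  // Required under C++11/14 once the constant is ODR-used.
  const size_t Config::kMaxSamplesPerPacket;

  bool FitsInPacket(size_t samples) {
    return samples <= Config::kMaxSamplesPerPacket;
  }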
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c b/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c
index c24b4a6..1a3735f 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c
@@ -28,14 +28,14 @@
     int32_t *crossDot,  /* (o) The cross correlation between
                                  the target and the Augmented
                                  vector */
-    int16_t low,    /* (i) Lag to start from (typically
+    size_t low,    /* (i) Lag to start from (typically
                              20) */
-    int16_t high,   /* (i) Lag to end at (typically 39) */
+    size_t high,   /* (i) Lag to end at (typically 39) */
     int scale)   /* (i) Scale factor to use for
                               the crossDot */
 {
-  int lagcount;
-  int16_t ilow;
+  size_t lagcount;
+  size_t ilow;
   int16_t *targetPtr;
   int32_t *crossDotPtr;
   int16_t *iSPtr=interpSamples;
@@ -46,7 +46,7 @@
   crossDotPtr=crossDot;
   for (lagcount=low; lagcount<=high; lagcount++) {
 
-    ilow = (int16_t) (lagcount-4);
+    ilow = lagcount - 4;
 
     /* Compute dot product for the first (lagcount-4) samples */
     (*crossDotPtr) = WebRtcSpl_DotProductWithScale(target, buffer-lagcount, ilow, scale);
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h b/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h
index a0435c4..c5c4088 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h
@@ -33,9 +33,9 @@
     int32_t *crossDot,  /* (o) The cross correlation between
                                            the target and the Augmented
                                            vector */
-    int16_t low,    /* (i) Lag to start from (typically
+    size_t low,    /* (i) Lag to start from (typically
                                                    20) */
-    int16_t high,   /* (i) Lag to end at (typically 39 */
+    size_t high,   /* (i) Lag to end at (typically 39) */
     int scale);   /* (i) Scale factor to use for the crossDot */
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.c b/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.c
index 9d11b83..cacf3ac 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.c
@@ -29,10 +29,10 @@
     int16_t *index,   /* (i) Codebook indices */
     int16_t *gain_index,  /* (i) Gain quantization indices */
     int16_t *mem,   /* (i) Buffer for codevector construction */
-    int16_t lMem,   /* (i) Length of buffer */
-    int16_t veclen   /* (i) Length of vector */
+    size_t lMem,   /* (i) Length of buffer */
+    size_t veclen   /* (i) Length of vector */
                                ){
-  int j;
+  size_t j;
   int16_t gain[CB_NSTAGES];
   /* Stack based */
   int16_t cbvec0[SUBL];
@@ -50,9 +50,9 @@
   /* codebook vector construction and construction of total vector */
 
   /* Stack based */
-  WebRtcIlbcfix_GetCbVec(cbvec0, mem, index[0], lMem, veclen);
-  WebRtcIlbcfix_GetCbVec(cbvec1, mem, index[1], lMem, veclen);
-  WebRtcIlbcfix_GetCbVec(cbvec2, mem, index[2], lMem, veclen);
+  WebRtcIlbcfix_GetCbVec(cbvec0, mem, (size_t)index[0], lMem, veclen);
+  WebRtcIlbcfix_GetCbVec(cbvec1, mem, (size_t)index[1], lMem, veclen);
+  WebRtcIlbcfix_GetCbVec(cbvec2, mem, (size_t)index[2], lMem, veclen);
 
   gainPtr = &gain[0];
   for (j=0;j<veclen;j++) {
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.h b/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.h
index 2e9080f..b676ef9 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.h
@@ -30,8 +30,8 @@
     int16_t *index,   /* (i) Codebook indices */
     int16_t *gain_index,  /* (i) Gain quantization indices */
     int16_t *mem,   /* (i) Buffer for codevector construction */
-    int16_t lMem,   /* (i) Length of buffer */
-    int16_t veclen   /* (i) Length of vector */
+    size_t lMem,   /* (i) Length of buffer */
+    size_t veclen   /* (i) Length of vector */
                                );
 
 
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c
index 1b8c506..6ad2f8e 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c
@@ -27,15 +27,15 @@
  *----------------------------------------------------------------*/
 
 void WebRtcIlbcfix_CbMemEnergy(
-    int16_t range,
+    size_t range,
     int16_t *CB,   /* (i) The CB memory (1:st section) */
     int16_t *filteredCB,  /* (i) The filtered CB memory (2:nd section) */
-    int16_t lMem,   /* (i) Length of the CB memory */
-    int16_t lTarget,   /* (i) Length of the target vector */
+    size_t lMem,   /* (i) Length of the CB memory */
+    size_t lTarget,   /* (i) Length of the target vector */
     int16_t *energyW16,  /* (o) Energy in the CB vectors */
     int16_t *energyShifts, /* (o) Shift value of the energy */
     int scale,   /* (i) The scaling of all energy values */
-    int16_t base_size  /* (i) Index to where energy values should be stored */
+    size_t base_size  /* (i) Index to where energy values should be stored */
                                ) {
   int16_t *ppi, *ppo, *pp;
   int32_t energy, tmp32;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h
index 34ff8aa..6da2f43 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h
@@ -20,15 +20,15 @@
 #define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_H_
 
 void WebRtcIlbcfix_CbMemEnergy(
-    int16_t range,
+    size_t range,
     int16_t *CB,   /* (i) The CB memory (1:st section) */
     int16_t *filteredCB,  /* (i) The filtered CB memory (2:nd section) */
-    int16_t lMem,   /* (i) Length of the CB memory */
-    int16_t lTarget,   /* (i) Length of the target vector */
+    size_t lMem,   /* (i) Length of the CB memory */
+    size_t lTarget,   /* (i) Length of the target vector */
     int16_t *energyW16,  /* (o) Energy in the CB vectors */
     int16_t *energyShifts, /* (o) Shift value of the energy */
     int scale,   /* (i) The scaling of all energy values */
-    int16_t base_size  /* (i) Index to where energy values should be stored */
+    size_t base_size  /* (i) Index to where energy values should be stored */
                                );
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c
index 2f3c299..acd6b9c 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c
@@ -23,13 +23,14 @@
     int16_t *interpSamples, /* (i) The interpolated samples */
     int16_t *CBmem,   /* (i) The CB memory */
     int scale,   /* (i) The scaling of all energy values */
-    int16_t base_size,  /* (i) Index to where energy values should be stored */
+    size_t base_size,  /* (i) Index to where energy values should be stored */
     int16_t *energyW16,  /* (o) Energy in the CB vectors */
     int16_t *energyShifts /* (o) Shift value of the energy */
                                            ){
   int32_t energy, tmp32;
   int16_t *ppe, *pp, *interpSamplesPtr;
-  int16_t *CBmemPtr, lagcount;
+  int16_t *CBmemPtr;
+  size_t lagcount;
   int16_t *enPtr=&energyW16[base_size-20];
   int16_t *enShPtr=&energyShifts[base_size-20];
   int32_t nrjRecursive;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h
index 46fb2fd..594ba5f 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h
@@ -23,7 +23,7 @@
     int16_t *interpSamples, /* (i) The interpolated samples */
     int16_t *CBmem,   /* (i) The CB memory */
     int scale,   /* (i) The scaling of all energy values */
-    int16_t base_size,  /* (i) Index to where energy values should be stored */
+    size_t base_size,  /* (i) Index to where energy values should be stored */
     int16_t *energyW16,  /* (o) Energy in the CB vectors */
     int16_t *energyShifts /* (o) Shift value of the energy */
                                            );
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c
index 481dfba..f2415fe 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c
@@ -23,16 +23,17 @@
  * sample and the last sample respectively */
 void WebRtcIlbcfix_CbMemEnergyCalc(
     int32_t energy,   /* (i) input start energy */
-    int16_t range,   /* (i) number of iterations */
+    size_t range,   /* (i) number of iterations */
     int16_t *ppi,   /* (i) input pointer 1 */
     int16_t *ppo,   /* (i) input pointer 2 */
     int16_t *energyW16,  /* (o) Energy in the CB vectors */
     int16_t *energyShifts, /* (o) Shift value of the energy */
     int scale,   /* (i) The scaling of all energy values */
-    int16_t base_size  /* (i) Index to where energy values should be stored */
+    size_t base_size  /* (i) Index to where energy values should be stored */
                                    )
 {
-  int16_t j,shft;
+  size_t j;
+  int16_t shft;
   int32_t tmp;
   int16_t *eSh_ptr;
   int16_t *eW16_ptr;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h
index 7f0cadf..2991869 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h
@@ -21,13 +21,13 @@
 
 void WebRtcIlbcfix_CbMemEnergyCalc(
     int32_t energy,   /* (i) input start energy */
-    int16_t range,   /* (i) number of iterations */
+    size_t range,   /* (i) number of iterations */
     int16_t *ppi,   /* (i) input pointer 1 */
     int16_t *ppo,   /* (i) input pointer 2 */
     int16_t *energyW16,  /* (o) Energy in the CB vectors */
     int16_t *energyShifts, /* (o) Shift value of the energy */
     int scale,   /* (i) The scaling of all energy values */
-    int16_t base_size  /* (i) Index to where energy values should be stored */
+    size_t base_size  /* (i) Index to where energy values should be stored */
                                    );
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c b/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c
index d502cf0..be94951 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c
@@ -40,29 +40,31 @@
     int16_t *gain_index, /* (o) Gain quantization indices */
     int16_t *intarget, /* (i) Target vector for encoding */
     int16_t *decResidual,/* (i) Decoded residual for codebook construction */
-    int16_t lMem,  /* (i) Length of buffer */
-    int16_t lTarget,  /* (i) Length of vector */
+    size_t lMem,  /* (i) Length of buffer */
+    size_t lTarget,  /* (i) Length of vector */
     int16_t *weightDenum,/* (i) weighting filter coefficients in Q12 */
-    int16_t block  /* (i) the subblock number */
+    size_t block  /* (i) the subblock number */
                             ) {
-  int16_t i, j, stage, range;
+  size_t i, range;
+  int16_t ii, j, stage;
   int16_t *pp;
   int16_t tmp;
   int scale;
   int16_t bits, temp1, temp2;
-  int16_t base_size;
+  size_t base_size;
   int32_t codedEner, targetEner;
   int16_t gains[CB_NSTAGES+1];
   int16_t *cb_vecPtr;
-  int16_t indexOffset, sInd, eInd;
+  size_t indexOffset, sInd, eInd;
   int32_t CritMax=0;
   int16_t shTotMax=WEBRTC_SPL_WORD16_MIN;
-  int16_t bestIndex=0;
+  size_t bestIndex=0;
   int16_t bestGain=0;
-  int16_t indexNew, CritNewSh;
+  size_t indexNew;
+  int16_t CritNewSh;
   int32_t CritNew;
   int32_t *cDotPtr;
-  int16_t noOfZeros;
+  size_t noOfZeros;
   int16_t *gainPtr;
   int32_t t32, tmpW32;
   int16_t *WebRtcIlbcfix_kGainSq5_ptr;
@@ -148,9 +150,9 @@
                                           scale, 20, energyW16, energyShifts);
 
     /* Compute the CB vectors' energies for the second cb section (filtered cb) */
-    WebRtcIlbcfix_CbMemEnergyAugmentation(interpSamplesFilt, cbvectors,
-                                          scale, (int16_t)(base_size + 20),
-                                          energyW16, energyShifts);
+    WebRtcIlbcfix_CbMemEnergyAugmentation(interpSamplesFilt, cbvectors, scale,
+                                          base_size + 20, energyW16,
+                                          energyShifts);
 
     /* Compute the CB vectors' energies and store them in the vector
      * energyW16. Also the corresponding shift values are stored. The
@@ -224,7 +226,7 @@
 
     /* Update the global best index and the corresponding gain */
     WebRtcIlbcfix_CbUpdateBestIndex(
-        CritNew, CritNewSh, (int16_t)(indexNew+indexOffset), cDot[indexNew+indexOffset],
+        CritNew, CritNewSh, indexNew+indexOffset, cDot[indexNew+indexOffset],
         inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset],
         &CritMax, &shTotMax, &bestIndex, &bestGain);
 
@@ -242,11 +244,8 @@
       i=sInd;
       if (sInd<20) {
         WebRtcIlbcfix_AugmentedCbCorr(target, cbvectors + lMem,
-                                      interpSamplesFilt, cDot,
-                                      (int16_t)(sInd + 20),
-                                      (int16_t)(WEBRTC_SPL_MIN(39,
-                                                               (eInd + 20))),
-                                      scale);
+                                      interpSamplesFilt, cDot, sInd + 20,
+                                      WEBRTC_SPL_MIN(39, (eInd + 20)), scale);
         i=20;
         cDotPtr = &cDot[20 - sInd];
       } else {
@@ -257,7 +256,7 @@
 
       /* Calculate the cross correlations (main part of the filtered CB) */
       WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget,
-                                 (int16_t)(eInd - i + 1), scale, -1);
+                                 eInd - i + 1, scale, -1);
 
     } else {
       cDotPtr = cDot;
@@ -265,7 +264,7 @@
 
       /* Calculate the cross correlations (main part of the filtered CB) */
       WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget,
-                                 (int16_t)(eInd - sInd + 1), scale, -1);
+                                 eInd - sInd + 1, scale, -1);
 
     }
 
@@ -274,17 +273,17 @@
 
     /* Search for best index in this part of the vector */
     WebRtcIlbcfix_CbSearchCore(
-        cDot, (int16_t)(eInd-sInd+1), stage, inverseEnergy+indexOffset,
+        cDot, eInd-sInd+1, stage, inverseEnergy+indexOffset,
         inverseEnergyShifts+indexOffset, Crit,
         &indexNew, &CritNew, &CritNewSh);
 
     /* Update the global best index and the corresponding gain */
     WebRtcIlbcfix_CbUpdateBestIndex(
-        CritNew, CritNewSh, (int16_t)(indexNew+indexOffset), cDot[indexNew],
+        CritNew, CritNewSh, indexNew+indexOffset, cDot[indexNew],
         inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset],
         &CritMax, &shTotMax, &bestIndex, &bestGain);
 
-    index[stage] = bestIndex;
+    index[stage] = (int16_t)bestIndex;
 
 
     bestGain = WebRtcIlbcfix_GainQuant(bestGain,
@@ -297,7 +296,7 @@
 
     if(lTarget==(STATE_LEN-iLBCenc_inst->state_short_len)) {
 
-      if(index[stage]<base_size) {
+      if((size_t)index[stage]<base_size) {
         pp=buf+lMem-lTarget-index[stage];
       } else {
         pp=cbvectors+lMem-lTarget-
@@ -306,16 +305,16 @@
 
     } else {
 
-      if (index[stage]<base_size) {
+      if ((size_t)index[stage]<base_size) {
         if (index[stage]>=20) {
           /* Adjust index and extract vector */
           index[stage]-=20;
           pp=buf+lMem-lTarget-index[stage];
         } else {
           /* Adjust index and extract vector */
-          index[stage]+=(base_size-20);
+          index[stage]+=(int16_t)(base_size-20);
 
-          WebRtcIlbcfix_CreateAugmentedVec((int16_t)(index[stage]-base_size+40),
+          WebRtcIlbcfix_CreateAugmentedVec(index[stage]-base_size+40,
                                            buf+lMem, aug_vec);
           pp = aug_vec;
 
@@ -329,8 +328,8 @@
               index[stage]+base_size;
         } else {
           /* Adjust index and extract vector */
-          index[stage]+=(base_size-20);
-          WebRtcIlbcfix_CreateAugmentedVec((int16_t)(index[stage]-2*base_size+40),
+          index[stage]+=(int16_t)(base_size-20);
+          WebRtcIlbcfix_CreateAugmentedVec(index[stage]-2*base_size+40,
                                            cbvectors+lMem, aug_vec);
           pp = aug_vec;
         }
@@ -381,7 +380,7 @@
   WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[j];
 
   /* targetEner and codedEner are in Q(-2*scale) */
-  for (i=gain_index[0];i<32;i++) {
+  for (ii=gain_index[0];ii<32;ii++) {
 
     /* Change the index if
        (codedEnergy*gainTbl[i]*gainTbl[i])<(targetEn*gain[0]*gain[0]) AND
@@ -392,8 +391,8 @@
     t32 = t32 - targetEner;
     if (t32 < 0) {
       if ((*WebRtcIlbcfix_kGainSq5_ptr) < tmpW32) {
-        j=i;
-        WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[i];
+        j=ii;
+        WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[ii];
       }
     }
     gainPtr++;
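
Note on the cast pattern in cb_search.c above: once a length like base_size
becomes size_t, comparing it against a still-signed int16_t index would mix
signedness (and a negative index would wrap to a huge unsigned value), hence
the explicit (size_t) casts. A minimal sketch of the idea, not iLBC code:

  #include <stddef.h>
  #include <stdint.h>

  /* Safe only because the caller guarantees idx >= 0 at this point; the
   * cast documents that and silences -Wsign-compare. */
  static int InFirstSection(int16_t idx, size_t base_size) {
    return (size_t)idx < base_size;
  }
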
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_search.h b/webrtc/modules/audio_coding/codecs/ilbc/cb_search.h
index 2fe236f..ed1580c 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_search.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_search.h
@@ -26,10 +26,10 @@
     int16_t *gain_index, /* (o) Gain quantization indices */
     int16_t *intarget, /* (i) Target vector for encoding */
     int16_t *decResidual,/* (i) Decoded residual for codebook construction */
-    int16_t lMem,  /* (i) Length of buffer */
-    int16_t lTarget,  /* (i) Length of vector */
+    size_t lMem,  /* (i) Length of buffer */
+    size_t lTarget,  /* (i) Length of vector */
     int16_t *weightDenum,/* (i) weighting filter coefficients in Q12 */
-    int16_t block  /* (i) the subblock number */
+    size_t block  /* (i) the subblock number */
                             );
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c b/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c
index 3deb08a..d297b15 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c
@@ -21,13 +21,13 @@
 
 void WebRtcIlbcfix_CbSearchCore(
     int32_t *cDot,    /* (i) Cross Correlation */
-    int16_t range,    /* (i) Search range */
+    size_t range,    /* (i) Search range */
     int16_t stage,    /* (i) Stage of this search */
    int16_t *inverseEnergy,  /* (i) Inverse energy */
    int16_t *inverseEnergyShift, /* (i) Shifts of inverse energy
                                           with the offset 2*16-29 */
     int32_t *Crit,    /* (o) The criteria */
-    int16_t *bestIndex,   /* (o) Index that corresponds to
+    size_t *bestIndex,   /* (o) Index that corresponds to
                                                    maximum criteria (in this
                                                    vector) */
    int32_t *bestCrit,   /* (o) Value of criteria for the
@@ -37,7 +37,7 @@
 {
   int32_t maxW32, tmp32;
   int16_t max, sh, tmp16;
-  int i;
+  size_t i;
   int32_t *cDotPtr;
   int16_t cDotSqW16;
   int16_t *inverseEnergyPtr;
@@ -103,7 +103,7 @@
   }
 
   /* Find the index of the best value */
-  *bestIndex = WebRtcSpl_MaxIndexW32(Crit, range);
+  *bestIndex = (size_t)WebRtcSpl_MaxIndexW32(Crit, range);
   *bestCrit = Crit[*bestIndex];
 
   /* Calculate total shifts of this criteria */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h b/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h
index e4f2e92..9648cf2 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h
@@ -23,13 +23,13 @@
 
 void WebRtcIlbcfix_CbSearchCore(
     int32_t *cDot,    /* (i) Cross Correlation */
-    int16_t range,    /* (i) Search range */
+    size_t range,    /* (i) Search range */
     int16_t stage,    /* (i) Stage of this search */
    int16_t *inverseEnergy,  /* (i) Inverse energy */
    int16_t *inverseEnergyShift, /* (i) Shifts of inverse energy
                                          with the offset 2*16-29 */
     int32_t *Crit,    /* (o) The criteria */
-    int16_t *bestIndex,   /* (o) Index that corresponds to
+    size_t *bestIndex,   /* (o) Index that corresponds to
                                    maximum criteria (in this
                                    vector) */
    int32_t *bestCrit,   /* (o) Value of criteria for the
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c b/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c
index 6fdec27..fc27ea9 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c
@@ -23,13 +23,13 @@
 void WebRtcIlbcfix_CbUpdateBestIndex(
     int32_t CritNew,    /* (i) New Potentially best Criteria */
     int16_t CritNewSh,   /* (i) Shift value of above Criteria */
-    int16_t IndexNew,   /* (i) Index of new Criteria */
+    size_t IndexNew,   /* (i) Index of new Criteria */
     int32_t cDotNew,    /* (i) Cross dot of new index */
    int16_t invEnergyNew,  /* (i) Inverse energy of new index */
     int16_t energyShiftNew,  /* (i) Energy shifts of new index */
     int32_t *CritMax,   /* (i/o) Maximum Criteria (so far) */
     int16_t *shTotMax,   /* (i/o) Shifts of maximum criteria */
-    int16_t *bestIndex,   /* (i/o) Index that corresponds to
+    size_t *bestIndex,   /* (i/o) Index that corresponds to
                                                    maximum criteria */
     int16_t *bestGain)   /* (i/o) Gain in Q14 that corresponds
                                                    to maximum criteria */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h b/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h
index e8519d4..a20fa38 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h
@@ -24,13 +24,13 @@
 void WebRtcIlbcfix_CbUpdateBestIndex(
     int32_t CritNew,    /* (i) New Potentially best Criteria */
     int16_t CritNewSh,   /* (i) Shift value of above Criteria */
-    int16_t IndexNew,   /* (i) Index of new Criteria */
+    size_t IndexNew,   /* (i) Index of new Criteria */
     int32_t cDotNew,    /* (i) Cross dot of new index */
    int16_t invEnergyNew,  /* (i) Inverse energy of new index */
     int16_t energyShiftNew,  /* (i) Energy shifts of new index */
     int32_t *CritMax,   /* (i/o) Maximum Criteria (so far) */
     int16_t *shTotMax,   /* (i/o) Shifts of maximum criteria */
-    int16_t *bestIndex,   /* (i/o) Index that corresponds to
+    size_t *bestIndex,   /* (i/o) Index that corresponds to
                                    maximum criteria */
     int16_t *bestGain);   /* (i/o) Gain in Q14 that corresponds
                                    to maximum criteria */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.c b/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.c
index a53e8a7..7653cb0 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.c
@@ -27,9 +27,9 @@
     int32_t *corr, /* (o) cross correlation */
     int32_t *ener, /* (o) energy */
     int16_t *buffer, /* (i) signal buffer */
-    int16_t lag,  /* (i) pitch lag */
-    int16_t bLen, /* (i) length of buffer */
-    int16_t sRange, /* (i) correlation search length */
+    size_t lag,  /* (i) pitch lag */
+    size_t bLen, /* (i) length of buffer */
+    size_t sRange, /* (i) correlation search length */
     int16_t scale /* (i) number of rightshifts to use */
                             ){
   int16_t *w16ptr;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.h b/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.h
index 4ff80aa..ab78c72 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.h
@@ -30,9 +30,9 @@
     int32_t *corr, /* (o) cross correlation */
     int32_t *ener, /* (o) energy */
     int16_t *buffer, /* (i) signal buffer */
-    int16_t lag,  /* (i) pitch lag */
-    int16_t bLen, /* (i) length of buffer */
-    int16_t sRange, /* (i) correlation search length */
+    size_t lag,  /* (i) pitch lag */
+    size_t bLen, /* (i) length of buffer */
+    size_t sRange, /* (i) correlation search length */
     int16_t scale /* (i) number of rightshifts to use */
                             );
 
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/constants.c b/webrtc/modules/audio_coding/codecs/ilbc/constants.c
index 1d384b7..f726ae2 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/constants.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/constants.c
@@ -593,10 +593,10 @@
 
 /* Ranges for search and filters at different subframes */
 
-const int16_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES]={
+const size_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES]={
   {58,58,58}, {108,44,44}, {108,108,108}, {108,108,108}, {108,108,108}};
 
-const int16_t WebRtcIlbcfix_kFilterRange[5]={63, 85, 125, 147, 147};
+const size_t WebRtcIlbcfix_kFilterRange[5]={63, 85, 125, 147, 147};
 
 /* Gain Quantization for the codebook gains of the 3 stages */
 
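
Retyping shared constant tables such as WebRtcIlbcfix_kSearchRange and
WebRtcIlbcfix_kFilterRange is what makes a change like this transitive:
every consumer's loop bound and index variable has to follow the element
type, which is why the pieces were reviewed separately but land at once.
A hypothetical consumer, for illustration only:

  #include <stddef.h>

  /* Values copied from constants.c above; stand-in name for the real
   * table. The element type drives the types of everything that
   * iterates over it. */
  static const size_t kFilterRange[5] = {63, 85, 125, 147, 147};

  static size_t SumFilterRanges(void) {
    size_t i, sum = 0;
    for (i = 0; i < 5; i++)
      sum += kFilterRange[i];
    return sum;
  }
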
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/constants.h b/webrtc/modules/audio_coding/codecs/ilbc/constants.h
index ff6370e..1f4de4d 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/constants.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/constants.h
@@ -61,8 +61,8 @@
 
 /* Ranges for search and filters at different subframes */
 
-extern const int16_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES];
-extern const int16_t WebRtcIlbcfix_kFilterRange[];
+extern const size_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES];
+extern const size_t WebRtcIlbcfix_kFilterRange[];
 
 /* gain quantization tables */
 
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c b/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
index 965cbe0..8ae28ac 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
@@ -25,12 +25,12 @@
  *----------------------------------------------------------------*/
 
 void WebRtcIlbcfix_CreateAugmentedVec(
-    int16_t index,  /* (i) Index for the augmented vector to be created */
+    size_t index,  /* (i) Index for the augmented vector to be created */
     int16_t *buffer,  /* (i) Pointer to the end of the codebook memory that
                                            is used for creation of the augmented codebook */
    int16_t *cbVec  /* (o) The constructed codebook vector */
                                       ) {
-  int16_t ilow;
+  size_t ilow;
   int16_t *ppo, *ppi;
   int16_t cbVecTmp[4];
 
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h b/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h
index e3c3c7b..430dfe9 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h
@@ -27,7 +27,7 @@
  *----------------------------------------------------------------*/
 
 void WebRtcIlbcfix_CreateAugmentedVec(
-    int16_t index,  /* (i) Index for the augmented vector to be created */
+    size_t index,  /* (i) Index for the augmented vector to be created */
     int16_t *buffer,  /* (i) Pointer to the end of the codebook memory that
                                            is used for creation of the augmented codebook */
    int16_t *cbVec  /* (o) The constructed codebook vector */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/decode.c b/webrtc/modules/audio_coding/codecs/ilbc/decode.c
index 9918de2..4c8497a 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/decode.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/decode.c
@@ -44,7 +44,7 @@
     int16_t mode      /* (i) 0: bad packet, PLC,
                                                                    1: normal */
                            ) {
-  int i;
+  size_t i;
   int16_t order_plus_one;
 
   int16_t last_bit;
@@ -106,7 +106,7 @@
       WebRtcIlbcfix_DoThePlc(
           PLCresidual, PLClpc, 0, decresidual,
           syntdenum + (LPC_FILTERORDER + 1) * (iLBCdec_inst->nsub - 1),
-          (int16_t)(iLBCdec_inst->last_lag), iLBCdec_inst);
+          iLBCdec_inst->last_lag, iLBCdec_inst);
 
       /* Use the output from doThePLC */
       WEBRTC_SPL_MEMCPY_W16(decresidual, PLCresidual, iLBCdec_inst->blockl);
@@ -122,7 +122,7 @@
     /* packet loss conceal */
 
     WebRtcIlbcfix_DoThePlc(PLCresidual, PLClpc, 1, decresidual, syntdenum,
-                           (int16_t)(iLBCdec_inst->last_lag), iLBCdec_inst);
+                           iLBCdec_inst->last_lag, iLBCdec_inst);
 
     WEBRTC_SPL_MEMCPY_W16(decresidual, PLCresidual, iLBCdec_inst->blockl);
 
@@ -188,18 +188,18 @@
     WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &data[iLBCdec_inst->blockl-LPC_FILTERORDER], LPC_FILTERORDER);
 
   } else { /* Enhancer not activated */
-    int16_t lag;
+    size_t lag;
 
     /* Find last lag (since the enhancer is not called to give this info) */
     lag = 20;
     if (iLBCdec_inst->mode==20) {
-      lag = (int16_t)WebRtcIlbcfix_XcorrCoef(
+      lag = WebRtcIlbcfix_XcorrCoef(
           &decresidual[iLBCdec_inst->blockl-60],
           &decresidual[iLBCdec_inst->blockl-60-lag],
           60,
           80, lag, -1);
     } else {
-      lag = (int16_t)WebRtcIlbcfix_XcorrCoef(
+      lag = WebRtcIlbcfix_XcorrCoef(
           &decresidual[iLBCdec_inst->blockl-ENH_BLOCKL],
           &decresidual[iLBCdec_inst->blockl-ENH_BLOCKL-lag],
           ENH_BLOCKL,
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c b/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c
index de42ea9..b8a067e 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c
@@ -41,8 +41,8 @@
     int16_t *syntdenum   /* (i) the decoded synthesis filter
                                   coefficients */
                                   ) {
-  int16_t meml_gotten, diff, start_pos;
-  int16_t subcount, subframe;
+  size_t meml_gotten, diff, start_pos;
+  size_t subcount, subframe;
   int16_t *reverseDecresidual = iLBCdec_inst->enh_buf; /* Reversed decoded data, used for decoding backwards in time (reuse memory in state) */
   int16_t *memVec = iLBCdec_inst->prevResidual;  /* Memory for codebook and filter state (reuse memory in state) */
   int16_t *mem = &memVec[CB_HALFFILTERLEN];   /* Memory for codebook */
@@ -118,7 +118,7 @@
 
     /* loop over subframes to encode */
 
-    int16_t Nfor = iLBCdec_inst->nsub - iLBC_encbits->startIdx - 1;
+    size_t Nfor = iLBCdec_inst->nsub - iLBC_encbits->startIdx - 1;
     for (subframe=0; subframe<Nfor; subframe++) {
 
       /* construct decoded vector */
@@ -156,7 +156,7 @@
 
     /* loop over subframes to decode */
 
-    int16_t Nback = iLBC_encbits->startIdx - 1;
+    size_t Nback = iLBC_encbits->startIdx - 1;
     for (subframe=0; subframe<Nback; subframe++) {
 
       /* construct decoded vector */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c b/webrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c
index fad8170..06ab2e7 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c
@@ -34,7 +34,8 @@
     IlbcDecoder *iLBCdec_inst
     /* (i) the decoder state structure */
                                           ){
-  int  i, pos, lp_length;
+  size_t i;
+  int pos, lp_length;
   int16_t  lp[LPC_FILTERORDER + 1], *lsfdeq2;
 
   lsfdeq2 = lsfdeq + length;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/defines.h b/webrtc/modules/audio_coding/codecs/ilbc/defines.h
index 2d37e52..5fcd4a0 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/defines.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/defines.h
@@ -121,11 +121,11 @@
   int16_t lsf[LSF_NSPLIT*LPC_N_MAX];
  int16_t cb_index[CB_NSTAGES*(NASUB_MAX+1)];  /* First CB_NSTAGES values contain extra CB index */
  int16_t gain_index[CB_NSTAGES*(NASUB_MAX+1)]; /* First CB_NSTAGES values contain extra CB gain */
-  int16_t idxForMax;
+  size_t idxForMax;
   int16_t state_first;
   int16_t idxVec[STATE_SHORT_LEN_30MS];
   int16_t firstbits;
-  int16_t startIdx;
+  size_t startIdx;
 } iLBC_bits;
 
 /* type definition encoder instance */
@@ -135,12 +135,12 @@
   int16_t mode;
 
   /* basic parameters for different frame sizes */
-  int16_t blockl;
-  int16_t nsub;
+  size_t blockl;
+  size_t nsub;
   int16_t nasub;
-  int16_t no_of_bytes, no_of_words;
+  size_t no_of_bytes, no_of_words;
   int16_t lpc_n;
-  int16_t state_short_len;
+  size_t state_short_len;
 
   /* analysis filter state */
   int16_t anaMem[LPC_FILTERORDER];
@@ -164,7 +164,7 @@
   int16_t Nfor_flag;
   int16_t Nback_flag;
   int16_t start_pos;
-  int16_t diff;
+  size_t diff;
 #endif
 
 } IlbcEncoder;
@@ -176,12 +176,12 @@
   int16_t mode;
 
   /* basic parameters for different frame sizes */
-  int16_t blockl;
-  int16_t nsub;
+  size_t blockl;
+  size_t nsub;
   int16_t nasub;
-  int16_t no_of_bytes, no_of_words;
+  size_t no_of_bytes, no_of_words;
   int16_t lpc_n;
-  int16_t state_short_len;
+  size_t state_short_len;
 
   /* synthesis filter state */
   int16_t syntMem[LPC_FILTERORDER];
@@ -190,14 +190,15 @@
   int16_t lsfdeqold[LPC_FILTERORDER];
 
   /* pitch lag estimated in enhancer and used in PLC */
-  int last_lag;
+  size_t last_lag;
 
   /* PLC state information */
   int consPLICount, prev_enh_pl;
   int16_t perSquare;
 
   int16_t prevScale, prevPLI;
-  int16_t prevLag, prevLpc[LPC_FILTERORDER+1];
+  size_t prevLag;
+  int16_t prevLpc[LPC_FILTERORDER+1];
   int16_t prevResidual[NSUB_MAX*SUBL];
   int16_t seed;
 
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c b/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c
index b313b58..f74439e 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c
@@ -33,18 +33,19 @@
                                                            0 - no PL, 1 = PL */
     int16_t *decresidual,  /* (i) decoded residual */
     int16_t *lpc,    /* (i) decoded LPC (only used for no PL) */
-    int16_t inlag,    /* (i) pitch lag */
+    size_t inlag,    /* (i) pitch lag */
     IlbcDecoder *iLBCdec_inst
     /* (i/o) decoder instance */
                             ){
-  int16_t i;
+  size_t i;
   int32_t cross, ener, cross_comp, ener_comp = 0;
   int32_t measure, maxMeasure, energy;
   int16_t max, crossSquareMax, crossSquare;
-  int16_t j, lag, tmp1, tmp2, randlag;
+  size_t j, lag, randlag;
+  int16_t tmp1, tmp2;
   int16_t shift1, shift2, shift3, shiftMax;
   int16_t scale3;
-  int16_t corrLen;
+  size_t corrLen;
   int32_t tmpW32, tmp2W32;
   int16_t use_gain;
   int16_t tot_gain;
@@ -54,7 +55,7 @@
   int32_t nom;
   int16_t denom;
   int16_t pitchfact;
-  int16_t use_lag;
+  size_t use_lag;
   int ind;
   int16_t randvec[BLOCKL_MAX];
 
@@ -71,7 +72,7 @@
      /* Maximum 60 samples are correlated; preserve as much accuracy
         as possible without overflowing */
       max = WebRtcSpl_MaxAbsValueW16((*iLBCdec_inst).prevResidual,
-                                     (int16_t)iLBCdec_inst->blockl);
+                                     iLBCdec_inst->blockl);
       scale3 = (WebRtcSpl_GetSizeInBits(max)<<1) - 25;
       if (scale3 < 0) {
         scale3 = 0;
@@ -86,7 +87,7 @@
       lag = inlag - 3;
 
       /* Guard against getting outside the frame */
-      corrLen = WEBRTC_SPL_MIN(60, iLBCdec_inst->blockl-(inlag+3));
+      corrLen = (size_t)WEBRTC_SPL_MIN(60, iLBCdec_inst->blockl-(inlag+3));
 
       WebRtcIlbcfix_CompCorr( &cross, &ener,
                               iLBCdec_inst->prevResidual, lag, iLBCdec_inst->blockl, corrLen, scale3);
@@ -234,7 +235,7 @@
 
       /* noise component -  52 < randlagFIX < 117 */
       iLBCdec_inst->seed = (int16_t)(iLBCdec_inst->seed * 31821 + 13849);
-      randlag = 53 + (int16_t)(iLBCdec_inst->seed & 63);
+      randlag = 53 + (iLBCdec_inst->seed & 63);
       if (randlag > i) {
         randvec[i] =
             iLBCdec_inst->prevResidual[iLBCdec_inst->blockl + i - randlag];
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/do_plc.h b/webrtc/modules/audio_coding/codecs/ilbc/do_plc.h
index c55b815..38b8fdb 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/do_plc.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/do_plc.h
@@ -33,7 +33,7 @@
                                                            0 - no PL, 1 = PL */
     int16_t *decresidual,  /* (i) decoded residual */
     int16_t *lpc,    /* (i) decoded LPC (only used for no PL) */
-    int16_t inlag,    /* (i) pitch lag */
+    size_t inlag,    /* (i) pitch lag */
     IlbcDecoder *iLBCdec_inst
     /* (i/o) decoder instance */
                             );
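
One cost of unsigned lengths shows up in expressions like
blockl - (inlag + 3) in do_plc.c above: with size_t operands the
subtraction wraps instead of going negative, so it is only valid because
the lag is already known to fit inside the frame. A defensive sketch of the
same computation that assumes nothing about its inputs (illustrative, not
the actual iLBC guard):

  #include <stddef.h>

  static size_t CorrLen(size_t blockl, size_t inlag) {
    /* Subtract only after proving it cannot underflow. */
    size_t headroom = (inlag + 3 <= blockl) ? blockl - (inlag + 3) : 0;
    return headroom < 60 ? headroom : 60;
  }
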
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/encode.c b/webrtc/modules/audio_coding/codecs/ilbc/encode.c
index 114ce1f..812ec8d 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/encode.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/encode.c
@@ -48,11 +48,11 @@
     IlbcEncoder *iLBCenc_inst /* (i/o) the general encoder
                                      state */
                           ){
-  int n, meml_gotten, Nfor;
-  int16_t diff, start_pos;
-  int index;
-  int subcount, subframe;
-  int16_t start_count, end_count;
+  size_t n, meml_gotten, Nfor;
+  size_t diff, start_pos;
+  size_t index;
+  size_t subcount, subframe;
+  size_t start_count, end_count;
   int16_t *residual;
   int32_t en1, en2;
   int16_t scale, max;
@@ -86,7 +86,7 @@
 #ifdef SPLIT_10MS
 
   WebRtcSpl_MemSetW16 (  (int16_t *) iLBCbits_inst, 0,
-                         (int16_t) (sizeof(iLBC_bits) / sizeof(int16_t))  );
+                         sizeof(iLBC_bits) / sizeof(int16_t)  );
 
   start_pos = iLBCenc_inst->start_pos;
   diff = iLBCenc_inst->diff;
@@ -317,17 +317,17 @@
       if (iLBCenc_inst->section == 1)
       {
         start_count = 0;
-        end_count = WEBRTC_SPL_MIN (Nfor, 2);
+        end_count = WEBRTC_SPL_MIN (Nfor, (size_t)2);
       }
       if (iLBCenc_inst->section == 2)
       {
-        start_count = WEBRTC_SPL_MIN (Nfor, 2);
+        start_count = WEBRTC_SPL_MIN (Nfor, (size_t)2);
         end_count = Nfor;
       }
     }
 #else
     start_count = 0;
-    end_count = (int16_t)Nfor;
+    end_count = Nfor;
 #endif
 
     /* loop over subframes to encode */
@@ -341,7 +341,7 @@
                              &residual[(iLBCbits_inst->startIdx+1+subframe)*SUBL],
                              mem, MEM_LF_TBL, SUBL,
                              &weightdenum[(iLBCbits_inst->startIdx+1+subframe)*(LPC_FILTERORDER+1)],
-                             (int16_t)subcount);
+                             subcount);
 
       /* construct decoded vector */
 
@@ -386,7 +386,7 @@
        contained in the same vector as the residual)
     */
 
-    int Nback = iLBCbits_inst->startIdx - 1;
+    size_t Nback = iLBCbits_inst->startIdx - 1;
     WebRtcSpl_MemCpyReversedOrder(&reverseResidual[Nback*SUBL-1], residual, Nback*SUBL);
 
     /* setup memory */
@@ -434,7 +434,7 @@
     }
 #else
     start_count = 0;
-    end_count = (int16_t)Nback;
+    end_count = Nback;
 #endif
 
     /* loop over subframes to encode */
@@ -447,7 +447,7 @@
                              iLBCbits_inst->gain_index+subcount*CB_NSTAGES, &reverseResidual[subframe*SUBL],
                              mem, MEM_LF_TBL, SUBL,
                              &weightdenum[(iLBCbits_inst->startIdx-2-subframe)*(LPC_FILTERORDER+1)],
-                             (int16_t)subcount);
+                             subcount);
 
       /* construct decoded vector */
 
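
The (size_t)2 casts fed to WEBRTC_SPL_MIN in encode.c exist because the
macro expands to a plain conditional, so a size_t operand next to a bare
int literal mixes signedness in the comparison. Assuming the usual shape of
the macro (a sketch; the real definition lives in the signal processing
library headers):

  #include <stddef.h>

  #define SPL_MIN(A, B) ((A) < (B) ? (A) : (B)) /* assumed stand-in */

  static size_t FirstSectionCount(size_t nfor) {
    return SPL_MIN(nfor, (size_t)2); /* both operands size_t: no warning */
  }
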
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c b/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c
index a6b1c75..b2bdcff 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c
@@ -23,12 +23,12 @@
 void WebRtcIlbcfix_EnergyInverse(
     int16_t *energy,    /* (i/o) Energy and inverse
                                                            energy (in Q29) */
-    int noOfEnergies)  /* (i)   The length of the energy
+    size_t noOfEnergies)  /* (i)   The length of the energy
                                    vector */
 {
   int32_t Nom=(int32_t)0x1FFFFFFF;
   int16_t *energyPtr;
-  int i;
+  size_t i;
 
   /* Set the minimum energy value to 16384 to avoid overflow */
   energyPtr=energy;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h b/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h
index 7bb6721..fe25094 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h
@@ -26,7 +26,7 @@
 void WebRtcIlbcfix_EnergyInverse(
     int16_t *energy,     /* (i/o) Energy and inverse
                                                                    energy (in Q29) */
-    int noOfEnergies);   /* (i)   The length of the energy
+    size_t noOfEnergies);   /* (i)   The length of the energy
                                    vector */
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/enhancer.c b/webrtc/modules/audio_coding/codecs/ilbc/enhancer.c
index 38c3de3..5683597 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/enhancer.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/enhancer.c
@@ -33,7 +33,7 @@
     int16_t centerStartPos, /* (i) first sample current block within idata */
    int16_t *period,   /* (i) pitch period array (pitch backward in time) */
     int16_t *plocs,   /* (i) locations where period array values valid */
-    int16_t periodl   /* (i) dimension of period and plocs */
+    size_t periodl   /* (i) dimension of period and plocs */
                             ){
   /* Stack based */
   int16_t surround[ENH_BLOCKL];
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/enhancer.h b/webrtc/modules/audio_coding/codecs/ilbc/enhancer.h
index 83f48b0..78a12d3 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/enhancer.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/enhancer.h
@@ -33,7 +33,7 @@
     int16_t centerStartPos, /* (i) first sample current block within idata */
    int16_t *period,   /* (i) pitch period array (pitch backward in time) */
     int16_t *plocs,   /* (i) locations where period array values valid */
-    int16_t periodl   /* (i) dimension of period and plocs */
+    size_t periodl   /* (i) dimension of period and plocs */
                             );
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c b/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c
index c630dd5..f15aee6 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c
@@ -30,19 +30,21 @@
  * interface for enhancer
  *---------------------------------------------------------------*/
 
-int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
+size_t WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
     int16_t *out,     /* (o) enhanced signal */
     int16_t *in,      /* (i) unenhanced signal */
     IlbcDecoder *iLBCdec_inst /* (i) buffers etc */
                                         ){
   int iblock;
-  int lag=20, tlag=20;
-  int inLen=iLBCdec_inst->blockl+120;
-  int16_t scale, scale1, plc_blockl;
+  size_t lag=20, tlag=20;
+  size_t inLen=iLBCdec_inst->blockl+120;
+  int16_t scale, scale1;
+  size_t plc_blockl;
   int16_t *enh_buf, *enh_period;
   int32_t tmp1, tmp2, max, new_blocks;
   int16_t *enh_bufPtr1;
-  int i, k;
+  size_t i;
+  int k;
   int16_t EnChange;
   int16_t SqrtEnChange;
   int16_t inc;
@@ -56,7 +58,8 @@
   int32_t ener;
   int16_t enerSh;
   int16_t corrSh;
-  int16_t ind, sh;
+  size_t ind;
+  int16_t sh;
   int16_t start, stop;
   /* Stack based */
   int16_t totsh[3];
@@ -168,7 +171,7 @@
       }
     }
 
-    lag = lagmax[ind] + 10;
+    lag = (size_t)(lagmax[ind] + 10);
 
     /* Store the estimated lag in the non-downsampled domain */
     enh_period[ENH_NBLOCKS_TOT - new_blocks + iblock] = (int16_t)(lag * 8);
@@ -224,7 +227,7 @@
             (plc_blockl-lag));
       }
     } else {
-      int pos;
+      size_t pos;
 
       pos = plc_blockl;
 
@@ -280,8 +283,8 @@
 
 
         /* Multiply first part of vector with 2*SqrtEnChange */
-        WebRtcSpl_ScaleVector(plc_pred, plc_pred, SqrtEnChange,
-                              (int16_t)(plc_blockl-16), 14);
+        WebRtcSpl_ScaleVector(plc_pred, plc_pred, SqrtEnChange, plc_blockl-16,
+                              14);
 
         /* Calculate increase parameter for window part (16 last samples) */
         /* (1-2*SqrtEnChange)/16 in Q15 */
@@ -343,7 +346,7 @@
                             LPC_FILTERORDER);
       WebRtcIlbcfix_HpOutput(synt, (int16_t*)WebRtcIlbcfix_kHpOutCoefs,
                              iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
-                             (int16_t)lag);
+                             lag);
       WebRtcSpl_FilterARFastQ12(
           enh_bufPtr1, synt,
           &iLBCdec_inst->old_syntdenum[
@@ -354,7 +357,7 @@
                             LPC_FILTERORDER);
       WebRtcIlbcfix_HpOutput(synt, (int16_t*)WebRtcIlbcfix_kHpOutCoefs,
                              iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
-                             (int16_t)lag);
+                             lag);
     }
   }
 
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h b/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h
index fa58b7a..61efd22 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h
@@ -25,7 +25,7 @@
  * interface for enhancer
  *---------------------------------------------------------------*/
 
-int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
+size_t WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
     int16_t *out,     /* (o) enhanced signal */
     int16_t *in,      /* (i) unenhanced signal */
     IlbcDecoder *iLBCdec_inst /* (i) buffers etc */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c b/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c
index aa8170c..04d17a6 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c
@@ -29,8 +29,8 @@
     int16_t *cbvectors, /* (o) Codebook vector for the higher section */
     int16_t *CBmem,  /* (i) Codebook memory that is filtered to create a
                                            second CB section */
-    int lMem,  /* (i) Length of codebook memory */
-    int16_t samples    /* (i) Number of samples to filter */
+    size_t lMem,  /* (i) Length of codebook memory */
+    size_t samples    /* (i) Number of samples to filter */
                                   ) {
 
   /* Set up the memory, start with zero state */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h b/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h
index 99e89a0..d23b25c 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h
@@ -31,8 +31,8 @@
     int16_t *cbvectors, /* (o) Codebook vector for the higher section */
     int16_t *CBmem,  /* (i) Codebook memory that is filtered to create a
                                            second CB section */
-    int lMem,  /* (i) Length of codebook memory */
-    int16_t samples    /* (i) Number of samples to filter */
+    size_t lMem,  /* (i) Length of codebook memory */
+    size_t samples    /* (i) Number of samples to filter */
                                   );
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c b/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c
index 6a68dec..f442f6a 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c
@@ -23,7 +23,7 @@
  *  Classification of subframes to localize start state
  *---------------------------------------------------------------*/
 
-int16_t WebRtcIlbcfix_FrameClassify(
+size_t WebRtcIlbcfix_FrameClassify(
     /* (o) Index to the max-energy sub frame */
     IlbcEncoder *iLBCenc_inst,
     /* (i/o) the encoder state structure */
@@ -35,8 +35,8 @@
   int32_t *seqEnPtr;
   int32_t maxW32;
   int16_t scale1;
-  int16_t pos;
-  int n;
+  size_t pos;
+  size_t n;
 
   /*
     Calculate the energy of each of the 80 sample blocks
@@ -82,7 +82,7 @@
   }
 
  /* Extract the best choice of start state */
-  pos = WebRtcSpl_MaxIndexW32(ssqEn, iLBCenc_inst->nsub - 1) + 1;
+  pos = (size_t)WebRtcSpl_MaxIndexW32(ssqEn, iLBCenc_inst->nsub - 1) + 1;
 
   return(pos);
 }
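
The (size_t) cast on WebRtcSpl_MaxIndexW32 in frame_classify.c reflects
that the SPL helper still reports its index in a signed type here; the
result is non-negative for a non-empty vector, so converting at the call
site is safe. A stand-in sketch (the declaration below is hypothetical,
not the real SPL signature):

  #include <stddef.h>
  #include <stdint.h>

  extern int MaxIndexW32(const int32_t* vector, size_t length);

  static size_t BestStartSubframe(const int32_t* ssqEn, size_t nsub) {
    return (size_t)MaxIndexW32(ssqEn, nsub - 1) + 1;
  }
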
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.h b/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.h
index b32e2c8..99f7144 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.h
@@ -19,7 +19,7 @@
 #ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FRAME_CLASSIFY_H_
 #define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FRAME_CLASSIFY_H_
 
-int16_t WebRtcIlbcfix_FrameClassify(
+size_t WebRtcIlbcfix_FrameClassify(
     /* (o) Index to the max-energy sub frame */
     IlbcEncoder *iLBCenc_inst,
     /* (i/o) the encoder state structure */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c b/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c
index cf05ce3..d7c2e75 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c
@@ -27,12 +27,12 @@
 void WebRtcIlbcfix_GetCbVec(
     int16_t *cbvec,   /* (o) Constructed codebook vector */
     int16_t *mem,   /* (i) Codebook buffer */
-    int16_t index,   /* (i) Codebook index */
-    int16_t lMem,   /* (i) Length of codebook buffer */
-    int16_t cbveclen   /* (i) Codebook vector length */
+    size_t index,   /* (i) Codebook index */
+    size_t lMem,   /* (i) Length of codebook buffer */
+    size_t cbveclen   /* (i) Codebook vector length */
                             ){
-  int16_t k, base_size;
-  int16_t lag;
+  size_t k, base_size;
+  size_t lag;
   /* Stack based */
   int16_t tempbuff2[SUBL+5];
 
@@ -58,7 +58,7 @@
 
     /* Calculate lag */
 
-    k = (int16_t)(2 * (index - (lMem - cbveclen + 1))) + cbveclen;
+    k = (2 * (index - (lMem - cbveclen + 1))) + cbveclen;
 
     lag = k / 2;
 
@@ -70,7 +70,7 @@
 
   else {
 
-    int16_t memIndTest;
+    size_t memIndTest;
 
     /* first non-interpolated vectors */
 
@@ -100,7 +100,7 @@
       /* do filtering */
       WebRtcSpl_FilterMAFastQ12(
           &mem[memIndTest+7], tempbuff2, (int16_t*)WebRtcIlbcfix_kCbFiltersRev,
-          CB_FILTERLEN, (int16_t)(cbveclen+5));
+          CB_FILTERLEN, cbveclen+5);
 
       /* Calculate lag index */
       lag = (cbveclen<<1)-20+index-base_size-lMem-1;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h b/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h
index 1c5ac8f..07f67a2 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h
@@ -22,9 +22,9 @@
 void WebRtcIlbcfix_GetCbVec(
     int16_t *cbvec,   /* (o) Constructed codebook vector */
     int16_t *mem,   /* (i) Codebook buffer */
-    int16_t index,   /* (i) Codebook index */
-    int16_t lMem,   /* (i) Length of codebook buffer */
-    int16_t cbveclen   /* (i) Codebook vector length */
+    size_t index,   /* (i) Codebook index */
+    size_t lMem,   /* (i) Length of codebook buffer */
+    size_t cbveclen   /* (i) Codebook vector length */
                             );
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c b/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c
index 480ed7c..66dfafb 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c
@@ -31,12 +31,13 @@
     int16_t centerStartPos, /* (i) where current block starts */
     int16_t *period,   /* (i) rough-pitch-period array       (Q-2) */
     int16_t *plocs,   /* (i) where periods of period array are taken (Q-2) */
-    int16_t periodl,   /* (i) dimension period array */
+    size_t periodl,   /* (i) dimension period array */
     int16_t hl,    /* (i) 2*hl+1 is the number of sequences */
     int16_t *surround  /* (i/o) The contribution from this sequence
                                 summed with earlier contributions */
                               ){
-  int16_t i,centerEndPos,q;
+  size_t i;
+  int16_t centerEndPos,q;
   /* Stack based */
   int16_t lagBlock[2*ENH_HL+1];
   int16_t blockStartPos[2*ENH_HL+1]; /* Defines the position to search around (Q2) */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h b/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h
index f9b08b7..5b59f98 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h
@@ -31,7 +31,7 @@
     int16_t centerStartPos, /* (i) where current block starts */
     int16_t *period,   /* (i) rough-pitch-period array       (Q-2) */
     int16_t *plocs,   /* (i) where periods of period array are taken (Q-2) */
-    int16_t periodl,   /* (i) dimension period array */
+    size_t periodl,   /* (i) dimension period array */
     int16_t hl,    /* (i) 2*hl+1 is the number of sequences */
     int16_t *surround  /* (i/o) The contribution from this sequence
                                 summed with earlier contributions */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/hp_input.c b/webrtc/modules/audio_coding/codecs/ilbc/hp_input.c
index 260591e..5d8a860 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/hp_input.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/hp_input.c
@@ -30,9 +30,9 @@
     int16_t *y,      /* (i/o) Filter state yhi[n-1] ylow[n-1]
                                                                    yhi[n-2] ylow[n-2] */
     int16_t *x,      /* (i/o) Filter state x[n-1] x[n-2] */
-    int16_t len)      /* (i)   Number of samples to filter */
+    size_t len)      /* (i)   Number of samples to filter */
 {
-  int i;
+  size_t i;
   int32_t tmpW32;
   int32_t tmpW32b;
 
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/hp_input.h b/webrtc/modules/audio_coding/codecs/ilbc/hp_input.h
index a30f703..acdfa91 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/hp_input.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/hp_input.h
@@ -29,6 +29,6 @@
     int16_t *y,      /* (i/o) Filter state yhi[n-1] ylow[n-1]
                                                                    yhi[n-2] ylow[n-2] */
     int16_t *x,      /* (i/o) Filter state x[n-1] x[n-2] */
-    int16_t len);     /* (i)   Number of samples to filter */
+    size_t len);     /* (i)   Number of samples to filter */
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/hp_output.c b/webrtc/modules/audio_coding/codecs/ilbc/hp_output.c
index 3abb427..bd101bf 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/hp_output.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/hp_output.c
@@ -30,9 +30,9 @@
     int16_t *y,      /* (i/o) Filter state yhi[n-1] ylow[n-1]
                                                                    yhi[n-2] ylow[n-2] */
     int16_t *x,      /* (i/o) Filter state x[n-1] x[n-2] */
-    int16_t len)      /* (i)   Number of samples to filter */
+    size_t len)      /* (i)   Number of samples to filter */
 {
-  int i;
+  size_t i;
   int32_t tmpW32;
   int32_t tmpW32b;
 
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/hp_output.h b/webrtc/modules/audio_coding/codecs/ilbc/hp_output.h
index 7937ba0..1840b681 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/hp_output.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/hp_output.h
@@ -29,6 +29,6 @@
     int16_t *y,      /* (i/o) Filter state yhi[n-1] ylow[n-1]
                               yhi[n-2] ylow[n-2] */
     int16_t *x,      /* (i/o) Filter state x[n-1] x[n-2] */
-    int16_t len);      /* (i)   Number of samples to filter */
+    size_t len);      /* (i)   Number of samples to filter */
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/ilbc.c b/webrtc/modules/audio_coding/codecs/ilbc/ilbc.c
index e41c095..c565a24 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/ilbc.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/ilbc.c
@@ -90,10 +90,10 @@
 
 int WebRtcIlbcfix_Encode(IlbcEncoderInstance* iLBCenc_inst,
                          const int16_t* speechIn,
-                         int16_t len,
+                         size_t len,
                          uint8_t* encoded) {
-  int16_t pos = 0;
-  int16_t encpos = 0;
+  size_t pos = 0;
+  size_t encpos = 0;
 
   if ((len != ((IlbcEncoder*)iLBCenc_inst)->blockl) &&
 #ifdef SPLIT_10MS
@@ -118,7 +118,7 @@
 #endif
       encpos += ((IlbcEncoder*)iLBCenc_inst)->no_of_words;
     }
-    return (encpos*2);
+    return (int)(encpos*2);
   }
 }
 
@@ -143,11 +143,11 @@
 
 int WebRtcIlbcfix_Decode(IlbcDecoderInstance* iLBCdec_inst,
                          const uint8_t* encoded,
-                         int16_t len,
+                         size_t len,
                          int16_t* decoded,
                          int16_t* speechType)
 {
-  int i=0;
+  size_t i=0;
   /* Allow for automatic switching between the frame sizes
      (although you do get some discontinuity) */
   if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
@@ -191,16 +191,16 @@
   }
   /* iLBC does not support VAD/CNG yet */
   *speechType=1;
-  return(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
+  return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
 }
 
 int WebRtcIlbcfix_Decode20Ms(IlbcDecoderInstance* iLBCdec_inst,
                              const uint8_t* encoded,
-                             int16_t len,
+                             size_t len,
                              int16_t* decoded,
                              int16_t* speechType)
 {
-  int i=0;
+  size_t i=0;
   if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
       (len==2*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
       (len==3*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)) {
@@ -219,16 +219,16 @@
   }
   /* iLBC does not support VAD/CNG yet */
   *speechType=1;
-  return(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
+  return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
 }
 
 int WebRtcIlbcfix_Decode30Ms(IlbcDecoderInstance* iLBCdec_inst,
                              const uint8_t* encoded,
-                             int16_t len,
+                             size_t len,
                              int16_t* decoded,
                              int16_t* speechType)
 {
-  int i=0;
+  size_t i=0;
   if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
       (len==2*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
       (len==3*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)) {
@@ -247,13 +247,13 @@
   }
   /* iLBC does not support VAD/CNG yet */
   *speechType=1;
-  return(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
+  return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
 }
 
-int16_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance* iLBCdec_inst,
-                                int16_t* decoded,
-                                int16_t noOfLostFrames) {
-  int i;
+size_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance* iLBCdec_inst,
+                               int16_t* decoded,
+                               size_t noOfLostFrames) {
+  size_t i;
   uint16_t dummy;
 
   for (i=0;i<noOfLostFrames;i++) {
@@ -265,9 +265,9 @@
   return (noOfLostFrames*((IlbcDecoder*)iLBCdec_inst)->blockl);
 }
 
-int16_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance* iLBCdec_inst,
-                               int16_t* decoded,
-                               int16_t noOfLostFrames) {
+size_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance* iLBCdec_inst,
+                              int16_t* decoded,
+                              size_t noOfLostFrames) {
   /* Two input parameters not used, but needed for function pointers in NetEQ */
   (void)(decoded = NULL);
   (void)(noOfLostFrames = 0);
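
Note the boundary discipline in ilbc.c above: the exported Encode/Decode
entry points keep returning int (so -1 stays available for errors), and the
internal size_t counts are narrowed with an explicit (int) cast only on the
success path. That is safe because iLBC frames are at most a few hundred
samples, far below INT_MAX. A sketch of the pattern, names illustrative:

  #include <stddef.h>

  static int EncodedBytesForApi(size_t encpos_words) {
    return (int)(encpos_words * 2); /* words -> bytes, then narrow once */
  }
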
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/init_decode.c b/webrtc/modules/audio_coding/codecs/ilbc/init_decode.c
index 0659e50..1f92480 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/init_decode.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/init_decode.c
@@ -92,5 +92,5 @@
 
   iLBCdec_inst->prev_enh_pl = 0;
 
-  return (iLBCdec_inst->blockl);
+  return (int)(iLBCdec_inst->blockl);
 }
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/init_encode.c b/webrtc/modules/audio_coding/codecs/ilbc/init_encode.c
index 9c562db..f559d84 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/init_encode.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/init_encode.c
@@ -67,5 +67,5 @@
   iLBCenc_inst->section = 0;
 #endif
 
-  return (iLBCenc_inst->no_of_bytes);
+  return (int)(iLBCenc_inst->no_of_bytes);
 }
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h b/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h
index b627c3a..c3cf4d8 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h
@@ -36,8 +36,8 @@
   int SampleRateHz() const override;
   int NumChannels() const override;
   size_t MaxEncodedBytes() const override;
-  int Num10MsFramesInNextPacket() const override;
-  int Max10MsFramesInAPacket() const override;
+  size_t Num10MsFramesInNextPacket() const override;
+  size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
   EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
                              const int16_t* audio,
@@ -47,10 +47,10 @@
  private:
   size_t RequiredOutputSizeBytes() const;
 
-  static const int kMaxSamplesPerPacket = 480;
+  static const size_t kMaxSamplesPerPacket = 480;
   const int payload_type_;
-  const int num_10ms_frames_per_packet_;
-  int num_10ms_frames_buffered_;
+  const size_t num_10ms_frames_per_packet_;
+  size_t num_10ms_frames_buffered_;
   uint32_t first_timestamp_in_buffer_;
   int16_t input_buffer_[kMaxSamplesPerPacket];
   IlbcEncoderInstance* encoder_;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h b/webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h
index 4934968..be0b121 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h
@@ -18,6 +18,8 @@
 #ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_INTERFACE_ILBC_H_
 #define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_INTERFACE_ILBC_H_
 
+#include <stddef.h>
+
 /*
  * Define the fixpoint numeric formats
  */
@@ -137,7 +139,7 @@
 
   int WebRtcIlbcfix_Encode(IlbcEncoderInstance *iLBCenc_inst,
                            const int16_t *speechIn,
-                           int16_t len,
+                           size_t len,
                            uint8_t* encoded);
 
   /****************************************************************************
@@ -182,17 +184,17 @@
 
   int WebRtcIlbcfix_Decode(IlbcDecoderInstance* iLBCdec_inst,
                            const uint8_t* encoded,
-                           int16_t len,
+                           size_t len,
                            int16_t* decoded,
                            int16_t* speechType);
   int WebRtcIlbcfix_Decode20Ms(IlbcDecoderInstance* iLBCdec_inst,
                                const uint8_t* encoded,
-                               int16_t len,
+                               size_t len,
                                int16_t* decoded,
                                int16_t* speechType);
   int WebRtcIlbcfix_Decode30Ms(IlbcDecoderInstance* iLBCdec_inst,
                                const uint8_t* encoded,
-                               int16_t len,
+                               size_t len,
                                int16_t* decoded,
                                int16_t* speechType);
 
@@ -210,13 +212,12 @@
    * Output:
    *      - decoded           : The "decoded" vector
    *
-   * Return value             : >0 - Samples in decoded PLC vector
-   *                            -1 - Error
+   * Return value             : Samples in decoded PLC vector
    */
 
-  int16_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance *iLBCdec_inst,
-                                  int16_t *decoded,
-                                  int16_t noOfLostFrames);
+  size_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance *iLBCdec_inst,
+                                 int16_t *decoded,
+                                 size_t noOfLostFrames);
 
   /****************************************************************************
    * WebRtcIlbcfix_NetEqPlc(...)
@@ -232,13 +233,12 @@
    * Output:
    *      - decoded           : The "decoded" vector (nothing in this case)
    *
-   * Return value             : >0 - Samples in decoded PLC vector
-   *                            -1 - Error
+   * Return value             : Samples in decoded PLC vector
    */
 
-  int16_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance *iLBCdec_inst,
-                                 int16_t *decoded,
-                                 int16_t noOfLostFrames);
+  size_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance *iLBCdec_inst,
+                                int16_t *decoded,
+                                size_t noOfLostFrames);
 
   /****************************************************************************
    * WebRtcIlbcfix_version(...)
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c b/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c
index 4957142..376dbbb 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c
@@ -22,7 +22,7 @@
 void WebRtcIlbcfix_InterpolateSamples(
     int16_t *interpSamples, /* (o) The interpolated samples */
     int16_t *CBmem,   /* (i) The CB memory */
-    int16_t lMem    /* (i) Length of the CB memory */
+    size_t lMem    /* (i) Length of the CB memory */
                                       ) {
   int16_t *ppi, *ppo, i, j, temp1, temp2;
   int16_t *tmpPtr;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h b/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h
index 586c27d..7549d2c 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h
@@ -28,7 +28,7 @@
 void WebRtcIlbcfix_InterpolateSamples(
     int16_t *interpSamples, /* (o) The interpolated samples */
     int16_t *CBmem,   /* (i) The CB memory */
-    int16_t lMem    /* (i) Length of the CB memory */
+    size_t lMem    /* (i) Length of the CB memory */
                                       );
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c b/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c
index 3261015..bd6ff56 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c
@@ -25,11 +25,12 @@
 void WebRtcIlbcfix_MyCorr(
     int32_t* corr,  /* (o) correlation of seq1 and seq2 */
     const int16_t* seq1,  /* (i) first sequence */
-    int16_t dim1,  /* (i) dimension first seq1 */
+    size_t dim1,  /* (i) dimension first seq1 */
     const int16_t* seq2, /* (i) second sequence */
-    int16_t dim2   /* (i) dimension seq2 */
+    size_t dim2   /* (i) dimension seq2 */
                           ){
-  int16_t max, loops;
+  int16_t max;
+  size_t loops;
   int scale;
 
   /* Calculate correlation between the two sequences. Scale the
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/my_corr.h b/webrtc/modules/audio_coding/codecs/ilbc/my_corr.h
index a74dd1e..2149464 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/my_corr.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/my_corr.h
@@ -28,9 +28,9 @@
 void WebRtcIlbcfix_MyCorr(
     int32_t* corr,  /* (o) correlation of seq1 and seq2 */
     const int16_t* seq1,  /* (i) first sequence */
-    int16_t dim1,  /* (i) dimension first seq1 */
+    size_t dim1,  /* (i) dimension first seq1 */
     const int16_t* seq2, /* (i) second sequence */
-    int16_t dim2   /* (i) dimension seq2 */
+    size_t dim2   /* (i) dimension seq2 */
                           );
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c b/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
index 30c7a03..b6cc240 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
@@ -28,9 +28,9 @@
     int16_t *index, /* (o) index of array element closest to value */
     int16_t *array, /* (i) data array (Q2) */
     int16_t value, /* (i) value (Q2) */
-    int16_t arlength /* (i) dimension of data array (==8) */
+    size_t arlength /* (i) dimension of data array (==8) */
                                    ){
-  int i;
+  size_t i;
   int16_t diff;
   /* Stack based */
   int32_t crit[8];
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h b/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h
index 0c03470..4c7ed3e 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h
@@ -31,7 +31,7 @@
     int16_t *index, /* (o) index of array element closest to value */
     int16_t *array, /* (i) data array (Q2) */
     int16_t value, /* (i) value (Q2) */
-    int16_t arlength /* (i) dimension of data array (==8) */
+    size_t arlength /* (i) dimension of data array (==8) */
                                    );
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/refiner.c b/webrtc/modules/audio_coding/codecs/ilbc/refiner.c
index 2fff362..86df81c 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/refiner.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/refiner.c
@@ -39,8 +39,9 @@
                                            summed with earlier contributions */
     int16_t gain    /* (i) Gain to use for this sequence */
                            ){
-  int16_t estSegPosRounded,searchSegStartPos,searchSegEndPos,corrdim;
-  int16_t tloc,tloc2,i,st,en,fraction;
+  int16_t estSegPosRounded,searchSegStartPos,searchSegEndPos;
+  size_t corrdim,i;
+  int16_t tloc,tloc2,st,en,fraction;
 
   int32_t maxtemp, scalefact;
   int16_t *filtStatePtr, *polyPtr;
@@ -65,13 +66,13 @@
   if(searchSegEndPos+ENH_BLOCKL >= idatal) {
     searchSegEndPos=idatal-ENH_BLOCKL-1;
   }
-  corrdim=searchSegEndPos-searchSegStartPos+1;
+  corrdim=(size_t)(searchSegEndPos-searchSegStartPos+1);
 
   /* compute upsampled correlation and find
      location of max */
 
   WebRtcIlbcfix_MyCorr(corrVecTemp,idata+searchSegStartPos,
-                       (int16_t)(corrdim+ENH_BLOCKL-1),idata+centerStartPos,ENH_BLOCKL);
+                       corrdim+ENH_BLOCKL-1,idata+centerStartPos,ENH_BLOCKL);
 
   /* Calculate the rescaling factor for the correlation in order to
      put the correlation in a int16_t vector instead */
@@ -110,7 +111,7 @@
   /* initialize the vector to be filtered, stuff with zeros
      when data is outside idata buffer */
   if(st<0){
-    WebRtcSpl_MemSetW16(vect, 0, (int16_t)(-st));
+    WebRtcSpl_MemSetW16(vect, 0, (size_t)(-st));
     WEBRTC_SPL_MEMCPY_W16(&vect[-st], idata, (ENH_VECTL+st));
   }
   else{
@@ -120,7 +121,7 @@
       WEBRTC_SPL_MEMCPY_W16(vect, &idata[st],
                             (ENH_VECTL-(en-idatal)));
       WebRtcSpl_MemSetW16(&vect[ENH_VECTL-(en-idatal)], 0,
-                          (int16_t)(en-idatal));
+                          (size_t)(en-idatal));
     }
     else {
       WEBRTC_SPL_MEMCPY_W16(vect, &idata[st], ENH_VECTL);
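
The (size_t)(-st) casts in this file are branch-guarded: -st is converted only after st < 0 has been established, so the operand is strictly positive and the conversion is well defined. A condensed sketch of the zero-stuffing pattern, with local names and plain memset/memcpy standing in for WebRtcSpl_MemSetW16 and WEBRTC_SPL_MEMCPY_W16:

  #include <stddef.h>
  #include <stdint.h>
  #include <string.h>

  /* Fill vect[0..vectl) from idata starting at (possibly negative) st,
   * padding with zeros where st reaches before the buffer. Assumes
   * vectl > pad so idata covers the remainder. */
  static void stuff_left_edge(int16_t* vect, const int16_t* idata, int st,
                              size_t vectl) {
    if (st < 0) {
      size_t pad = (size_t)(-st);  /* safe: st < 0 implies -st > 0 */
      memset(vect, 0, pad * sizeof(int16_t));
      memcpy(vect + pad, idata, (vectl - pad) * sizeof(int16_t));
    } else {
      memcpy(vect, idata + st, vectl * sizeof(int16_t));
    }
  }

  int main(void) {
    int16_t idata[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    int16_t vect[6];
    stuff_left_edge(vect, idata, -2, 6);  /* vect = 0 0 1 2 3 4 */
    return (int)vect[2] - 1;              /* 0 on success */
  }
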
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c b/webrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c
index d89770e..e63dda8 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c
@@ -42,7 +42,8 @@
     IlbcEncoder *iLBCenc_inst
     /* (i/o) the encoder state structure */
                                         ) {
-  int i, pos, lp_length;
+  size_t i;
+  int pos, lp_length;
 
   int16_t *lsf2, *lsfdeq2;
   /* Stack based */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c b/webrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c
index dfc637b..72d80e0 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c
@@ -34,7 +34,7 @@
                                      ) {
   int k;
   int scale;
-  int16_t is;
+  size_t is;
   int16_t stability;
   /* Stack based */
   int16_t A[LPC_FILTERORDER + 1];
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c b/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c
index 324b670..29fe91b 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c
@@ -24,14 +24,14 @@
  *---------------------------------------------------------------*/
 
 void WebRtcIlbcfix_StateConstruct(
-    int16_t idxForMax,   /* (i) 6-bit index for the quantization of
+    size_t idxForMax,   /* (i) 6-bit index for the quantization of
                                            max amplitude */
     int16_t *idxVec,   /* (i) vector of quantization indexes */
     int16_t *syntDenum,  /* (i) synthesis filter denominator */
     int16_t *Out_fix,  /* (o) the decoded state vector */
-    int16_t len    /* (i) length of a state vector */
+    size_t len    /* (i) length of a state vector */
                                   ) {
-  int k;
+  size_t k;
   int16_t maxVal;
   int16_t *tmp1, *tmp2, *tmp3;
   /* Stack based */
@@ -96,7 +96,7 @@
   /* Run MA filter + AR filter */
   WebRtcSpl_FilterMAFastQ12(
       sampleVal, sampleMa,
-      numerator, LPC_FILTERORDER+1, (int16_t)(len + LPC_FILTERORDER));
+      numerator, LPC_FILTERORDER+1, len + LPC_FILTERORDER);
   WebRtcSpl_MemSetW16(&sampleMa[len + LPC_FILTERORDER], 0, (len - LPC_FILTERORDER));
   WebRtcSpl_FilterARFastQ12(
       sampleMa, sampleAr,
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/state_construct.h b/webrtc/modules/audio_coding/codecs/ilbc/state_construct.h
index 22d75e2..2631919 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/state_construct.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/state_construct.h
@@ -24,12 +24,12 @@
  *---------------------------------------------------------------*/
 
 void WebRtcIlbcfix_StateConstruct(
-    int16_t idxForMax,   /* (i) 6-bit index for the quantization of
+    size_t idxForMax,   /* (i) 6-bit index for the quantization of
                                            max amplitude */
     int16_t *idxVec,   /* (i) vector of quantization indexes */
     int16_t *syntDenum,  /* (i) synthesis filter denominator */
     int16_t *Out_fix,  /* (o) the decoded state vector */
-    int16_t len    /* (i) length of a state vector */
+    size_t len    /* (i) length of a state vector */
                                   );
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/state_search.c b/webrtc/modules/audio_coding/codecs/ilbc/state_search.c
index b2214c7..295c543 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/state_search.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/state_search.c
@@ -33,7 +33,7 @@
     int16_t *syntDenum,  /* (i) lpc synthesis filter */
     int16_t *weightDenum  /* (i) weighting filter denominator */
                                ) {
-  int16_t k, index;
+  size_t k, index;
   int16_t maxVal;
   int16_t scale, shift;
   int32_t maxValsq;
@@ -64,9 +64,9 @@
 
   /* Run the Zero-Pole filter (Circular convolution) */
   WebRtcSpl_MemSetW16(residualLongVec, 0, LPC_FILTERORDER);
-  WebRtcSpl_FilterMAFastQ12(
-      residualLong, sampleMa,
-      numerator, LPC_FILTERORDER+1, (int16_t)(iLBCenc_inst->state_short_len + LPC_FILTERORDER));
+  WebRtcSpl_FilterMAFastQ12(residualLong, sampleMa, numerator,
+                            LPC_FILTERORDER + 1,
+                            iLBCenc_inst->state_short_len + LPC_FILTERORDER);
   WebRtcSpl_MemSetW16(&sampleMa[iLBCenc_inst->state_short_len + LPC_FILTERORDER], 0, iLBCenc_inst->state_short_len - LPC_FILTERORDER);
 
   WebRtcSpl_FilterARFastQ12(
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c b/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c
index 8bbac42..b795e56 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c
@@ -24,10 +24,10 @@
 
 void WebRtcIlbcfix_SwapBytes(
     const uint16_t* input,   /* (i) the sequence to swap */
-    int16_t wordLength,      /* (i) number or uint16_t to swap */
+    size_t wordLength,      /* (i) number of uint16_t to swap */
     uint16_t* output         /* (o) the swapped sequence */
                               ) {
-  int k;
+  size_t k;
   for (k = wordLength; k > 0; k--) {
     *output++ = (*input >> 8)|(*input << 8);
     input++;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h b/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h
index a909b2c..a4484d6 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h
@@ -27,7 +27,7 @@
 
 void WebRtcIlbcfix_SwapBytes(
     const uint16_t* input,   /* (i) the sequence to swap */
-    int16_t wordLength,      /* (i) number or uint16_t to swap */
+    size_t wordLength,      /* (i) number of uint16_t to swap */
     uint16_t* output         /* (o) the swapped sequence */
                               );
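
Counting down with an unsigned index changes the loop-exit idiom: `k > 0` terminates correctly, whereas the superficially equivalent `k >= 0` would never be false once k is size_t. The swap loop from the hunk above, wrapped in a small test harness for illustration:

  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  static void swap_bytes(const uint16_t* input, size_t wordLength,
                         uint16_t* output) {
    for (size_t k = wordLength; k > 0; k--) {  /* not k >= 0: size_t wraps */
      *output++ = (uint16_t)((*input >> 8) | (*input << 8));
      input++;
    }
  }

  int main(void) {
    const uint16_t in[2] = {0x1234, 0xABCD};
    uint16_t out[2];
    swap_bytes(in, 2, out);
    printf("%04X %04X\n", out[0], out[1]);  /* 3412 CDAB */
    return 0;
  }
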
 
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c b/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c
index 6ee3df4..1199c81 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c
@@ -47,12 +47,11 @@
   int16_t data[BLOCKL_MAX];
   uint8_t encoded_data[2 * ILBCNOOFWORDS_MAX];
   int16_t decoded_data[BLOCKL_MAX];
-  int len;
-  short pli, mode;
+  int len_int, mode;
+  short pli;
   int blockcount = 0;
   int packetlosscount = 0;
-  int frameLen;
-  size_t len_i16s;
+  size_t frameLen, len, len_i16s;
   int16_t speechType;
   IlbcEncoderInstance *Enc_Inst;
   IlbcDecoderInstance *Dec_Inst;
@@ -153,23 +152,23 @@
 
   WebRtcIlbcfix_EncoderInit(Enc_Inst, mode);
   WebRtcIlbcfix_DecoderInit(Dec_Inst, mode);
-  frameLen = mode*8;
+  frameLen = (size_t)(mode*8);
 
   /* loop over input blocks */
 
-  while (((int16_t)fread(data,sizeof(int16_t),frameLen,ifileid))==
-         frameLen) {
+  while (fread(data,sizeof(int16_t),frameLen,ifileid) == frameLen) {
 
     blockcount++;
 
     /* encoding */
 
     fprintf(stderr, "--- Encoding block %i --- ",blockcount);
-    len = WebRtcIlbcfix_Encode(Enc_Inst, data, (int16_t)frameLen, encoded_data);
-    if (len < 0) {
+    len_int = WebRtcIlbcfix_Encode(Enc_Inst, data, frameLen, encoded_data);
+    if (len_int < 0) {
       fprintf(stderr, "Error encoding\n");
       exit(0);
     }
+    len = (size_t)len_int;
     fprintf(stderr, "\r");
 
     /* write byte file */
@@ -204,12 +203,13 @@
 
     fprintf(stderr, "--- Decoding block %i --- ",blockcount);
     if (pli==1) {
-      len=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data,
-                               (int16_t)len, decoded_data,&speechType);
-      if (len < 0) {
+      len_int=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data,
+                                   len, decoded_data,&speechType);
+      if (len_int < 0) {
         fprintf(stderr, "Error decoding\n");
         exit(0);
       }
+      len = (size_t)len_int;
     } else {
       len=WebRtcIlbcfix_DecodePlc(Dec_Inst, decoded_data, 1);
     }
@@ -217,8 +217,7 @@
 
     /* write output file */
 
-    if (fwrite(decoded_data, sizeof(int16_t), len,
-               ofileid) != (size_t)len) {
+    if (fwrite(decoded_data, sizeof(int16_t), len, ofileid) != len) {
       return -1;
     }
   }
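
The test programs settle on one pattern for APIs that still return int with negative error codes but whose successful result feeds size_t consumers such as fwrite: test the sign first, then cast exactly once. A self-contained sketch; encode_stub is a stand-in for WebRtcIlbcfix_Encode, not the real encoder.

  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>

  static int encode_stub(const int16_t* data, size_t frameLen, uint8_t* out) {
    (void)data;
    (void)out;
    return (int)(frameLen / 4);  /* pretend byte count; < 0 would mean error */
  }

  int main(void) {
    int16_t data[160] = {0};
    uint8_t encoded[64];
    int len_int = encode_stub(data, 160, encoded);
    if (len_int < 0) {
      fprintf(stderr, "Error encoding\n");
      exit(0);
    }
    size_t len = (size_t)len_int;  /* non-negative here, so the cast is exact */
    printf("%zu bytes\n", len);    /* 40 bytes */
    return 0;
  }
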
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c b/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c
index b4e36b6..f14192c 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c
@@ -41,15 +41,15 @@
 {
   FILE *ifileid,*efileid,*ofileid, *chfileid;
   short encoded_data[55], data[240], speechType;
-  int len;
-  short mode, pli;
-  size_t readlen;
+  int len_int, mode;
+  short pli;
+  size_t len, readlen;
   int blockcount = 0;
 
   IlbcEncoderInstance *Enc_Inst;
   IlbcDecoderInstance *Dec_Inst;
 #ifdef JUNK_DATA
-  int i;
+  size_t i;
   FILE *seedfile;
   unsigned int random_seed = (unsigned int) time(NULL);//1196764538
 #endif
@@ -136,11 +136,12 @@
 
     /* encoding */
     fprintf(stderr, "--- Encoding block %i --- ",blockcount);
-    len=WebRtcIlbcfix_Encode(Enc_Inst, data, (short)readlen, encoded_data);
-    if (len < 0) {
+    len_int=WebRtcIlbcfix_Encode(Enc_Inst, data, readlen, encoded_data);
+    if (len_int < 0) {
       fprintf(stderr, "Error encoding\n");
       exit(0);
     }
+    len = (size_t)len_int;
     fprintf(stderr, "\r");
 
 #ifdef JUNK_DATA
@@ -174,12 +175,13 @@
       /* decoding */
       fprintf(stderr, "--- Decoding block %i --- ",blockcount);
       if (pli==1) {
-        len=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data, (int16_t)len, data,
-                                 &speechType);
-        if (len < 0) {
+        len_int = WebRtcIlbcfix_Decode(Dec_Inst, encoded_data, len, data,
+                                       &speechType);
+        if (len_int < 0) {
           fprintf(stderr, "Error decoding\n");
           exit(0);
         }
+        len = (size_t)len_int;
       } else {
         len=WebRtcIlbcfix_DecodePlc(Dec_Inst, data, 1);
       }
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.c b/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.c
index dbecc33..dc12a5a 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.c
@@ -26,9 +26,9 @@
     int32_t *z,    /* Output */
     int32_t *x,    /* Input (same domain as Output)*/
     const int32_t  *y,  /* Q31 Window */
-    int16_t N     /* length to process */
+    size_t N     /* length to process */
                                ) {
-  int16_t i;
+  size_t i;
   int16_t x_low, x_hi, y_low, y_hi;
   int16_t left_shifts;
   int32_t temp;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.h b/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.h
index 4ee6fce..27ed1b6 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.h
@@ -29,7 +29,7 @@
     int32_t *z,    /* Output */
     int32_t *x,    /* Input (same domain as Output)*/
     const int32_t  *y,  /* Q31 Window */
-    int16_t N     /* length to process */
+    size_t N     /* length to process */
                                );
 
 #endif
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c b/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c
index 53d95bf..0d898c5 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c
@@ -23,16 +23,16 @@
  * crossCorr*crossCorr/(energy) criterion
  *---------------------------------------------------------------*/
 
-int WebRtcIlbcfix_XcorrCoef(
+size_t WebRtcIlbcfix_XcorrCoef(
     int16_t *target,  /* (i) first array */
     int16_t *regressor, /* (i) second array */
-    int16_t subl,  /* (i) dimension arrays */
-    int16_t searchLen, /* (i) the search lenght */
-    int16_t offset,  /* (i) samples offset between arrays */
+    size_t subl,  /* (i) dimension of arrays */
+    size_t searchLen, /* (i) the search length */
+    size_t offset,  /* (i) samples offset between arrays */
     int16_t step   /* (i) +1 or -1 */
                             ){
-  int k;
-  int16_t maxlag;
+  size_t k;
+  size_t maxlag;
   int16_t pos;
   int16_t max;
   int16_t crossCorrScale, Energyscale;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h b/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h
index 1f4c58d..9b81c0f 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h
@@ -26,12 +26,12 @@
  * crossCorr*crossCorr/(energy) criterion
  *---------------------------------------------------------------*/
 
-int WebRtcIlbcfix_XcorrCoef(
+size_t WebRtcIlbcfix_XcorrCoef(
     int16_t *target,  /* (i) first array */
     int16_t *regressor, /* (i) second array */
-    int16_t subl,  /* (i) dimension arrays */
-    int16_t searchLen, /* (i) the search lenght */
-    int16_t offset,  /* (i) samples offset between arrays */
+    size_t subl,  /* (i) dimension of arrays */
+    size_t searchLen, /* (i) the search length */
+    size_t offset,  /* (i) samples offset between arrays */
     int16_t step   /* (i) +1 or -1 */
                             );
 
diff --git a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
index 7093304..a2c43a6 100644
--- a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
+++ b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
@@ -55,8 +55,8 @@
   int SampleRateHz() const override;
   int NumChannels() const override;
   size_t MaxEncodedBytes() const override;
-  int Num10MsFramesInNextPacket() const override;
-  int Max10MsFramesInAPacket() const override;
+  size_t Num10MsFramesInNextPacket() const override;
+  size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
   EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
                              const int16_t* audio,
@@ -94,7 +94,7 @@
   ~AudioDecoderIsacT() override;
 
   bool HasDecodePlc() const override;
-  int DecodePlc(int num_frames, int16_t* decoded) override;
+  size_t DecodePlc(size_t num_frames, int16_t* decoded) override;
   int Init() override;
   int IncomingPacket(const uint8_t* payload,
                      size_t payload_len,
diff --git a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
index ce70db4..93fbde9 100644
--- a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
+++ b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
@@ -123,14 +123,15 @@
 }
 
 template <typename T>
-int AudioEncoderIsacT<T>::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderIsacT<T>::Num10MsFramesInNextPacket() const {
   const int samples_in_next_packet = T::GetNewFrameLen(isac_state_);
-  return rtc::CheckedDivExact(samples_in_next_packet,
-                              rtc::CheckedDivExact(SampleRateHz(), 100));
+  return static_cast<size_t>(
+      rtc::CheckedDivExact(samples_in_next_packet,
+                           rtc::CheckedDivExact(SampleRateHz(), 100)));
 }
 
 template <typename T>
-int AudioEncoderIsacT<T>::Max10MsFramesInAPacket() const {
+size_t AudioEncoderIsacT<T>::Max10MsFramesInAPacket() const {
   return 6;  // iSAC puts at most 60 ms in a packet.
 }
 
@@ -215,8 +216,7 @@
   }
   int16_t temp_type = 1;  // Default is speech.
   int ret =
-      T::DecodeInternal(isac_state_, encoded, static_cast<int16_t>(encoded_len),
-                        decoded, &temp_type);
+      T::DecodeInternal(isac_state_, encoded, encoded_len, decoded, &temp_type);
   *speech_type = ConvertSpeechType(temp_type);
   return ret;
 }
@@ -227,7 +227,7 @@
 }
 
 template <typename T>
-int AudioDecoderIsacT<T>::DecodePlc(int num_frames, int16_t* decoded) {
+size_t AudioDecoderIsacT<T>::DecodePlc(size_t num_frames, int16_t* decoded) {
   return T::DecodePlc(isac_state_, decoded, num_frames);
 }
 
@@ -243,7 +243,7 @@
                                          uint32_t rtp_timestamp,
                                          uint32_t arrival_timestamp) {
   int ret = T::UpdateBwEstimate(
-      isac_state_, payload, static_cast<int32_t>(payload_len),
+      isac_state_, payload, payload_len,
       rtp_sequence_number, rtp_timestamp, arrival_timestamp);
   if (bwinfo_) {
     IsacBandwidthInfo bwinfo;
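
Num10MsFramesInNextPacket() now returns size_t, and the division it performs is exact by construction (packets hold a whole number of 10 ms frames), which is what rtc::CheckedDivExact enforces in the C++ above. A C sketch of the same arithmetic with the checks written as asserts; the function name and sample values are illustrative.

  #include <assert.h>
  #include <stddef.h>
  #include <stdio.h>

  static size_t frames_in_next_packet(int samples_in_next_packet,
                                      int sample_rate_hz) {
    assert(sample_rate_hz % 100 == 0);  /* a 10 ms frame must be integral */
    int samples_per_10ms = sample_rate_hz / 100;
    assert(samples_in_next_packet >= 0);
    assert(samples_in_next_packet % samples_per_10ms == 0);  /* exact */
    return (size_t)(samples_in_next_packet / samples_per_10ms);
  }

  int main(void) {
    printf("%zu\n", frames_in_next_packet(480, 16000));  /* 3 */
    return 0;
  }
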
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h b/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h
index 9d51161..6c61915 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h
@@ -40,14 +40,14 @@
   }
   static inline int DecodeInternal(instance_type* inst,
                                    const uint8_t* encoded,
-                                   int16_t len,
+                                   size_t len,
                                    int16_t* decoded,
                                    int16_t* speech_type) {
     return WebRtcIsacfix_Decode(inst, encoded, len, decoded, speech_type);
   }
-  static inline int16_t DecodePlc(instance_type* inst,
-                                  int16_t* decoded,
-                                  int16_t num_lost_frames) {
+  static inline size_t DecodePlc(instance_type* inst,
+                                 int16_t* decoded,
+                                 size_t num_lost_frames) {
     return WebRtcIsacfix_DecodePlc(inst, decoded, num_lost_frames);
   }
   static inline int16_t DecoderInit(instance_type* inst) {
@@ -104,7 +104,7 @@
   }
   static inline int16_t UpdateBwEstimate(instance_type* inst,
                                          const uint8_t* encoded,
-                                         int32_t packet_size,
+                                         size_t packet_size,
                                          uint16_t rtp_seq_number,
                                          uint32_t send_ts,
                                          uint32_t arr_ts) {
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h b/webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h
index 68ffe65..eec4a39 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h
@@ -11,6 +11,8 @@
 #ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INTERFACE_ISACFIX_H_
 #define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INTERFACE_ISACFIX_H_
 
+#include <stddef.h>
+
 #include "webrtc/modules/audio_coding/codecs/isac/bandwidth_info.h"
 #include "webrtc/typedefs.h"
 
@@ -189,7 +191,7 @@
    * Input:
    *      - ISAC_main_inst    : ISAC instance.
    *      - encoded           : encoded ISAC frame(s).
-   *      - packet_size       : size of the packet.
+   *      - packet_size       : size of the packet in bytes.
    *      - rtp_seq_number    : the RTP number of the packet.
    *      - arr_ts            : the arrival time of the packet (from NetEq)
    *                            in samples.
@@ -200,7 +202,7 @@
 
   int16_t WebRtcIsacfix_UpdateBwEstimate1(ISACFIX_MainStruct *ISAC_main_inst,
                                           const uint8_t* encoded,
-                                          int32_t  packet_size,
+                                          size_t packet_size,
                                           uint16_t rtp_seq_number,
                                           uint32_t arr_ts);
 
@@ -212,7 +214,7 @@
    * Input:
    *      - ISAC_main_inst    : ISAC instance.
    *      - encoded           : encoded ISAC frame(s).
-   *      - packet_size       : size of the packet.
+   *      - packet_size       : size of the packet in bytes.
    *      - rtp_seq_number    : the RTP number of the packet.
    *      - send_ts           : the send time of the packet from RTP header,
    *                            in samples.
@@ -225,7 +227,7 @@
 
   int16_t WebRtcIsacfix_UpdateBwEstimate(ISACFIX_MainStruct *ISAC_main_inst,
                                          const uint8_t* encoded,
-                                         int32_t packet_size,
+                                         size_t packet_size,
                                          uint16_t rtp_seq_number,
                                          uint32_t send_ts,
                                          uint32_t arr_ts);
@@ -251,7 +253,7 @@
 
   int WebRtcIsacfix_Decode(ISACFIX_MainStruct *ISAC_main_inst,
                            const uint8_t* encoded,
-                           int16_t len,
+                           size_t len,
                            int16_t *decoded,
                            int16_t *speechType);
 
@@ -280,7 +282,7 @@
 #ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
   int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct *ISAC_main_inst,
                              const uint16_t *encoded,
-                             int16_t len,
+                             size_t len,
                              int16_t *decoded,
                              int16_t *speechType);
 #endif //  WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
@@ -303,14 +305,13 @@
    * Output:
    *      - decoded           : The decoded vector
    *
-   * Return value             : >0 - number of samples in decoded PLC vector
-   *                            -1 - Error
+   * Return value             : Number of samples in decoded PLC vector
    */
 
 #ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
-  int16_t WebRtcIsacfix_DecodePlcNb(ISACFIX_MainStruct *ISAC_main_inst,
-                                    int16_t *decoded,
-                                    int16_t noOfLostFrames);
+  size_t WebRtcIsacfix_DecodePlcNb(ISACFIX_MainStruct *ISAC_main_inst,
+                                   int16_t *decoded,
+                                   size_t noOfLostFrames);
 #endif // WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
 
 
@@ -332,13 +333,12 @@
    * Output:
    *      - decoded           : The decoded vector
    *
-   * Return value             : >0 - number of samples in decoded PLC vector
-   *                            -1 - Error
+   * Return value             : Number of samples in decoded PLC vector
    */
 
-  int16_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct *ISAC_main_inst,
-                                  int16_t *decoded,
-                                  int16_t noOfLostFrames );
+  size_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct *ISAC_main_inst,
+                                 int16_t *decoded,
+                                 size_t noOfLostFrames );
 
 
   /****************************************************************************
@@ -356,8 +356,8 @@
    */
 
   int16_t WebRtcIsacfix_ReadFrameLen(const uint8_t* encoded,
-                                     int encoded_len_bytes,
-                                     int16_t* frameLength);
+                                     size_t encoded_len_bytes,
+                                     size_t* frameLength);
 
   /****************************************************************************
    * WebRtcIsacfix_Control(...)
@@ -608,7 +608,7 @@
    */
 
   int16_t WebRtcIsacfix_ReadBwIndex(const uint8_t* encoded,
-                                    int encoded_len_bytes,
+                                    size_t encoded_len_bytes,
                                     int16_t* rateIndex);
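
Dropping the -1 error return from the PLC entry points means callers lose their error branch entirely, as the revised header comments above state. A sketch of the new calling convention; decode_plc_stub stands in for WebRtcIsacfix_DecodePlc, and the 480-sample frame is taken from the decoder's kProcessedSamples below.

  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  static size_t decode_plc_stub(int16_t* decoded, size_t noOfLostFrames) {
    const size_t kFrameSamples = 480;  /* matches kProcessedSamples */
    size_t total = noOfLostFrames * kFrameSamples;
    for (size_t i = 0; i < total; ++i)
      decoded[i] = 0;  /* a real PLC extrapolates; zeros suffice here */
    return total;      /* always the sample count, never an error code */
  }

  int main(void) {
    int16_t decoded[960];
    size_t len = decode_plc_stub(decoded, 2);  /* no "< 0" check any more */
    printf("%zu samples\n", len);
    return 0;
  }
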
 
 
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
index d876a3c..b074962 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
@@ -148,7 +148,7 @@
                                          const int16_t  frameSize,
                                          const uint32_t sendTime,
                                          const uint32_t arrivalTime,
-                                         const int16_t  pksize,
+                                         const size_t   pksize,
                                          const uint16_t Index)
 {
   uint16_t  weight = 0;
@@ -379,7 +379,7 @@
 
         /* compute inverse receiving rate for last packet, in Q19 */
         numBytesInv = (uint16_t) WebRtcSpl_DivW32W16(
-            524288 + ((pksize + HEADER_SIZE) >> 1),
+            (int32_t)(524288 + ((pksize + HEADER_SIZE) >> 1)),
             (int16_t)(pksize + HEADER_SIZE));
 
         /* 8389 is  ~ 1/128000 in Q30 */
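
The new cast a few lines up exists because, once pksize is size_t, the whole numerator expression is computed in unsigned arithmetic (and at 64-bit width on LP64 targets), while WebRtcSpl_DivW32W16 takes a signed 32-bit numerator. A small demonstration of the promotion; kHeaderSize is an illustrative stand-in for HEADER_SIZE, not its real value.

  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    const size_t kHeaderSize = 35;  /* assumption for this sketch */
    size_t pksize = 200;
    /* size_t + size_t stays size_t: unsigned, possibly 64-bit. */
    size_t wide = 524288 + ((pksize + kHeaderSize) >> 1);
    int32_t narrowed = (int32_t)wide;  /* value fits, so narrowing is lossless */
    printf("%zu -> %d\n", wide, narrowed);  /* 524405 -> 524405 */
    return 0;
  }
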
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h b/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h
index 5d8ccbc..101ef62 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h
@@ -62,7 +62,7 @@
                                          const int16_t         frameSize,
                                          const uint32_t        send_ts,
                                          const uint32_t        arr_ts,
-                                         const int16_t         pksize,
+                                         const size_t          pksize,
                                          const uint16_t        Index);
 
 /* Update receiving estimates. Used when we only receive BWE index, no iSAC data packet. */
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/codec.h b/webrtc/modules/audio_coding/codecs/isac/fix/source/codec.h
index d71decc..fdbb2fc 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/codec.h
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/codec.h
@@ -27,18 +27,18 @@
 
 int WebRtcIsacfix_EstimateBandwidth(BwEstimatorstr* bwest_str,
                                     Bitstr_dec* streamdata,
-                                    int32_t packet_size,
+                                    size_t packet_size,
                                     uint16_t rtp_seq_number,
                                     uint32_t send_ts,
                                     uint32_t arr_ts);
 
 int WebRtcIsacfix_DecodeImpl(int16_t* signal_out16,
                              IsacFixDecoderInstance* ISACdec_obj,
-                             int16_t* current_framesamples);
+                             size_t* current_framesamples);
 
 void WebRtcIsacfix_DecodePlcImpl(int16_t* decoded,
                                  IsacFixDecoderInstance* ISACdec_obj,
-                                 int16_t* current_framesample );
+                                 size_t* current_framesample );
 
 int WebRtcIsacfix_EncodeImpl(int16_t* in,
                              IsacFixEncoderInstance* ISACenc_obj,
@@ -141,7 +141,7 @@
 
 /* normalized lattice filters */
 
-void WebRtcIsacfix_NormLatticeFilterMa(int16_t orderCoef,
+void WebRtcIsacfix_NormLatticeFilterMa(size_t orderCoef,
                                        int32_t* stateGQ15,
                                        int16_t* lat_inQ0,
                                        int16_t* filt_coefQ15,
@@ -149,7 +149,7 @@
                                        int16_t lo_hi,
                                        int16_t* lat_outQ9);
 
-void WebRtcIsacfix_NormLatticeFilterAr(int16_t orderCoef,
+void WebRtcIsacfix_NormLatticeFilterAr(size_t orderCoef,
                                        int16_t* stateGQ0,
                                        int32_t* lat_inQ25,
                                        int16_t* filt_coefQ15,
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c
index d0c59d6..e3de437 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c
@@ -29,7 +29,7 @@
 
 int WebRtcIsacfix_DecodeImpl(int16_t* signal_out16,
                              IsacFixDecoderInstance* ISACdec_obj,
-                             int16_t* current_framesamples)
+                             size_t* current_framesamples)
 {
   int k;
   int err;
@@ -58,9 +58,9 @@
   int16_t gainQ13;
 
 
-  int16_t frame_nb; /* counter */
-  int16_t frame_mode; /* 0 for 30ms, 1 for 60ms */
-  static const int16_t kProcessedSamples = 480; /* 480 (for both 30, 60 ms) */
+  size_t frame_nb; /* counter */
+  size_t frame_mode; /* 0 for 30ms, 1 for 60ms */
+  static const size_t kProcessedSamples = 480; /* 480 (for both 30, 60 ms) */
 
   /* PLC */
   int16_t overlapWin[ 240 ];
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c
index b1f5d10..316f59a 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c
@@ -26,13 +26,13 @@
 
 int WebRtcIsacfix_EstimateBandwidth(BwEstimatorstr *bwest_str,
                                     Bitstr_dec  *streamdata,
-                                    int32_t  packet_size,
+                                    size_t packet_size,
                                     uint16_t rtp_seq_number,
                                     uint32_t send_ts,
                                     uint32_t arr_ts)
 {
   int16_t index;
-  int16_t frame_samples;
+  size_t frame_samples;
   int err;
 
   /* decode framelength */
@@ -53,10 +53,10 @@
   err = WebRtcIsacfix_UpdateUplinkBwImpl(
       bwest_str,
       rtp_seq_number,
-      frame_samples * 1000 / FS,
+      (int16_t)(frame_samples * 1000 / FS),
       send_ts,
       arr_ts,
-      (int16_t) packet_size,  /* in bytes */
+      packet_size,  /* in bytes */
       index);
 
   /* error check */
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
index c3a89c3..e907f2b6 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
@@ -177,11 +177,12 @@
 
 static void LinearResampler(int16_t* in,
                             int16_t* out,
-                            int16_t lenIn,
-                            int16_t lenOut)
+                            size_t lenIn,
+                            size_t lenOut)
 {
-  int32_t n = (lenIn - 1) * RESAMP_RES;
-  int16_t resOut, i, j, relativePos, diff; /* */
+  size_t n = (lenIn - 1) * RESAMP_RES;
+  int16_t resOut, relativePos, diff; /* */
+  size_t i, j;
   uint16_t udiff;
 
   if( lenIn == lenOut )
@@ -190,7 +191,7 @@
     return;
   }
 
-  resOut = WebRtcSpl_DivW32W16ResW16( n, (int16_t)(lenOut-1) );
+  resOut = WebRtcSpl_DivW32W16ResW16( (int32_t)n, (int16_t)(lenOut-1) );
 
   out[0] = in[0];
   for( i = 1, j = 0, relativePos = 0; i < lenOut; i++ )
@@ -235,7 +236,7 @@
 
 void WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
                                  IsacFixDecoderInstance *ISACdec_obj,
-                                 int16_t *current_framesamples )
+                                 size_t *current_framesamples )
 {
   int subframecnt;
 
@@ -260,12 +261,14 @@
   int16_t myDecayRate;
 
   /* ---------- PLC variables ------------ */
-  int16_t lag0, i, k, noiseIndex;
+  size_t lag0, i, k;
+  int16_t noiseIndex;
   int16_t stretchPitchLP[PITCH_MAX_LAG + 10], stretchPitchLP1[PITCH_MAX_LAG + 10];
 
   int32_t gain_lo_hiQ17[2*SUBFRAMES];
 
-  int16_t nLP, pLP, wNoisyLP, wPriodicLP, tmp16, minIdx;
+  int16_t nLP, pLP, wNoisyLP, wPriodicLP, tmp16;
+  size_t minIdx;
   int32_t nHP, pHP, wNoisyHP, wPriodicHP, corr, minCorr, maxCoeff;
   int16_t noise1, rshift;
 
@@ -300,7 +303,7 @@
 
 
 
-  lag0 = ((ISACdec_obj->plcstr_obj.lastPitchLag_Q7 + 64) >> 7) + 1;
+  lag0 = (size_t)(((ISACdec_obj->plcstr_obj.lastPitchLag_Q7 + 64) >> 7) + 1);
 
 
   if( (ISACdec_obj->plcstr_obj).used != PLC_WAS_USED )
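
LinearResampler's length arithmetic is now unsigned, which turns lenIn >= 1 into a hard precondition: (lenIn - 1) would wrap around at zero. A guard-first sketch of the per-step resolution it computes; kResampRes is a stand-in value (the real RESAMP_RES may differ), and plain truncating division only approximates WebRtcSpl_DivW32W16ResW16, which rounds.

  #include <assert.h>
  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  enum { kResampRes = 2 };  /* stand-in for RESAMP_RES */

  static int16_t resolution_per_output_step(size_t lenIn, size_t lenOut) {
    assert(lenIn >= 1 && lenOut >= 2);  /* (lenIn - 1) would wrap at 0 */
    size_t n = (lenIn - 1) * kResampRes;
    return (int16_t)(n / (lenOut - 1));
  }

  int main(void) {
    printf("%d\n", resolution_per_output_step(480, 240));  /* 958/239 = 4 */
    return 0;
  }
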
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
index 5f6e6ac..2379ba5 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
@@ -1870,7 +1870,7 @@
 
 
 int WebRtcIsacfix_DecodeFrameLen(Bitstr_dec *streamdata,
-                                 int16_t *framesamples)
+                                 size_t *framesamples)
 {
 
   int err;
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h b/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h
index e4489df..2c8c923 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h
@@ -92,7 +92,7 @@
                                  int16_t *PitchLagQ7);
 
 int WebRtcIsacfix_DecodeFrameLen(Bitstr_dec *streamdata,
-                                 int16_t *framelength);
+                                 size_t *framelength);
 
 
 int WebRtcIsacfix_EncodeFrameLen(int16_t framelength,
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c
index 9b61d60..4a663d1 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c
@@ -38,7 +38,7 @@
 /* This method assumes that |stream_size_bytes| is in valid range,
  * i.e. <= STREAM_MAXW16_60MS
  */
-static void InitializeDecoderBitstream(int stream_size_bytes,
+static void InitializeDecoderBitstream(size_t stream_size_bytes,
                                        Bitstr_dec* bitstream) {
   bitstream->W_upper = 0xFFFFFFFF;
   bitstream->streamval = 0;
@@ -621,20 +621,20 @@
 
 int16_t WebRtcIsacfix_UpdateBwEstimate1(ISACFIX_MainStruct *ISAC_main_inst,
                                         const uint8_t* encoded,
-                                        int32_t packet_size,
+                                        size_t packet_size,
                                         uint16_t rtp_seq_number,
                                         uint32_t arr_ts)
 {
   ISACFIX_SubStruct *ISAC_inst;
   Bitstr_dec streamdata;
   int16_t err;
-  const int kRequiredEncodedLenBytes = 10;
+  const size_t kRequiredEncodedLenBytes = 10;
 
   /* typecast pointer to real structure */
   ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
 
   /* Sanity check of packet length */
-  if (packet_size <= 0) {
+  if (packet_size == 0) {
     /* return error code if the packet length is zero */
     ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
     return -1;
@@ -693,7 +693,7 @@
 
 int16_t WebRtcIsacfix_UpdateBwEstimate(ISACFIX_MainStruct *ISAC_main_inst,
                                        const uint8_t* encoded,
-                                       int32_t packet_size,
+                                       size_t packet_size,
                                        uint16_t rtp_seq_number,
                                        uint32_t send_ts,
                                        uint32_t arr_ts)
@@ -701,13 +701,13 @@
   ISACFIX_SubStruct *ISAC_inst;
   Bitstr_dec streamdata;
   int16_t err;
-  const int kRequiredEncodedLenBytes = 10;
+  const size_t kRequiredEncodedLenBytes = 10;
 
   /* typecast pointer to real structure */
   ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
 
   /* Sanity check of packet length */
-  if (packet_size <= 0) {
+  if (packet_size == 0) {
     /* return error code if the packet length is zero */
     ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
     return -1;
@@ -770,15 +770,16 @@
 
 int WebRtcIsacfix_Decode(ISACFIX_MainStruct* ISAC_main_inst,
                          const uint8_t* encoded,
-                         int16_t len,
+                         size_t len,
                          int16_t* decoded,
                          int16_t* speechType)
 {
   ISACFIX_SubStruct *ISAC_inst;
   /* number of samples (480 or 960), output from decoder */
   /* that were actually used in the encoder/decoder (determined on the fly) */
-  int16_t number_of_samples;
-  int declen = 0;
+  size_t number_of_samples;
+  int declen_int = 0;
+  size_t declen;
 
   /* typecast pointer to real structure */
   ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
@@ -790,7 +791,7 @@
   }
 
   /* Sanity check of packet length */
-  if (len <= 0) {
+  if (len == 0) {
     /* return error code if the packet length is zero */
     ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
     return -1;
@@ -807,14 +808,15 @@
   /* added for NetEq purposes (VAD/DTX related) */
   *speechType=1;
 
-  declen = WebRtcIsacfix_DecodeImpl(decoded, &ISAC_inst->ISACdec_obj,
-                                    &number_of_samples);
-  if (declen < 0) {
+  declen_int = WebRtcIsacfix_DecodeImpl(decoded, &ISAC_inst->ISACdec_obj,
+                                        &number_of_samples);
+  if (declen_int < 0) {
     /* Some error inside the decoder */
-    ISAC_inst->errorcode = -(int16_t)declen;
+    ISAC_inst->errorcode = -(int16_t)declen_int;
     memset(decoded, 0, sizeof(int16_t) * MAX_FRAMESAMPLES);
     return -1;
   }
+  declen = (size_t)declen_int;
 
   /* error check */
 
@@ -836,7 +838,7 @@
     }
   }
 
-  return number_of_samples;
+  return (int)number_of_samples;
 }
 
 
@@ -865,17 +867,18 @@
  */
 
 #ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
-int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct *ISAC_main_inst,
-                           const uint16_t   *encoded,
-                           int16_t          len,
-                           int16_t          *decoded,
-                           int16_t    *speechType)
+int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct* ISAC_main_inst,
+                           const uint16_t* encoded,
+                           size_t len,
+                           int16_t* decoded,
+                           int16_t* speechType)
 {
   ISACFIX_SubStruct *ISAC_inst;
   /* twice the number of samples (480 or 960), output from decoder */
   /* that were actually used in the encoder/decoder (determined on the fly) */
-  int16_t number_of_samples;
-  int declen = 0;
+  size_t number_of_samples;
+  int declen_int = 0;
+  size_t declen;
   int16_t dummy[FRAMESAMPLES/2];
 
 
@@ -888,7 +891,7 @@
     return (-1);
   }
 
-  if (len <= 0) {
+  if (len == 0) {
     /* return error code if the packet length is zero */
     ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
     return -1;
@@ -905,14 +908,15 @@
   /* added for NetEq purposes (VAD/DTX related) */
   *speechType=1;
 
-  declen = WebRtcIsacfix_DecodeImpl(decoded, &ISAC_inst->ISACdec_obj,
-                                    &number_of_samples);
-  if (declen < 0) {
+  declen_int = WebRtcIsacfix_DecodeImpl(decoded, &ISAC_inst->ISACdec_obj,
+                                        &number_of_samples);
+  if (declen_int < 0) {
     /* Some error inside the decoder */
-    ISAC_inst->errorcode = -(int16_t)declen;
+    ISAC_inst->errorcode = -(int16_t)declen_int;
     memset(decoded, 0, sizeof(int16_t) * FRAMESAMPLES);
     return -1;
   }
+  declen = (size_t)declen_int;
 
   /* error check */
 
@@ -941,7 +945,7 @@
                                   dummy, &ISAC_inst->ISACdec_obj.decimatorstr_obj);
   }
 
-  return number_of_samples/2;
+  return (int)(number_of_samples / 2);
 }
 #endif /* WEBRTC_ISAC_FIX_NB_CALLS_ENABLED */
 
@@ -962,16 +966,15 @@
  * Output:
  *      - decoded           : The decoded vector
  *
- * Return value             : >0 - number of samples in decoded PLC vector
- *                            -1 - Error
+ * Return value             : Number of samples in decoded PLC vector
  */
 
 #ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
-int16_t WebRtcIsacfix_DecodePlcNb(ISACFIX_MainStruct *ISAC_main_inst,
-                                  int16_t          *decoded,
-                                  int16_t noOfLostFrames )
+size_t WebRtcIsacfix_DecodePlcNb(ISACFIX_MainStruct* ISAC_main_inst,
+                                 int16_t* decoded,
+                                 size_t noOfLostFrames )
 {
-  int16_t no_of_samples, declen, k, ok;
+  size_t no_of_samples, declen, k;
   int16_t outframeNB[FRAMESAMPLES];
   int16_t outframeWB[FRAMESAMPLES];
   int16_t dummy[FRAMESAMPLES/2];
@@ -1028,16 +1031,15 @@
  * Output:
  *      - decoded           : The decoded vector
  *
- * Return value             : >0 - number of samples in decoded PLC vector
- *                            -1 - Error
+ * Return value             : Number of samples in decoded PLC vector
  */
 
-int16_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct *ISAC_main_inst,
-                                int16_t          *decoded,
-                                int16_t noOfLostFrames)
+size_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct* ISAC_main_inst,
+                               int16_t* decoded,
+                               size_t noOfLostFrames)
 {
 
-  int16_t no_of_samples, declen, k;
+  size_t no_of_samples, declen, k;
   int16_t outframe16[MAX_FRAMESAMPLES];
 
   ISACFIX_SubStruct *ISAC_inst;
@@ -1272,12 +1274,12 @@
  */
 
 int16_t WebRtcIsacfix_ReadFrameLen(const uint8_t* encoded,
-                                   int encoded_len_bytes,
-                                   int16_t* frameLength)
+                                   size_t encoded_len_bytes,
+                                   size_t* frameLength)
 {
   Bitstr_dec streamdata;
   int16_t err;
-  const int kRequiredEncodedLenBytes = 10;
+  const size_t kRequiredEncodedLenBytes = 10;
 
   if (encoded_len_bytes < kRequiredEncodedLenBytes) {
     return -1;
@@ -1311,12 +1313,12 @@
  */
 
 int16_t WebRtcIsacfix_ReadBwIndex(const uint8_t* encoded,
-                                  int encoded_len_bytes,
+                                  size_t encoded_len_bytes,
                                   int16_t* rateIndex)
 {
   Bitstr_dec streamdata;
   int16_t err;
-  const int kRequiredEncodedLenBytes = 10;
+  const size_t kRequiredEncodedLenBytes = 10;
 
   if (encoded_len_bytes < kRequiredEncodedLenBytes) {
     return -1;
@@ -1327,7 +1329,7 @@
   read_be16(encoded, kRequiredEncodedLenBytes, streamdata.stream);
 
   /* decode frame length, needed to get to the rateIndex in the bitstream */
-  int16_t frameLength;
+  size_t frameLength;
   err = WebRtcIsacfix_DecodeFrameLen(&streamdata, &frameLength);
   if (err<0)  // error check
     return err;
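
With unsigned lengths the packet sanity checks simplify: the old "len <= 0" degenerates to "len == 0", and comparing against kRequiredEncodedLenBytes is an unsigned-vs-unsigned comparison with no implicit conversions. A condensed sketch of both checks; check_packet is a local illustration, not a WebRTC function.

  #include <stddef.h>

  static int check_packet(size_t len, size_t required) {
    if (len == 0) return -1;        /* was "len <= 0" when len was signed */
    if (len < required) return -1;  /* both operands size_t: no surprises */
    return 0;
  }

  int main(void) {
    const size_t kRequiredEncodedLenBytes = 10;
    return check_packet(12, kRequiredEncodedLenBytes);  /* 0: accepted */
  }
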
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c
index 13858d7..22224a8 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c
@@ -43,7 +43,7 @@
                                 int16_t* ar_f_Q0,
                                 int16_t* cth_Q15,
                                 int16_t* sth_Q15,
-                                int16_t order_coef);
+                                size_t order_coef);
 
 /* Inner loop used for function WebRtcIsacfix_NormLatticeFilterMa(). It does:
    for 0 <= n < HALF_SUBFRAMELEN - 1:
@@ -86,7 +86,7 @@
 
 /* filter the signal using normalized lattice filter */
 /* MA filter */
-void WebRtcIsacfix_NormLatticeFilterMa(int16_t orderCoef,
+void WebRtcIsacfix_NormLatticeFilterMa(size_t orderCoef,
                                        int32_t *stateGQ15,
                                        int16_t *lat_inQ0,
                                        int16_t *filt_coefQ15,
@@ -97,9 +97,10 @@
   int16_t sthQ15[MAX_AR_MODEL_ORDER];
   int16_t cthQ15[MAX_AR_MODEL_ORDER];
 
-  int u, i, k, n;
+  int u, n;
+  size_t i, k;
   int16_t temp2,temp3;
-  int16_t ord_1 = orderCoef+1;
+  size_t ord_1 = orderCoef+1;
   int32_t inv_cthQ16[MAX_AR_MODEL_ORDER];
 
   int32_t gain32, fQtmp;
@@ -210,7 +211,7 @@
 
 /* ----------------AR filter-------------------------*/
 /* filter the signal using normalized lattice filter */
-void WebRtcIsacfix_NormLatticeFilterAr(int16_t orderCoef,
+void WebRtcIsacfix_NormLatticeFilterAr(size_t orderCoef,
                                        int16_t *stateGQ0,
                                        int32_t *lat_inQ25,
                                        int16_t *filt_coefQ15,
@@ -218,7 +219,8 @@
                                        int16_t lo_hi,
                                        int16_t *lat_outQ0)
 {
-  int ii, n, k, i, u;
+  size_t ii, k, i;
+  int n, u;
   int16_t sthQ15[MAX_AR_MODEL_ORDER];
   int16_t cthQ15[MAX_AR_MODEL_ORDER];
   int32_t tmp32;
@@ -234,7 +236,7 @@
   int16_t sh;
 
   int16_t temp2,temp3;
-  int16_t ord_1 = orderCoef+1;
+  size_t ord_1 = orderCoef+1;
 
   for (u=0;u<SUBFRAMES;u++)
   {
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c
index 43a1579..40c3bf8 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c
@@ -25,11 +25,11 @@
                                 int16_t* ar_f_Q0,     // Input samples
                                 int16_t* cth_Q15,     // Filter coefficients
                                 int16_t* sth_Q15,     // Filter coefficients
-                                int16_t order_coef) { // order of the filter
+                                size_t order_coef) { // order of the filter
   int n = 0;
 
   for (n = 0; n < HALF_SUBFRAMELEN - 1; n++) {
-    int k = 0;
+    size_t k = 0;
     int16_t tmpAR = 0;
     int32_t tmp32 = 0;
     int32_t tmp32_2 = 0;
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c
index c596922..7197b15 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c
@@ -17,11 +17,11 @@
                                 int16_t* ar_f_Q0,     // Input samples
                                 int16_t* cth_Q15,     // Filter coefficients
                                 int16_t* sth_Q15,     // Filter coefficients
-                                int16_t order_coef) { // order of the filter
+                                size_t order_coef) { // order of the filter
   int n = 0;
 
   for (n = 0; n < HALF_SUBFRAMELEN - 1; n++) {
-    int count = order_coef - 1;
+    int count = (int)(order_coef - 1);
     int offset;
 #if !defined(MIPS_DSP_R1_LE)
     int16_t* tmp_cth;
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
index da401e5..40f15c4 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
@@ -39,7 +39,7 @@
 
 void WebRtcIsacfix_PitchFilterCore(int loopNumber,
                                    int16_t gain,
-                                   int index,
+                                   size_t index,
                                    int16_t sign,
                                    int16_t* inputState,
                                    int16_t* outputBuff2,
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
index c787d6e..d73a429 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
@@ -34,8 +34,8 @@
   { 271, -743,  1570, -3320, 12963,  7301, -2292,  953, -325}
 };
 
-static __inline int32_t CalcLrIntQ(int32_t fixVal,
-                                   int16_t qDomain) {
+static __inline size_t CalcLrIntQ(int16_t fixVal,
+                                  int16_t qDomain) {
   int32_t roundVal = 1 << (qDomain - 1);
 
   return (fixVal + roundVal) >> qDomain;
@@ -55,7 +55,7 @@
   const int16_t Gain = 21299;     // 1.3 in Q14
   int16_t oldLagQ7;
   int16_t oldGainQ12, lagdeltaQ7, curLagQ7, gaindeltaQ12, curGainQ12;
-  int indW32 = 0, frcQQ = 0;
+  size_t indW32 = 0, frcQQ = 0;
   const int16_t* fracoeffQQ = NULL;
 
   // Assumptions in ARM assembly for WebRtcIsacfix_PitchFilterCoreARM().
@@ -141,13 +141,15 @@
                                     PitchFiltstr* pfp,
                                     int16_t* lagsQ7,
                                     int16_t* gainsQ12) {
-  int  k, n, m, ind, pos, pos3QQ;
+  int  k, n, m;
+  size_t ind, pos, pos3QQ;
 
   int16_t ubufQQ[PITCH_INTBUFFSIZE];
   int16_t oldLagQ7, lagdeltaQ7, curLagQ7;
   const int16_t* fracoeffQQ = NULL;
   int16_t scale;
-  int16_t cnt = 0, frcQQ, indW16 = 0, tmpW16;
+  int16_t cnt = 0, tmpW16;
+  size_t frcQQ, indW16 = 0;
   int32_t tmpW32, tmp2W32, csum1QQ, esumxQQ;
 
   // Set up buffer and states.
@@ -179,7 +181,7 @@
     for (cnt = 0; cnt < kSegments; cnt++) {
       // Update parameters for each segment.
       curLagQ7 += lagdeltaQ7;
-      indW16 = (int16_t)CalcLrIntQ(curLagQ7, 7);
+      indW16 = CalcLrIntQ(curLagQ7, 7);
       frcQQ = ((indW16 << 7) + 64 - curLagQ7) >> 4;
 
       if (frcQQ == PITCH_FRACS) {
@@ -202,7 +204,7 @@
 
         tmp2W32 = WEBRTC_SPL_MUL_16_32_RSFT14(indatQ0[ind], tmpW32);
         tmpW32 += 8192;
-        tmpW16 = (int16_t)(tmpW32 >> 14);
+        tmpW16 = tmpW32 >> 14;
         tmpW32 = tmpW16 * tmpW16;
 
         if ((tmp2W32 > 1073700000) || (csum1QQ > 1073700000) ||
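
CalcLrIntQ rounds a Q-domain value to the nearest integer by adding half of one integer LSB before the arithmetic shift; with the new signature the result types as size_t, so it can index buffers directly. A compilable copy of that arithmetic with the final conversion written out; it assumes a non-negative input, which holds for the pitch lags fed to it.

  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  static size_t calc_lr_int_q(int16_t fixVal, int16_t qDomain) {
    int32_t roundVal = 1 << (qDomain - 1);  /* half an LSB of the integer part */
    return (size_t)((fixVal + roundVal) >> qDomain);
  }

  int main(void) {
    /* 320 is 2.5 in Q7 and rounds up to 3; 288 is 2.25 and rounds to 2. */
    printf("%zu %zu\n", calc_lr_int_q(320, 7), calc_lr_int_q(288, 7));
    return 0;
  }
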
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S
index 57796b0..10b9579 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S
@@ -21,7 +21,7 @@
 
 @ void WebRtcIsacfix_PitchFilterCore(int loopNumber,
 @                                    int16_t gain,
-@                                    int index,
+@                                    size_t index,
 @                                    int16_t sign,
 @                                    int16_t* inputState,
 @                                    int16_t* outputBuf2,
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c
index 5c95678..366eef0 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c
@@ -18,7 +18,7 @@
 
 void WebRtcIsacfix_PitchFilterCore(int loopNumber,
                                    int16_t gain,
-                                   int index,
+                                   size_t index,
                                    int16_t sign,
                                    int16_t* inputState,
                                    int16_t* outputBuf2,
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c
index 8334f7e..0f390b8 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c
@@ -12,7 +12,7 @@
 
 void WebRtcIsacfix_PitchFilterCore(int loopNumber,
                                    int16_t gain,
-                                   int index,
+                                   size_t index,
                                    int16_t sign,
                                    int16_t* inputState,
                                    int16_t* outputBuf2,
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/structs.h b/webrtc/modules/audio_coding/codecs/isac/fix/source/structs.h
index 5abbd7a..278af75 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/structs.h
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/structs.h
@@ -34,7 +34,7 @@
   int16_t   full;             /* 0 - first byte in memory filled, second empty*/
   /* 1 - both bytes are empty (we just filled the previous memory */
 
-  int stream_size;  /* The size of stream. */
+  size_t stream_size;  /* The size of stream in bytes. */
 } Bitstr_dec;
 
 /* Bitstream struct for encoder */
@@ -178,8 +178,8 @@
   int16_t pitchCycles;
   int16_t A;
   int16_t B;
-  int16_t pitchIndex;
-  int16_t stretchLag;
+  size_t pitchIndex;
+  size_t stretchLag;
   int16_t *prevPitchLP;                                  // [ FRAMESAMPLES/2 ]; saved 240
   int16_t seed;
 
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc b/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
index 8f073ad..fc7588d 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
@@ -26,8 +26,8 @@
   void SetUp() override;
   void TearDown() override;
   virtual float EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
-                             int max_bytes, int* encoded_bytes);
-  virtual float DecodeABlock(const uint8_t* bit_stream, int encoded_bytes,
+                             size_t max_bytes, size_t* encoded_bytes);
+  virtual float DecodeABlock(const uint8_t* bit_stream, size_t encoded_bytes,
                              int16_t* out_data);
   ISACFIX_MainStruct *ISACFIX_main_inst_;
 };
@@ -43,7 +43,7 @@
   AudioCodecSpeedTest::SetUp();
 
   // Check whether the allocated buffer for the bit stream is large enough.
-  EXPECT_GE(max_bytes_, STREAM_MAXW16_60MS);
+  EXPECT_GE(max_bytes_, static_cast<size_t>(STREAM_MAXW16_60MS));
 
   // Create encoder memory.
   EXPECT_EQ(0, WebRtcIsacfix_Create(&ISACFIX_main_inst_));
@@ -61,7 +61,7 @@
 }
 
 float IsacSpeedTest::EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
-                                  int max_bytes, int* encoded_bytes) {
+                                  size_t max_bytes, size_t* encoded_bytes) {
   // ISAC takes 10 ms every call
   const int subblocks = block_duration_ms_ / 10;
   const int subblock_length = 10 * input_sampling_khz_;
@@ -78,13 +78,13 @@
       EXPECT_EQ(0, value);
   }
   clocks = clock() - clocks;
-  *encoded_bytes = value;
+  *encoded_bytes = static_cast<size_t>(value);
   assert(*encoded_bytes <= max_bytes);
   return 1000.0 * clocks / CLOCKS_PER_SEC;
 }
 
 float IsacSpeedTest::DecodeABlock(const uint8_t* bit_stream,
-                                  int encoded_bytes,
+                                  size_t encoded_bytes,
                                   int16_t* out_data) {
   int value;
   int16_t audio_type;
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc b/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc
index 2628f1f..6a947c8 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc
@@ -50,7 +50,7 @@
 } BottleNeckModel;
 
 void get_arrival_time(int current_framesamples,   /* samples */
-                      int packet_size,            /* bytes */
+                      size_t packet_size,         /* bytes */
                       int bottleneck,             /* excluding headers; bits/s */
                       BottleNeckModel *BN_data)
 {
@@ -99,7 +99,8 @@
   FILE *inp, *outp, *f_bn, *outbits;
   int endfile;
 
-  int i, errtype, h = 0, k, packetLossPercent = 0;
+  size_t i;
+  int errtype, h = 0, k, packetLossPercent = 0;
   int16_t CodingMode;
   int16_t bottleneck;
   int framesize = 30;           /* ms */
@@ -108,14 +109,15 @@
   /* Runtime statistics */
   double starttime, runtime, length_file;
 
-  int16_t stream_len = 0;
+  int stream_len_int = 0;
+  size_t stream_len = 0;
   int16_t framecnt;
   int declen = 0;
   int16_t shortdata[FRAMESAMPLES_10ms];
   int16_t decoded[MAX_FRAMESAMPLES];
   uint16_t streamdata[500];
   int16_t speechType[1];
-  int16_t prevFrameSize = 1;
+  size_t prevFrameSize = 1;
   int16_t rateBPS = 0;
   int16_t fixedFL = 0;
   int16_t payloadSize = 0;
@@ -233,7 +235,7 @@
   CodingMode = 0;
   testNum = 0;
   testCE = 0;
-  for (i = 1; i + 2 < argc; i++) {
+  for (i = 1; i + 2 < static_cast<size_t>(argc); i++) {
     /* Instantaneous mode */
     if (!strcmp ("-I", argv[i])) {
       printf("\nInstantaneous BottleNeck\n");
@@ -565,19 +567,19 @@
           short bwe;
 
           /* Encode */
-          stream_len = WebRtcIsacfix_Encode(ISAC_main_inst,
-                                            shortdata,
-                                            (uint8_t*)streamdata);
+          stream_len_int = WebRtcIsacfix_Encode(ISAC_main_inst,
+                                                shortdata,
+                                                (uint8_t*)streamdata);
 
           /* If packet is ready, and CE testing, call the different API
              functions from the internal API. */
-          if (stream_len>0) {
+          if (stream_len_int>0) {
             if (testCE == 1) {
               err = WebRtcIsacfix_ReadBwIndex(
                   reinterpret_cast<const uint8_t*>(streamdata),
-                  stream_len,
+                  static_cast<size_t>(stream_len_int),
                   &bwe);
-              stream_len = WebRtcIsacfix_GetNewBitStream(
+              stream_len_int = WebRtcIsacfix_GetNewBitStream(
                   ISAC_main_inst,
                   bwe,
                   scale,
@@ -606,11 +608,11 @@
           }
         } else {
 #ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
-          stream_len = WebRtcIsacfix_EncodeNb(ISAC_main_inst,
-                                              shortdata,
-                                              streamdata);
+          stream_len_int = WebRtcIsacfix_EncodeNb(ISAC_main_inst,
+                                                  shortdata,
+                                                  streamdata);
 #else
-          stream_len = -1;
+          stream_len_int = -1;
 #endif
         }
       }
@@ -619,13 +621,14 @@
         break;
       }
 
-      if (stream_len < 0 || err < 0) {
+      if (stream_len_int < 0 || err < 0) {
         /* exit if returned with error */
         errtype=WebRtcIsacfix_GetErrorCode(ISAC_main_inst);
         printf("\nError in encoder: %d.\n", errtype);
       } else {
+        stream_len = static_cast<size_t>(stream_len_int);
         if (fwrite(streamdata, sizeof(char), stream_len, outbits) !=
-            (size_t)stream_len) {
+            stream_len) {
           return -1;
         }
       }
@@ -731,12 +734,12 @@
       /* iSAC decoding */
       if( lostFrame && framecnt >  0) {
         if (nbTest !=2) {
-          declen =
-              WebRtcIsacfix_DecodePlc(ISAC_main_inst, decoded, prevFrameSize);
+          declen = static_cast<int>(
+              WebRtcIsacfix_DecodePlc(ISAC_main_inst, decoded, prevFrameSize));
         } else {
 #ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
-          declen = WebRtcIsacfix_DecodePlcNb(
-              ISAC_main_inst, decoded, prevFrameSize);
+          declen = static_cast<int>(WebRtcIsacfix_DecodePlcNb(
+              ISAC_main_inst, decoded, prevFrameSize));
 #else
           declen = -1;
 #endif
@@ -744,7 +747,7 @@
         lostPackets++;
       } else {
         if (nbTest !=2 ) {
-          short FL;
+          size_t FL;
           /* Call getFramelen, only used here for function test */
           err = WebRtcIsacfix_ReadFrameLen(
               reinterpret_cast<const uint8_t*>(streamdata), stream_len, &FL);
@@ -755,11 +758,11 @@
               decoded,
               speechType);
           /* Error check */
-          if (err < 0 || declen < 0 || FL != declen) {
+          if (err < 0 || declen < 0 || FL != static_cast<size_t>(declen)) {
             errtype=WebRtcIsacfix_GetErrorCode(ISAC_main_inst);
             printf("\nError in decode_B/or getFrameLen: %d.\n", errtype);
           }
-          prevFrameSize = declen/480;
+          prevFrameSize = static_cast<size_t>(declen/480);
 
         } else {
 #ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
@@ -768,7 +771,7 @@
 #else
           declen = -1;
 #endif
-          prevFrameSize = static_cast<int16_t>(declen / 240);
+          prevFrameSize = static_cast<size_t>(declen / 240);
         }
       }
 
@@ -791,7 +794,7 @@
     framecnt++;
 
     totalsmpls += declen;
-    totalbits += 8 * stream_len;
+    totalbits += static_cast<int>(8 * stream_len);
 
     /* Error test number 10, garbage data */
     if (testNum == 10) {
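
kenny.cc's argument scan illustrates the loop-index side of the migration: argc must stay int, so once i becomes size_t the bound is cast instead. A reduced sketch (argc is at least 1 by definition, so the cast cannot wrap):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    int main(int argc, char* argv[]) {
      size_t i;
      /* Cast argc (an int) up to size_t, never the index down to int:
       * widening a non-negative argc is always safe. */
      for (i = 1; i + 2 < (size_t)argc; i++) {
        if (!strcmp("-I", argv[i]))
          printf("flag at position %zu\n", i);
      }
      return 0;
    }
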
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c b/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
index 71bd272..b82af1c 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
@@ -21,6 +21,7 @@
 /* include API */
 #include "isac.h"
 #include "isacfix.h"
+#include "webrtc/base/format_macros.h"
 
 /* max number of samples per frame (= 60 ms frame) */
 #define MAX_FRAMESAMPLES 960
@@ -57,7 +58,7 @@
 } BottleNeckModel;
 
 void get_arrival_time(int current_framesamples, /* samples */
-                      int packet_size,          /* bytes */
+                      size_t packet_size,       /* bytes */
                       int bottleneck,           /* excluding headers; bits/s */
                       BottleNeckModel* BN_data) {
   const int HeaderSize = 35;
@@ -98,7 +99,7 @@
   double runtime;
   double length_file;
 
-  int16_t stream_len = 0;
+  size_t stream_len = 0;
   int declen;
 
   int16_t shortdata[FRAMESAMPLES_10ms];
@@ -114,7 +115,7 @@
 #ifdef _DEBUG
   FILE* fy;
   double kbps;
-  int totalbits = 0;
+  size_t totalbits = 0;
   int totalsmpls = 0;
 #endif /* _DEBUG */
 
@@ -392,6 +393,8 @@
   while (endfile == 0) {
     cur_framesmpls = 0;
     while (1) {
+      int stream_len_int;
+
       /* Read 10 ms speech block */
       if (nbTest != 1)
         endfile = readframe(shortdata, inp, FRAMESAMPLES_10ms);
@@ -401,9 +404,9 @@
       /* iSAC encoding */
 
       if (mode == 0 || mode == 1) {
-        stream_len =
+        stream_len_int =
             WebRtcIsac_Encode(ISAC_main_inst, shortdata, (uint8_t*)streamdata);
-        if (stream_len < 0) {
+        if (stream_len_int < 0) {
           /* exit if returned with error */
           errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
           printf("\n\nError in encoder: %d.\n\n", errtype);
@@ -412,20 +415,21 @@
       } else if (mode == 2 || mode == 3) {
         /* iSAC encoding */
         if (nbTest != 1) {
-          stream_len = WebRtcIsacfix_Encode(ISACFIX_main_inst, shortdata,
-                                            (uint8_t*)streamdata);
+          stream_len_int = WebRtcIsacfix_Encode(ISACFIX_main_inst, shortdata,
+                                                (uint8_t*)streamdata);
         } else {
-          stream_len =
+          stream_len_int =
               WebRtcIsacfix_EncodeNb(ISACFIX_main_inst, shortdata, streamdata);
         }
 
-        if (stream_len < 0) {
+        if (stream_len_int < 0) {
           /* exit if returned with error */
           errtype = WebRtcIsacfix_GetErrorCode(ISACFIX_main_inst);
           printf("\n\nError in encoder: %d.\n\n", errtype);
           // exit(EXIT_FAILURE);
         }
       }
+      stream_len = (size_t)stream_len_int;
 
       cur_framesmpls += FRAMESAMPLES_10ms;
 
@@ -494,10 +498,13 @@
 
         /* iSAC decoding */
         if (plc && (framecnt + 1) % 10 == 0) {
-          if (nbTest != 2)
-            declen = WebRtcIsacfix_DecodePlc(ISACFIX_main_inst, decoded, 1);
-          else
-            declen = WebRtcIsacfix_DecodePlcNb(ISACFIX_main_inst, decoded, 1);
+          if (nbTest != 2) {
+            declen =
+                (int)WebRtcIsacfix_DecodePlc(ISACFIX_main_inst, decoded, 1);
+          } else {
+            declen =
+                (int)WebRtcIsacfix_DecodePlcNb(ISACFIX_main_inst, decoded, 1);
+          }
         } else {
           if (nbTest != 2)
             declen = WebRtcIsacfix_Decode(ISACFIX_main_inst, streamdata,
@@ -551,10 +558,13 @@
         /* iSAC decoding */
 
         if (plc && (framecnt + 1) % 10 == 0) {
-          if (nbTest != 2)
-            declen = WebRtcIsacfix_DecodePlc(ISACFIX_main_inst, decoded, 1);
-          else
-            declen = WebRtcIsacfix_DecodePlcNb(ISACFIX_main_inst, decoded, 1);
+          if (nbTest != 2) {
+            declen =
+                (int)WebRtcIsacfix_DecodePlc(ISACFIX_main_inst, decoded, 1);
+          } else {
+            declen =
+                (int)WebRtcIsacfix_DecodePlcNb(ISACFIX_main_inst, decoded, 1);
+          }
         } else {
           if (nbTest != 2) {
             declen = WebRtcIsacfix_Decode(ISACFIX_main_inst, streamdata,
@@ -592,7 +602,7 @@
   }
 
 #ifdef _DEBUG
-  printf("\n\ntotal bits               = %d bits", totalbits);
+  printf("\n\ntotal bits               = %" PRIuS " bits", totalbits);
   printf("\nmeasured average bitrate = %0.3f kbits/s",
          (double)totalbits * (FS / 1000) / totalsmpls);
   printf("\n");
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h b/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h
index c0f3b11..1bfd149 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h
+++ b/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h
@@ -39,14 +39,14 @@
   }
   static inline int DecodeInternal(instance_type* inst,
                                    const uint8_t* encoded,
-                                   int16_t len,
+                                   size_t len,
                                    int16_t* decoded,
                                    int16_t* speech_type) {
     return WebRtcIsac_Decode(inst, encoded, len, decoded, speech_type);
   }
-  static inline int16_t DecodePlc(instance_type* inst,
-                                  int16_t* decoded,
-                                  int16_t num_lost_frames) {
+  static inline size_t DecodePlc(instance_type* inst,
+                                 int16_t* decoded,
+                                 size_t num_lost_frames) {
     return WebRtcIsac_DecodePlc(inst, decoded, num_lost_frames);
   }
 
@@ -102,7 +102,7 @@
   }
   static inline int16_t UpdateBwEstimate(instance_type* inst,
                                          const uint8_t* encoded,
-                                         int32_t packet_size,
+                                         size_t packet_size,
                                          uint16_t rtp_seq_number,
                                          uint32_t send_ts,
                                          uint32_t arr_ts) {
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h b/webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h
index 429fc6b..0597de8 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h
+++ b/webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h
@@ -11,6 +11,8 @@
 #ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INTERFACE_ISAC_H_
 #define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INTERFACE_ISAC_H_
 
+#include <stddef.h>
+
 #include "webrtc/modules/audio_coding/codecs/isac/bandwidth_info.h"
 #include "webrtc/typedefs.h"
 
@@ -186,7 +188,7 @@
   int16_t WebRtcIsac_UpdateBwEstimate(
       ISACStruct*         ISAC_main_inst,
       const uint8_t* encoded,
-      int32_t         packet_size,
+      size_t         packet_size,
       uint16_t        rtp_seq_number,
       uint32_t        send_ts,
       uint32_t        arr_ts);
@@ -215,7 +217,7 @@
   int WebRtcIsac_Decode(
       ISACStruct*           ISAC_main_inst,
       const uint8_t* encoded,
-      int16_t         len,
+      size_t         len,
       int16_t*        decoded,
       int16_t*        speechType);
 
@@ -235,14 +237,13 @@
    * Output:
    *        - decoded           : The decoded vector.
    *
-   * Return value               : >0 - number of samples in decoded PLC vector
-   *                              -1 - Error
+   * Return value               : Number of samples in decoded PLC vector
    */
 
-  int16_t WebRtcIsac_DecodePlc(
+  size_t WebRtcIsac_DecodePlc(
       ISACStruct*  ISAC_main_inst,
       int16_t* decoded,
-      int16_t  noOfLostFrames);
+      size_t  noOfLostFrames);
 
 
   /******************************************************************************
@@ -704,7 +705,7 @@
   int WebRtcIsac_DecodeRcu(
       ISACStruct*           ISAC_main_inst,
       const uint8_t* encoded,
-      int16_t         len,
+      size_t         len,
       int16_t*        decoded,
       int16_t*        speechType);
 
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c b/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c
index 940e8f5..51da3f7 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c
@@ -142,7 +142,7 @@
     const int32_t frame_length,
     const uint32_t send_ts,
     const uint32_t arr_ts,
-    const int32_t pksize
+    const size_t pksize
     /*,    const uint16_t Index*/)
 {
   float weight = 0.0f;
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h b/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h
index 2916876..0704337 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h
@@ -95,7 +95,7 @@
       const int32_t frame_length,
       const uint32_t send_ts,
       const uint32_t arr_ts,
-      const int32_t pksize);
+      const size_t pksize);
 
   /* Update receiving estimates. Used when we only receive BWE index, no iSAC data packet. */
   int16_t WebRtcIsac_UpdateUplinkBwImpl(
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/codec.h b/webrtc/modules/audio_coding/codecs/isac/main/source/codec.h
index 4b36fff..7ef64b5 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/codec.h
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/codec.h
@@ -25,7 +25,7 @@
 void WebRtcIsac_ResetBitstream(Bitstr* bit_stream);
 
 int WebRtcIsac_EstimateBandwidth(BwEstimatorstr* bwest_str, Bitstr* streamdata,
-                                 int32_t packet_size,
+                                 size_t packet_size,
                                  uint16_t rtp_seq_number,
                                  uint32_t send_ts, uint32_t arr_ts,
                                  enum IsacSamplingRate encoderSampRate,
@@ -195,14 +195,14 @@
 
 /******************************* filter functions ****************************/
 
-void WebRtcIsac_AllPoleFilter(double* InOut, double* Coef, int lengthInOut,
+void WebRtcIsac_AllPoleFilter(double* InOut, double* Coef, size_t lengthInOut,
                               int orderCoef);
 
-void WebRtcIsac_AllZeroFilter(double* In, double* Coef, int lengthInOut,
+void WebRtcIsac_AllZeroFilter(double* In, double* Coef, size_t lengthInOut,
                               int orderCoef, double* Out);
 
 void WebRtcIsac_ZeroPoleFilter(double* In, double* ZeroCoef, double* PoleCoef,
-                               int lengthInOut, int orderCoef, double* Out);
+                               size_t lengthInOut, int orderCoef, double* Out);
 
 
 /***************************** filterbank functions **************************/
@@ -228,6 +228,6 @@
 
 void WebRtcIsac_Dir2Lat(double* a, int orderCoef, float* sth, float* cth);
 
-void WebRtcIsac_AutoCorr(double* r, const double* x, int N, int order);
+void WebRtcIsac_AutoCorr(double* r, const double* x, size_t N, size_t order);
 
 #endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CODEC_H_ */
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c b/webrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c
index 5abe204..019cc89 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c
@@ -18,7 +18,7 @@
 WebRtcIsac_EstimateBandwidth(
     BwEstimatorstr*           bwest_str,
     Bitstr*                   streamdata,
-    int32_t               packet_size,
+    size_t                packet_size,
     uint16_t              rtp_seq_number,
     uint32_t              send_ts,
     uint32_t              arr_ts,
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c b/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c
index 089f26e..d47eb1f 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c
@@ -19,12 +19,15 @@
 
 
 
-void WebRtcIsac_AllPoleFilter(double *InOut, double *Coef, int lengthInOut, int orderCoef){
-
+void WebRtcIsac_AllPoleFilter(double* InOut,
+                              double* Coef,
+                              size_t lengthInOut,
+                              int orderCoef) {
   /* the state of filter is assumed to be in InOut[-1] to InOut[-orderCoef] */
   double scal;
   double sum;
-  int n,k;
+  size_t n;
+  int k;
 
   //if (fabs(Coef[0]-1.0)<0.001) {
   if ( (Coef[0] > 0.9999) && (Coef[0] < 1.0001) )
@@ -53,11 +56,15 @@
 }
 
 
-void WebRtcIsac_AllZeroFilter(double *In, double *Coef, int lengthInOut, int orderCoef, double *Out){
-
+void WebRtcIsac_AllZeroFilter(double* In,
+                              double* Coef,
+                              size_t lengthInOut,
+                              int orderCoef,
+                              double* Out) {
   /* the state of filter is assumed to be in In[-1] to In[-orderCoef] */
 
-  int n, k;
+  size_t n;
+  int k;
   double tmp;
 
   for(n = 0; n < lengthInOut; n++)
@@ -74,9 +81,12 @@
 }
 
 
-
-void WebRtcIsac_ZeroPoleFilter(double *In, double *ZeroCoef, double *PoleCoef, int lengthInOut, int orderCoef, double *Out){
-
+void WebRtcIsac_ZeroPoleFilter(double* In,
+                               double* ZeroCoef,
+                               double* PoleCoef,
+                               size_t lengthInOut,
+                               int orderCoef,
+                               double* Out) {
   /* the state of the zero section is assumed to be in In[-1] to In[-orderCoef] */
   /* the state of the pole section is assumed to be in Out[-1] to Out[-orderCoef] */
 
@@ -85,14 +95,8 @@
 }
 
 
-void WebRtcIsac_AutoCorr(
-    double *r,
-    const double *x,
-    int N,
-    int order
-                        )
-{
-  int  lag, n;
+void WebRtcIsac_AutoCorr(double* r, const double* x, size_t N, size_t order) {
+  size_t  lag, n;
   double sum, prod;
   const double *x_lag;
 
@@ -112,8 +116,8 @@
 }
 
 
-void WebRtcIsac_BwExpand(double *out, double *in, double coef, short length) {
-  int i;
+void WebRtcIsac_BwExpand(double* out, double* in, double coef, size_t length) {
+  size_t i;
   double  chirp;
 
   chirp = coef;
@@ -125,8 +129,10 @@
   }
 }
 
-void WebRtcIsac_WeightingFilter(const double *in, double *weiout, double *whiout, WeightFiltstr *wfdata) {
-
+void WebRtcIsac_WeightingFilter(const double* in,
+                                double* weiout,
+                                double* whiout,
+                                WeightFiltstr* wfdata) {
   double  tmpbuffer[PITCH_FRAME_LEN + PITCH_WLPCBUFLEN];
   double  corr[PITCH_WLPCORDER+1], rc[PITCH_WLPCORDER+1];
   double apol[PITCH_WLPCORDER+1], apolr[PITCH_WLPCORDER+1];
@@ -195,15 +201,13 @@
 static const double APlower[ALLPASSSECTIONS] = {0.1544, 0.744};
 
 
-
-void WebRtcIsac_AllpassFilterForDec(double *InOut,
-                                   const double *APSectionFactors,
-                                   int lengthInOut,
-                                   double *FilterState)
-{
+void WebRtcIsac_AllpassFilterForDec(double* InOut,
+                                    const double* APSectionFactors,
+                                    size_t lengthInOut,
+                                    double* FilterState) {
   //This performs all-pass filtering--a series of first-order all-pass
   //sections is used to filter the input in a cascade manner.
-  int n,j;
+  size_t n,j;
   double temp;
   for (j=0; j<ALLPASSSECTIONS; j++){
     for (n=0;n<lengthInOut;n+=2){
@@ -214,12 +218,11 @@
   }
 }
 
-void WebRtcIsac_DecimateAllpass(const double *in,
-                                double *state_in,        /* array of size: 2*ALLPASSSECTIONS+1 */
-                                int N,                   /* number of input samples */
-                                double *out)             /* array of size N/2 */
-{
-  int n;
+void WebRtcIsac_DecimateAllpass(const double* in,
+                                double* state_in,
+                                size_t N,
+                                double* out) {
+  size_t n;
   double data_vec[PITCH_FRAME_LEN];
 
   /* copy input */
@@ -237,7 +240,6 @@
 }
 
 
-
 /* create high-pass filter coefficients
  * z = 0.998 * exp(j*2*pi*35/8000);
  * p = 0.94 * exp(j*2*pi*140/8000);
@@ -247,9 +249,11 @@
 static const double b_coef[2] = {-1.99524591718270,  0.99600400000000};
 
 /* second order high-pass filter */
-void WebRtcIsac_Highpass(const double *in, double *out, double *state, int N)
-{
-  int k;
+void WebRtcIsac_Highpass(const double* in,
+                         double* out,
+                         double* state,
+                         size_t N) {
+  size_t k;
 
   for (k=0; k<N; k++) {
     *out = *in + state[1];
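
In the filter functions the old `int n, k;` splits in two: sample counts become size_t while the filter order stays int, because order-indexed terms like In[n - k] reach into negative offsets that would wrap if computed unsigned. A reduced sketch of that split, with n cast to a signed type before the subtraction:

    #include <stddef.h>
    #include <stdio.h>

    /* Sample counts are sizes; filter orders stay int because they are
     * used in negative-offset arithmetic, mirroring the
     * `size_t n; int k;` split in filter_functions.c. */
    static void all_zero_filter(const double* in, const double* coef,
                                size_t length, int order, double* out) {
      size_t n;
      int k;
      for (n = 0; n < length; n++) {
        double tmp = coef[0] * in[n];
        /* in[-1]..in[-order] hold the filter state, as in the original. */
        for (k = 1; k <= order; k++)
          tmp += coef[k] * in[(ptrdiff_t)n - k];
        out[n] = tmp;
      }
    }

    int main(void) {
      double buf[2 + 4] = {0, 0, 1, 0, 0, 0};  /* 2 state + 4 samples */
      double coef[3] = {0.5, 0.25, 0.25};
      double out[4];
      size_t i;
      all_zero_filter(buf + 2, coef, 4, 2, out);
      for (i = 0; i < 4; i++)
        printf("%g ", out[i]);
      printf("\n");
      return 0;
    }
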
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c b/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c
index ac211e9..190277e 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c
@@ -507,7 +507,7 @@
   int streamLenLB = 0;
   int streamLenUB = 0;
   int streamLen = 0;
-  int16_t k = 0;
+  size_t k = 0;
   uint8_t garbageLen = 0;
   int32_t bottleneck = 0;
   int16_t bottleneckIdx = 0;
@@ -528,12 +528,12 @@
 
   if (instISAC->in_sample_rate_hz == 48000) {
     /* Samples in 10 ms @ 48 kHz. */
-    const int kNumInputSamples = FRAMESAMPLES_10ms * 3;
+    const size_t kNumInputSamples = FRAMESAMPLES_10ms * 3;
     /* Samples 10 ms @ 32 kHz. */
-    const int kNumOutputSamples = FRAMESAMPLES_10ms * 2;
+    const size_t kNumOutputSamples = FRAMESAMPLES_10ms * 2;
     /* The resampler divides the input into blocks of 3 samples, i.e.
      * kNumInputSamples / 3. */
-    const int kNumResamplerBlocks = FRAMESAMPLES_10ms;
+    const size_t kNumResamplerBlocks = FRAMESAMPLES_10ms;
     int32_t buffer32[FRAMESAMPLES_10ms * 3 + SIZE_RESAMPLER_STATE];
 
     /* Restore last samples from the past to the beginning of the buffer
@@ -1006,7 +1006,7 @@
  */
 int16_t WebRtcIsac_UpdateBwEstimate(ISACStruct* ISAC_main_inst,
                                     const uint8_t* encoded,
-                                    int32_t packet_size,
+                                    size_t packet_size,
                                     uint16_t rtp_seq_number,
                                     uint32_t send_ts,
                                     uint32_t arr_ts) {
@@ -1056,7 +1056,7 @@
 
 static int Decode(ISACStruct* ISAC_main_inst,
                   const uint8_t* encoded,
-                  int16_t lenEncodedBytes,
+                  size_t lenEncodedBytes,
                   int16_t* decoded,
                   int16_t* speechType,
                   int16_t isRCUPayload) {
@@ -1069,13 +1069,14 @@
   float outFrame[MAX_FRAMESAMPLES];
   int16_t outFrameLB[MAX_FRAMESAMPLES];
   int16_t outFrameUB[MAX_FRAMESAMPLES];
-  int numDecodedBytesLB;
+  int numDecodedBytesLBint;
+  size_t numDecodedBytesLB;
   int numDecodedBytesUB;
-  int16_t lenEncodedLBBytes;
+  size_t lenEncodedLBBytes;
   int16_t validChecksum = 1;
   int16_t k;
   uint16_t numLayer;
-  int16_t totSizeBytes;
+  size_t totSizeBytes;
   int16_t err;
 
   ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
@@ -1089,7 +1090,7 @@
     return -1;
   }
 
-  if (lenEncodedBytes <= 0) {
+  if (lenEncodedBytes == 0) {
     /* return error code if the packet length is zero. */
     instISAC->errorCode = ISAC_EMPTY_PACKET;
     return -1;
@@ -1115,11 +1116,12 @@
   /* Regardless of whether the current codec is set up to work in
    * wideband or super-wideband, the decoding of the lower-band
    * has to be performed. */
-  numDecodedBytesLB = WebRtcIsac_DecodeLb(&instISAC->transform_tables,
-                                          outFrame, decInstLB,
-                                          &numSamplesLB, isRCUPayload);
-
-  if ((numDecodedBytesLB < 0) || (numDecodedBytesLB > lenEncodedLBBytes) ||
+  numDecodedBytesLBint = WebRtcIsac_DecodeLb(&instISAC->transform_tables,
+                                             outFrame, decInstLB,
+                                             &numSamplesLB, isRCUPayload);
+  numDecodedBytesLB = (size_t)numDecodedBytesLBint;
+  if ((numDecodedBytesLBint < 0) ||
+      (numDecodedBytesLB > lenEncodedLBBytes) ||
       (numSamplesLB > MAX_FRAMESAMPLES)) {
     instISAC->errorCode = ISAC_LENGTH_MISMATCH;
     return -1;
@@ -1362,7 +1364,7 @@
 
 int WebRtcIsac_Decode(ISACStruct* ISAC_main_inst,
                       const uint8_t* encoded,
-                      int16_t lenEncodedBytes,
+                      size_t lenEncodedBytes,
                       int16_t* decoded,
                       int16_t* speechType) {
   int16_t isRCUPayload = 0;
@@ -1394,7 +1396,7 @@
 
 int WebRtcIsac_DecodeRcu(ISACStruct* ISAC_main_inst,
                          const uint8_t* encoded,
-                         int16_t lenEncodedBytes,
+                         size_t lenEncodedBytes,
                          int16_t* decoded,
                          int16_t* speechType) {
   int16_t isRCUPayload = 1;
@@ -1417,13 +1419,12 @@
  * Output:
  *        - decoded           : The decoded vector
  *
- * Return value               : >0 - number of samples in decoded PLC vector
- *                              -1 - Error
+ * Return value               : Number of samples in decoded PLC vector
  */
-int16_t WebRtcIsac_DecodePlc(ISACStruct* ISAC_main_inst,
-                             int16_t* decoded,
-                             int16_t noOfLostFrames) {
-  int16_t numSamples = 0;
+size_t WebRtcIsac_DecodePlc(ISACStruct* ISAC_main_inst,
+                            int16_t* decoded,
+                            size_t noOfLostFrames) {
+  size_t numSamples = 0;
   ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
 
   /* Limit number of frames to two = 60 millisecond.
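
With lenEncodedBytes now size_t, the old `<= 0` guard in Decode() would be a tautological comparison, so it tightens to `== 0`; a negative length can no longer reach the function at all. A minimal sketch of the guard (the error-code value here is a stand-in, not iSAC's real one):

    #include <stddef.h>
    #include <stdio.h>

    static int last_error;

    /* size_t cannot be negative, so the only invalid length is zero. */
    static int decode(const unsigned char* encoded, size_t len_encoded_bytes) {
      if (len_encoded_bytes == 0) {  /* was: len_encoded_bytes <= 0 */
        last_error = -1;             /* stand-in for ISAC_EMPTY_PACKET */
        return -1;
      }
      (void)encoded;
      return 0;  /* decoding would proceed here */
    }

    int main(void) {
      printf("%d\n", decode((const unsigned char*)"", 0));  /* prints -1 */
      return 0;
    }
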
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc b/webrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
index a751c24..84c712e 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
@@ -97,10 +97,12 @@
   encoded_bytes =  WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
   EXPECT_EQ(0, encoded_bytes);
   encoded_bytes =  WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
+  EXPECT_GT(encoded_bytes, 0);
 
   // Call to update bandwidth estimator with real data.
   EXPECT_EQ(0, WebRtcIsac_UpdateBwEstimate(isac_codec_, bitstream_,
-                                           encoded_bytes, 1, 12345, 56789));
+                                           static_cast<size_t>(encoded_bytes),
+                                           1, 12345, 56789));
 
   // Free memory.
   EXPECT_EQ(0, WebRtcIsac_Free(isac_codec_));
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c b/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c
index 4708a5c..60fc25b 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c
@@ -75,11 +75,11 @@
   0.00155690, 0.00124918, 0.00094895, 0.00066112, 0.00039320, 0.00015881
 };
 
-double WebRtcIsac_LevDurb(double *a, double *k, double *r, int order)
+double WebRtcIsac_LevDurb(double *a, double *k, double *r, size_t order)
 {
 
   double sum, alpha;
-  int m, m_h, i;
+  size_t m, m_h, i;
   alpha = 0; //warning -DH
   a[0] = 1.0;
   if (r[0] < LEVINSON_EPS) { /* if r[0] <= 0, set LPC coeff. to zero */
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h b/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h
index 866c76d..8dfe383 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h
@@ -21,7 +21,7 @@
 #include "settings.h"
 #include "structs.h"
 
-double WebRtcIsac_LevDurb(double *a, double *k, double *r, int order);
+double WebRtcIsac_LevDurb(double *a, double *k, double *r, size_t order);
 
 void WebRtcIsac_GetVars(const double *input, const int16_t *pitchGains_Q12,
                        double *oldEnergy, double *varscale);
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h b/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h
index f5d9356..6fb02b3 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h
@@ -61,11 +61,15 @@
 
 void WebRtcIsac_WeightingFilter(const double *in, double *weiout, double *whiout, WeightFiltstr *wfdata);
 
-void WebRtcIsac_Highpass(const double *in, double *out, double *state, int N);
+void WebRtcIsac_Highpass(const double *in,
+                         double *out,
+                         double *state,
+                         size_t N);
 
 void WebRtcIsac_DecimateAllpass(const double *in,
-                                double *state_in,        /* array of size: 2*ALLPASSSECTIONS+1 */
-                                int N,                   /* number of input samples */
-                                double *out);            /* array of size N/2 */
+                                double *state_in,  /* array of size:
+                                                    *     2*ALLPASSSECTIONS+1 */
+                                size_t N,          /* number of input samples */
+                                double *out);      /* array of size N/2 */
 
 #endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_ESTIMATOR_H_ */
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc b/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
index 8584c76..d385ff4 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
+++ b/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
@@ -21,6 +21,7 @@
 /* include API */
 #include "isac.h"
 #include "utility.h"
+#include "webrtc/base/format_macros.h"
 
 /* Defines */
 #define SEED_FILE "randseed.txt" /* Used when running decoder on garbage data */
@@ -42,7 +43,8 @@
   FILE* inp, *outp, * f_bn = NULL, * vadp = NULL, *bandwidthp;
   int framecnt, endfile;
 
-  int i, errtype, VADusage = 0, packetLossPercent = 0;
+  size_t i;
+  int errtype, VADusage = 0, packetLossPercent = 0;
   int16_t CodingMode;
   int32_t bottleneck = 0;
   int framesize = 30; /* ms */
@@ -51,7 +53,7 @@
   /* Runtime statistics */
   double starttime, runtime, length_file;
 
-  int16_t stream_len = 0;
+  size_t stream_len = 0;
   int declen = 0, declenTC = 0;
   bool lostFrame = false;
 
@@ -75,14 +77,14 @@
   FILE* fy;
   double kbps;
 #endif /* _DEBUG */
-  int totalbits = 0;
+  size_t totalbits = 0;
   int totalsmpls = 0;
 
   /* If using a GNS file */
   FILE* fp_gns = NULL;
   char gns_file[100];
-  short maxStreamLen30 = 0;
-  short maxStreamLen60 = 0;
+  size_t maxStreamLen30 = 0;
+  size_t maxStreamLen60 = 0;
   short sampFreqKHz = 32;
   short samplesIn10Ms;
   short useAssign = 0;
@@ -90,10 +92,10 @@
   bool doTransCoding = false;
   int32_t rateTransCoding = 0;
   uint8_t streamDataTransCoding[1200];
-  int16_t streamLenTransCoding = 0;
+  size_t streamLenTransCoding = 0;
   FILE* transCodingFile = NULL;
   FILE* transcodingBitstream = NULL;
-  uint32_t numTransCodingBytes = 0;
+  size_t numTransCodingBytes = 0;
 
   /* only one structure used for ISAC encoder */
   ISACStruct* ISAC_main_inst = NULL;
@@ -185,7 +187,7 @@
   char transCodingFileName[500];
   int16_t totFileLoop = 0;
   int16_t numFileLoop = 0;
-  for (i = 1; i + 2 < argc; i++) {
+  for (i = 1; i + 2 < static_cast<size_t>(argc); i++) {
     if (!strcmp("-LOOP", argv[i])) {
       i++;
       totFileLoop = (int16_t)atol(argv[i]);
@@ -579,6 +581,8 @@
 
     cur_framesmpls = 0;
     while (1) {
+      int stream_len_int = 0;
+
       /* Read 10 ms speech block */
       endfile = readframe(shortdata, inp, samplesIn10Ms);
 
@@ -598,21 +602,21 @@
 
       /* iSAC encoding */
       if (!(testNum == 3 && framecnt == 0)) {
-        stream_len =
+        stream_len_int =
             WebRtcIsac_Encode(ISAC_main_inst, shortdata, (uint8_t*)streamdata);
-        if ((payloadSize != 0) && (stream_len > payloadSize)) {
+        if ((payloadSize != 0) && (stream_len_int > payloadSize)) {
           if (testNum == 0) {
             printf("\n\n");
           }
 
           printf("\nError: Streamsize out of range %d\n",
-                 stream_len - payloadSize);
+                 stream_len_int - payloadSize);
           cout << flush;
         }
 
         WebRtcIsac_GetUplinkBw(ISAC_main_inst, &sendBN);
 
-        if (stream_len > 0) {
+        if (stream_len_int > 0) {
           if (doTransCoding) {
             int16_t indexStream;
             uint8_t auxUW8;
@@ -620,13 +624,15 @@
             /******************** Main Transcoding stream ********************/
             WebRtcIsac_GetDownLinkBwIndex(ISAC_main_inst, &bnIdxTC,
                                           &jitterInfoTC);
-            streamLenTransCoding = WebRtcIsac_GetNewBitStream(
+            int streamLenTransCoding_int = WebRtcIsac_GetNewBitStream(
                 ISAC_main_inst, bnIdxTC, jitterInfoTC, rateTransCoding,
                 streamDataTransCoding, false);
-            if (streamLenTransCoding < 0) {
+            if (streamLenTransCoding_int < 0) {
               fprintf(stderr, "Error in trans-coding\n");
               exit(0);
             }
+            streamLenTransCoding =
+                static_cast<size_t>(streamLenTransCoding_int);
             auxUW8 = (uint8_t)(((streamLenTransCoding & 0xFF00) >> 8) & 0x00FF);
             if (fwrite(&auxUW8, sizeof(uint8_t), 1, transcodingBitstream) !=
                 1) {
@@ -641,7 +647,7 @@
 
             if (fwrite(streamDataTransCoding, sizeof(uint8_t),
                        streamLenTransCoding, transcodingBitstream) !=
-                static_cast<size_t>(streamLenTransCoding)) {
+                streamLenTransCoding) {
               return -1;
             }
 
@@ -659,13 +665,15 @@
         break;
       }
 
-      if (stream_len < 0) {
+      if (stream_len_int < 0) {
         /* exit if returned with error */
         errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
         fprintf(stderr, "Error in encoder: %d.\n", errtype);
         cout << flush;
         exit(0);
       }
+      stream_len = static_cast<size_t>(stream_len_int);
+
       cur_framesmpls += samplesIn10Ms;
       /* exit encoder loop if the encoder returned a bitstream */
       if (stream_len != 0)
@@ -703,17 +711,24 @@
 
     // RED.
     if (lostFrame) {
-      stream_len = WebRtcIsac_GetRedPayload(
+      int stream_len_int = WebRtcIsac_GetRedPayload(
           ISAC_main_inst, reinterpret_cast<uint8_t*>(streamdata));
+      if (stream_len_int < 0) {
+        fprintf(stderr, "Error getting RED payload\n");
+        exit(0);
+      }
+      stream_len = static_cast<size_t>(stream_len_int);
 
       if (doTransCoding) {
-        streamLenTransCoding = WebRtcIsac_GetNewBitStream(
+        int streamLenTransCoding_int = WebRtcIsac_GetNewBitStream(
             ISAC_main_inst, bnIdxTC, jitterInfoTC, rateTransCoding,
             streamDataTransCoding, true);
-        if (streamLenTransCoding < 0) {
+        if (streamLenTransCoding_int < 0) {
           fprintf(stderr, "Error in RED trans-coding\n");
           exit(0);
         }
+        streamLenTransCoding =
+            static_cast<size_t>(streamLenTransCoding_int);
       }
     }
 
@@ -891,7 +906,7 @@
 #endif /* _DEBUG */
   }
   printf("\n");
-  printf("total bits               = %d bits\n", totalbits);
+  printf("total bits               = %" PRIuS " bits\n", totalbits);
   printf("measured average bitrate = %0.3f kbits/s\n",
          (double)totalbits * (sampFreqKHz) / totalsmpls);
   if (doTransCoding) {
@@ -910,11 +925,11 @@
          (100 * runtime / length_file));
 
   if (maxStreamLen30 != 0) {
-    printf("Maximum payload size 30ms Frames %d bytes (%0.3f kbps)\n",
+    printf("Maximum payload size 30ms Frames %" PRIuS " bytes (%0.3f kbps)\n",
            maxStreamLen30, maxStreamLen30 * 8 / 30.);
   }
   if (maxStreamLen60 != 0) {
-    printf("Maximum payload size 60ms Frames %d bytes (%0.3f kbps)\n",
+    printf("Maximum payload size 60ms Frames %" PRIuS " bytes (%0.3f kbps)\n",
            maxStreamLen60, maxStreamLen60 * 8 / 60.);
   }
   // fprintf(stderr, "\n");
@@ -923,12 +938,12 @@
   fprintf(stderr, "   %0.1f kbps",
           (double)totalbits * (sampFreqKHz) / totalsmpls);
   if (maxStreamLen30 != 0) {
-    fprintf(stderr, "   plmax-30ms %d bytes (%0.0f kbps)", maxStreamLen30,
-            maxStreamLen30 * 8 / 30.);
+    fprintf(stderr, "   plmax-30ms %" PRIuS " bytes (%0.0f kbps)",
+            maxStreamLen30, maxStreamLen30 * 8 / 30.);
   }
   if (maxStreamLen60 != 0) {
-    fprintf(stderr, "   plmax-60ms %d bytes (%0.0f kbps)", maxStreamLen60,
-            maxStreamLen60 * 8 / 60.);
+    fprintf(stderr, "   plmax-60ms %" PRIuS " bytes (%0.0f kbps)",
+            maxStreamLen60, maxStreamLen60 * 8 / 60.);
   }
   if (doTransCoding) {
     fprintf(stderr, "  transcoding rate %.0f kbps",
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc b/webrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc
index a11e408..08061ac 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc
+++ b/webrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc
@@ -51,9 +51,9 @@
 
   short clientCntr;
 
-  unsigned int lenEncodedInBytes[MAX_NUM_CLIENTS];
+  size_t lenEncodedInBytes[MAX_NUM_CLIENTS];
   unsigned int lenAudioIn10ms[MAX_NUM_CLIENTS];
-  unsigned int lenEncodedInBytesTmp[MAX_NUM_CLIENTS];
+  size_t lenEncodedInBytesTmp[MAX_NUM_CLIENTS];
   unsigned int lenAudioIn10msTmp[MAX_NUM_CLIENTS];
   BottleNeckModel* packetData[MAX_NUM_CLIENTS];
 
@@ -189,9 +189,9 @@
   }
 
 
-  short streamLen;
+  size_t streamLen;
   short numSamplesRead;
-  int lenDecodedAudio;
+  size_t lenDecodedAudio;
   short senderIdx;
   short receiverIdx;
 
@@ -282,11 +282,11 @@
       // Encode
 
 
-      streamLen = WebRtcIsac_Encode(codecInstance[senderIdx],
-                                    audioBuff10ms,
-                                    (uint8_t*)bitStream);
+      int streamLen_int = WebRtcIsac_Encode(codecInstance[senderIdx],
+                                            audioBuff10ms,
+                                            (uint8_t*)bitStream);
       int16_t ggg;
-      if (streamLen > 0) {
+      if (streamLen_int > 0) {
         if ((WebRtcIsac_ReadFrameLen(
                 codecInstance[receiverIdx],
                 reinterpret_cast<const uint8_t*>(bitStream),
@@ -295,11 +295,12 @@
       }
 
       // Sanity check
-      if(streamLen < 0)
+      if(streamLen_int < 0)
       {
         printf(" Encoder error in client %d \n", senderIdx + 1);
         return -1;
       }
+      streamLen = static_cast<size_t>(streamLen_int);
 
 
       if(streamLen > 0)
@@ -423,18 +424,18 @@
         }
         /**/
         // Decode
-        lenDecodedAudio = WebRtcIsac_Decode(
+        int lenDecodedAudio_int = WebRtcIsac_Decode(
             codecInstance[receiverIdx],
             reinterpret_cast<const uint8_t*>(bitStream),
             streamLen,
             audioBuff60ms,
             speechType);
-        if(lenDecodedAudio < 0)
+        if(lenDecodedAudio_int < 0)
         {
           printf(" Decoder error in client %d \n", receiverIdx + 1);
           return -1;
         }
-
+        lenDecodedAudio = static_cast<size_t>(lenDecodedAudio_int);
 
         if(encoderSampRate[senderIdx] == 16000)
         {
@@ -442,7 +443,7 @@
                                 resamplerState[receiverIdx]);
           if (fwrite(resampledAudio60ms, sizeof(short), lenDecodedAudio << 1,
                      outFile[receiverIdx]) !=
-              static_cast<size_t>(lenDecodedAudio << 1)) {
+              lenDecodedAudio << 1) {
             return -1;
           }
         }
@@ -450,7 +451,7 @@
         {
           if (fwrite(audioBuff60ms, sizeof(short), lenDecodedAudio,
                      outFile[receiverIdx]) !=
-              static_cast<size_t>(lenDecodedAudio)) {
+              lenDecodedAudio) {
             return -1;
           }
         }
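
fwrite() already returns size_t, so once the element counts are size_t the success checks in SwitchingSampRate.cc lose their static_casts on both sides. A sketch of the resulting check:

    #include <stddef.h>
    #include <stdio.h>

    /* With a size_t count, the comparison against fwrite's size_t
     * return needs no cast on either side. */
    static int write_samples(const short* buf, size_t samples, FILE* f) {
      if (fwrite(buf, sizeof(short), samples, f) != samples)
        return -1;
      return 0;
    }

    int main(void) {
      short audio[4] = {1, 2, 3, 4};
      FILE* f = tmpfile();
      if (f == NULL)
        return 1;
      int rc = write_samples(audio, 4, f);
      fclose(f);
      printf("rc = %d\n", rc);
      return 0;
    }
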
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c b/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
index 214dccd..2f44ca8 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
@@ -26,6 +26,7 @@
 /* include API */
 #include "isac.h"
 #include "utility.h"
+#include "webrtc/base/format_macros.h"
 //#include "commonDefs.h"
 
 /* max number of samples per frame (= 60 ms frame) */
@@ -57,7 +58,7 @@
   /* Runtime statistics */
   double rate;
   double rateRCU;
-  unsigned long totalbits = 0;
+  size_t totalbits = 0;
   unsigned long totalBitsRCU = 0;
   unsigned long totalsmpls = 0;
 
@@ -72,7 +73,7 @@
   int32_t rateLimit;
   ISACStruct* ISAC_main_inst;
 
-  int16_t stream_len = 0;
+  size_t stream_len = 0;
   int declen = 0;
   int16_t err;
   int cur_framesmpls;
@@ -94,7 +95,7 @@
   FILE* averageFile;
   int sampFreqKHz;
   int samplesIn10Ms;
-  int16_t maxStreamLen = 0;
+  size_t maxStreamLen = 0;
   char histFileName[500];
   char averageFileName[500];
   unsigned int hist[600];
@@ -310,22 +311,22 @@
 
     if (onlyDecode) {
       uint8_t auxUW8;
-      size_t auxSizet;
       if (fread(&auxUW8, sizeof(uint8_t), 1, inp) < 1) {
         break;
       }
-      stream_len = ((uint8_t)auxUW8) << 8;
+      stream_len = auxUW8 << 8;
       if (fread(&auxUW8, sizeof(uint8_t), 1, inp) < 1) {
         break;
       }
-      stream_len |= (uint16_t)auxUW8;
-      auxSizet = (size_t)stream_len;
-      if (fread(payload, 1, auxSizet, inp) < auxSizet) {
+      stream_len |= auxUW8;
+      if (fread(payload, 1, stream_len, inp) < stream_len) {
         printf("last payload is corrupted\n");
         break;
       }
     } else {
       while (stream_len == 0) {
+        int stream_len_int;
+
         // Read 10 ms speech block
         endfile = readframe(shortdata, inp, samplesIn10Ms);
         if (endfile) {
@@ -334,15 +335,16 @@
         cur_framesmpls += samplesIn10Ms;
 
         //-------- iSAC encoding ---------
-        stream_len = WebRtcIsac_Encode(ISAC_main_inst, shortdata, payload);
+        stream_len_int = WebRtcIsac_Encode(ISAC_main_inst, shortdata, payload);
 
-        if (stream_len < 0) {
+        if (stream_len_int < 0) {
           // exit if returned with error
           // errType=WebRtcIsac_GetErrorCode(ISAC_main_inst);
           fprintf(stderr, "\nError in encoder\n");
           getc(stdin);
           exit(EXIT_FAILURE);
         }
+        stream_len = (size_t)stream_len_int;
       }
       //===================================================================
       if (endfile) {
@@ -396,15 +398,16 @@
       if (fwrite(&auxUW8, sizeof(uint8_t), 1, outp) != 1) {
         return -1;
       }
-      if (fwrite(payload, 1, stream_len, outp) != (size_t)stream_len) {
+      if (fwrite(payload, 1, stream_len, outp) != stream_len) {
         return -1;
       }
     } else {
       //======================= iSAC decoding ===========================
 
       if ((rand() % 100) < packetLossPercent) {
-        declen = WebRtcIsac_DecodeRcu(ISAC_main_inst, payloadRCU, rcuStreamLen,
-                                      decoded, speechType);
+        declen = WebRtcIsac_DecodeRcu(ISAC_main_inst, payloadRCU,
+                                      (size_t)rcuStreamLen, decoded,
+                                      speechType);
         lostPacketCntr++;
       } else {
         declen = WebRtcIsac_Decode(ISAC_main_inst, payload, stream_len, decoded,
@@ -458,7 +461,7 @@
   printf("\n");
   printf("Measured bit-rate........... %0.3f kbps\n", rate);
   printf("Measured RCU bit-ratre...... %0.3f kbps\n", rateRCU);
-  printf("Maximum bit-rate/payloadsize %0.3f / %d\n",
+  printf("Maximum bit-rate/payloadsize %0.3f / %" PRIuS "\n",
          maxStreamLen * 8 / 0.03, maxStreamLen);
   printf("Measured packet-loss........ %0.1f%% \n",
          100.0f * (float)lostPacketCntr / (float)packetCntr);
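
The decode-only path reads back the two-byte big-endian length header written by the encoder-side tools; with stream_len as size_t the intermediate uint8_t/uint16_t casts disappear. A round-trip sketch of that framing, assuming (as the tools do) that the length fits in 16 bits:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      size_t stream_len = 0x01F4;  /* 500-byte payload, fits in 16 bits */

      /* Writer side: high byte first, then low byte. */
      uint8_t hi = (uint8_t)((stream_len >> 8) & 0xFF);
      uint8_t lo = (uint8_t)(stream_len & 0xFF);

      /* Reader side, as in simpleKenny.c: widen each byte into size_t. */
      size_t read_back = (size_t)hi << 8;
      read_back |= lo;

      printf("wrote %zu, read %zu\n", stream_len, read_back);
      return read_back == stream_len ? 0 : 1;
    }
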
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/util/utility.c b/webrtc/modules/audio_coding/codecs/isac/main/util/utility.c
index 0a2256a..d9c4332 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/util/utility.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/util/utility.c
@@ -135,7 +135,7 @@
 void
 get_arrival_time(
     int              current_framesamples,   /* samples */
-    int              packet_size,            /* bytes */
+    size_t           packet_size,            /* bytes */
     int              bottleneck,             /* excluding headers; bits/s */
     BottleNeckModel* BN_data,
     short            senderSampFreqHz,
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/util/utility.h b/webrtc/modules/audio_coding/codecs/isac/main/util/utility.h
index f9fba94..1bb6d29 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/util/utility.h
+++ b/webrtc/modules/audio_coding/codecs/isac/main/util/utility.h
@@ -99,7 +99,7 @@
 
   void get_arrival_time(
       int              current_framesamples,   /* samples */
-      int              packet_size,            /* bytes */
+      size_t           packet_size,            /* bytes */
       int              bottleneck,             /* excluding headers; bits/s */
       BottleNeckModel* BN_data,
       short            senderSampFreqHz,
diff --git a/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h b/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
index 18d4068..545fc19 100644
--- a/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
+++ b/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
@@ -24,8 +24,8 @@
   MOCK_CONST_METHOD0(SampleRateHz, int());
   MOCK_CONST_METHOD0(NumChannels, int());
   MOCK_CONST_METHOD0(MaxEncodedBytes, size_t());
-  MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, int());
-  MOCK_CONST_METHOD0(Max10MsFramesInAPacket, int());
+  MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, size_t());
+  MOCK_CONST_METHOD0(Max10MsFramesInAPacket, size_t());
   MOCK_CONST_METHOD0(GetTargetBitrate, int());
   MOCK_METHOD1(SetTargetBitrate, void(int));
   MOCK_METHOD1(SetProjectedPacketLossRate, void(double));
@@ -42,8 +42,8 @@
   MOCK_CONST_METHOD0(SampleRateHz, int());
   MOCK_CONST_METHOD0(NumChannels, int());
   MOCK_CONST_METHOD0(MaxEncodedBytes, size_t());
-  MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, int());
-  MOCK_CONST_METHOD0(Max10MsFramesInAPacket, int());
+  MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, size_t());
+  MOCK_CONST_METHOD0(Max10MsFramesInAPacket, size_t());
   MOCK_CONST_METHOD0(GetTargetBitrate, int());
   MOCK_METHOD1(SetTargetBitrate, void(int));
   MOCK_METHOD1(SetProjectedPacketLossRate, void(double));
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
index 9bf1ae3..37ce873 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -34,16 +34,6 @@
 // We always encode at 48 kHz.
 const int kSampleRateHz = 48000;
 
-int16_t ClampInt16(size_t x) {
-  return static_cast<int16_t>(
-      std::min(x, static_cast<size_t>(std::numeric_limits<int16_t>::max())));
-}
-
-int16_t CastInt16(size_t x) {
-  DCHECK_LE(x, static_cast<size_t>(std::numeric_limits<int16_t>::max()));
-  return static_cast<int16_t>(x);
-}
-
 }  // namespace
 
 AudioEncoderOpus::Config::Config()
@@ -72,13 +62,13 @@
 
 AudioEncoderOpus::AudioEncoderOpus(const Config& config)
     : num_10ms_frames_per_packet_(
-          rtc::CheckedDivExact(config.frame_size_ms, 10)),
+          static_cast<size_t>(rtc::CheckedDivExact(config.frame_size_ms, 10))),
       num_channels_(config.num_channels),
       payload_type_(config.payload_type),
       application_(config.application),
       dtx_enabled_(config.dtx_enabled),
-      samples_per_10ms_frame_(rtc::CheckedDivExact(kSampleRateHz, 100) *
-                              num_channels_),
+      samples_per_10ms_frame_(static_cast<size_t>(
+          rtc::CheckedDivExact(kSampleRateHz, 100) * num_channels_)),
       packet_loss_rate_(0.0) {
   CHECK(config.IsOk());
   input_buffer_.reserve(num_10ms_frames_per_packet_ * samples_per_10ms_frame_);
@@ -121,11 +111,11 @@
   return 2 * approx_encoded_bytes;
 }
 
-int AudioEncoderOpus::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderOpus::Num10MsFramesInNextPacket() const {
   return num_10ms_frames_per_packet_;
 }
 
-int AudioEncoderOpus::Max10MsFramesInAPacket() const {
+size_t AudioEncoderOpus::Max10MsFramesInAPacket() const {
   return num_10ms_frames_per_packet_;
 }
 
@@ -195,18 +185,17 @@
     first_timestamp_in_buffer_ = rtp_timestamp;
   input_buffer_.insert(input_buffer_.end(), audio,
                        audio + samples_per_10ms_frame_);
-  if (input_buffer_.size() < (static_cast<size_t>(num_10ms_frames_per_packet_) *
-                              samples_per_10ms_frame_)) {
+  if (input_buffer_.size() <
+      (num_10ms_frames_per_packet_ * samples_per_10ms_frame_)) {
     return EncodedInfo();
   }
   CHECK_EQ(input_buffer_.size(),
-           static_cast<size_t>(num_10ms_frames_per_packet_) *
-           samples_per_10ms_frame_);
+           num_10ms_frames_per_packet_ * samples_per_10ms_frame_);
   int status = WebRtcOpus_Encode(
       inst_, &input_buffer_[0],
-      rtc::CheckedDivExact(CastInt16(input_buffer_.size()),
-                           static_cast<int16_t>(num_channels_)),
-      ClampInt16(max_encoded_bytes), encoded);
+      rtc::CheckedDivExact(input_buffer_.size(),
+                           static_cast<size_t>(num_channels_)),
+      max_encoded_bytes, encoded);
   CHECK_GE(status, 0);  // Fails only if fed invalid data.
   input_buffer_.clear();
   EncodedInfo info;
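
Deleting ClampInt16/CastInt16 is the payoff of widening WebRtcOpus_Encode: buffer sizes no longer have to be squeezed into int16_t on the way in. A sketch of the hazard the old clamp papered over (the 40000-byte buffer is illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      size_t max_encoded_bytes = 40000;  /* > INT16_MAX */
      /* Old style: clamp to int16_t before calling the encoder, which
       * silently shrank the usable buffer to 32767 bytes. */
      int16_t clamped = (int16_t)(max_encoded_bytes > INT16_MAX
                                      ? INT16_MAX
                                      : max_encoded_bytes);
      /* New style: the size_t flows through unmodified. */
      size_t passed_through = max_encoded_bytes;
      printf("clamped=%d passed_through=%zu\n", clamped, passed_through);
      return 0;
    }
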
diff --git a/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h b/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h
index 3393bd5..5fab599 100644
--- a/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h
+++ b/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h
@@ -50,8 +50,8 @@
   int SampleRateHz() const override;
   int NumChannels() const override;
   size_t MaxEncodedBytes() const override;
-  int Num10MsFramesInNextPacket() const override;
-  int Max10MsFramesInAPacket() const override;
+  size_t Num10MsFramesInNextPacket() const override;
+  size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
   void SetTargetBitrate(int bits_per_second) override;
   void SetProjectedPacketLossRate(double fraction) override;
@@ -66,13 +66,13 @@
                              uint8_t* encoded) override;
 
  private:
-  const int num_10ms_frames_per_packet_;
+  const size_t num_10ms_frames_per_packet_;
   const int num_channels_;
   const int payload_type_;
   const ApplicationMode application_;
   int bitrate_bps_;
   const bool dtx_enabled_;
-  const int samples_per_10ms_frame_;
+  const size_t samples_per_10ms_frame_;
   std::vector<int16_t> input_buffer_;
   OpusEncInst* inst_;
   uint32_t first_timestamp_in_buffer_;
diff --git a/webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h b/webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h
index 925cd85..007f5c5 100644
--- a/webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h
+++ b/webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h
@@ -11,6 +11,8 @@
 #ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_OPUS_INTERFACE_H_
 #define WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_OPUS_INTERFACE_H_
 
+#include <stddef.h>
+
 #include "webrtc/typedefs.h"
 
 #ifdef __cplusplus
@@ -66,8 +68,8 @@
  */
 int WebRtcOpus_Encode(OpusEncInst* inst,
                       const int16_t* audio_in,
-                      int16_t samples,
-                      int16_t length_encoded_buffer,
+                      size_t samples,
+                      size_t length_encoded_buffer,
                       uint8_t* encoded);
 
 /****************************************************************************
@@ -237,7 +239,7 @@
  *                             -1 - Error
  */
 int WebRtcOpus_Decode(OpusDecInst* inst, const uint8_t* encoded,
-                      int16_t encoded_bytes, int16_t* decoded,
+                      size_t encoded_bytes, int16_t* decoded,
                       int16_t* audio_type);
 
 /****************************************************************************
@@ -276,7 +278,7 @@
  *                             -1 - Error
  */
 int WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
-                         int16_t encoded_bytes, int16_t* decoded,
+                         size_t encoded_bytes, int16_t* decoded,
                          int16_t* audio_type);
 
 /****************************************************************************
@@ -293,7 +295,7 @@
  */
 int WebRtcOpus_DurationEst(OpusDecInst* inst,
                            const uint8_t* payload,
-                           int payload_length_bytes);
+                           size_t payload_length_bytes);
 
 /* TODO(minyue): Check whether it is needed to add a decoder context to the
  * arguments, like WebRtcOpus_DurationEst(...). In fact, the packet itself tells
@@ -313,7 +315,7 @@
  *                                  0 - No FEC data in the packet.
  */
 int WebRtcOpus_FecDurationEst(const uint8_t* payload,
-                              int payload_length_bytes);
+                              size_t payload_length_bytes);
 
 /****************************************************************************
  * WebRtcOpus_PacketHasFec(...)
@@ -327,7 +329,7 @@
  *                                 1 - the packet contains FEC.
  */
 int WebRtcOpus_PacketHasFec(const uint8_t* payload,
-                            int payload_length_bytes);
+                            size_t payload_length_bytes);
 
 #ifdef __cplusplus
 }  // extern "C"
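
opus_interface.c keeps the narrowing at the library boundary: the WebRTC API now takes size_t, while libopus's opus_encode()/opus_decode() take signed widths, so the (int)/(opus_int32) casts live in the wrapper. A hedged sketch with library_decode as a stand-in for opus_decode; a production wrapper should range-check before narrowing, as shown:

    #include <limits.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-in for opus_decode(), which takes the payload
     * length as a signed 32-bit value. */
    static int library_decode(const unsigned char* data, int len) {
      (void)data;
      return len > 0 ? 480 : -1;  /* pretend 480 samples were decoded */
    }

    /* The wrapper owns the narrowing: callers pass size_t, and the cast
     * happens once, next to the code that knows the library's limits. */
    static int decode_native(const unsigned char* encoded,
                             size_t encoded_bytes) {
      if (encoded_bytes > INT_MAX)
        return -1;  /* not representable for the library */
      return library_decode(encoded, (int)encoded_bytes);
    }

    int main(void) {
      unsigned char payload[3] = {1, 2, 3};
      printf("%d\n", decode_native(payload, sizeof(payload)));
      return 0;
    }
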
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc b/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
index f0ef70a..c86fab7 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
@@ -45,15 +45,15 @@
 
   int block_duration_ms_;
   int sampling_khz_;
-  int block_length_sample_;
+  size_t block_length_sample_;
 
   int channels_;
   int bit_rate_;
 
   size_t data_pointer_;
   size_t loop_length_samples_;
-  int max_bytes_;
-  int encoded_bytes_;
+  size_t max_bytes_;
+  size_t encoded_bytes_;
 
   WebRtcOpusEncInst* opus_encoder_;
   WebRtcOpusDecInst* opus_decoder_;
@@ -122,7 +122,8 @@
 OpusFecTest::OpusFecTest()
     : block_duration_ms_(kOpusBlockDurationMs),
       sampling_khz_(kOpusSamplingKhz),
-      block_length_sample_(block_duration_ms_ * sampling_khz_),
+      block_length_sample_(
+          static_cast<size_t>(block_duration_ms_ * sampling_khz_)),
       data_pointer_(0),
       max_bytes_(0),
       encoded_bytes_(0),
@@ -137,7 +138,7 @@
                                 max_bytes_, &bit_stream_[0]);
   EXPECT_GT(value, 0);
 
-  encoded_bytes_ = value;
+  encoded_bytes_ = static_cast<size_t>(value);
 }
 
 void OpusFecTest::DecodeABlock(bool lost_previous, bool lost_current) {
@@ -154,14 +155,14 @@
     } else {
       value_1 = WebRtcOpus_DecodePlc(opus_decoder_, &out_data_[0], 1);
     }
-    EXPECT_EQ(block_length_sample_, value_1);
+    EXPECT_EQ(static_cast<int>(block_length_sample_), value_1);
   }
 
   if (!lost_current) {
     // Decode current frame.
     value_2 = WebRtcOpus_Decode(opus_decoder_, &bit_stream_[0], encoded_bytes_,
                                 &out_data_[value_1 * channels_], &audio_type);
-    EXPECT_EQ(block_length_sample_, value_2);
+    EXPECT_EQ(static_cast<int>(block_length_sample_), value_2);
   }
 }
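
The static_cast<int> added around |block_length_sample_| above is the recurring pattern in these tests: once one side of an EXPECT_EQ becomes size_t, the comparison mixes signedness, so the side whose value is known to fit is cast. A self-contained sketch of the same idiom (test and constant names are illustrative; gtest include path per the WebRTC checkout):

  #include "testing/gtest/include/gtest/gtest.h"

  TEST(SizeTypeSketch, CastTheKnownInRangeSide) {
    const size_t block_length_samples = 960;  // e.g. 20 ms at 48 kHz.
    const int decoded_samples = 960;          // Decoders still report int.
    // Casting the size_t side keeps EXPECT_EQ free of -Wsign-compare
    // warnings; the value is known to fit in int here.
    EXPECT_EQ(static_cast<int>(block_length_samples), decoded_samples);
  }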
 
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_interface.c b/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
index e250616..e2a8383 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
@@ -80,8 +80,8 @@
 
 int WebRtcOpus_Encode(OpusEncInst* inst,
                       const int16_t* audio_in,
-                      int16_t samples,
-                      int16_t length_encoded_buffer,
+                      size_t samples,
+                      size_t length_encoded_buffer,
                       uint8_t* encoded) {
   int res;
 
@@ -91,9 +91,9 @@
 
   res = opus_encode(inst->encoder,
                     (const opus_int16*)audio_in,
-                    samples,
+                    (int)samples,
                     encoded,
-                    length_encoded_buffer);
+                    (opus_int32)length_encoded_buffer);
 
   if (res == 1) {
     // Indicates DTX since the packet has nothing but a header. In principle,
@@ -260,7 +260,7 @@
 }
 
 /* For the decoder to determine whether to output speech or comfort noise. */
-static int16_t DetermineAudioType(OpusDecInst* inst, int16_t encoded_bytes) {
+static int16_t DetermineAudioType(OpusDecInst* inst, size_t encoded_bytes) {
   // Audio type becomes comfort noise if |encoded_bytes| is 1 and stays
   // that way as long as the following |encoded_bytes| values are 0 or 1.
   if (encoded_bytes == 0 && inst->in_dtx_mode) {
@@ -278,9 +278,9 @@
  * is set to the number of samples needed for PLC in case of losses.
  * It is up to the caller to make sure the value is correct. */
 static int DecodeNative(OpusDecInst* inst, const uint8_t* encoded,
-                        int16_t encoded_bytes, int frame_size,
+                        size_t encoded_bytes, int frame_size,
                         int16_t* decoded, int16_t* audio_type, int decode_fec) {
-  int res = opus_decode(inst->decoder, encoded, encoded_bytes,
+  int res = opus_decode(inst->decoder, encoded, (opus_int32)encoded_bytes,
                         (opus_int16*)decoded, frame_size, decode_fec);
 
   if (res <= 0)
@@ -292,7 +292,7 @@
 }
 
 int WebRtcOpus_Decode(OpusDecInst* inst, const uint8_t* encoded,
-                      int16_t encoded_bytes, int16_t* decoded,
+                      size_t encoded_bytes, int16_t* decoded,
                       int16_t* audio_type) {
   int decoded_samples;
 
@@ -340,7 +340,7 @@
 }
 
 int WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
-                         int16_t encoded_bytes, int16_t* decoded,
+                         size_t encoded_bytes, int16_t* decoded,
                          int16_t* audio_type) {
   int decoded_samples;
   int fec_samples;
@@ -362,9 +362,9 @@
 
 int WebRtcOpus_DurationEst(OpusDecInst* inst,
                            const uint8_t* payload,
-                           int payload_length_bytes) {
+                           size_t payload_length_bytes) {
   int frames, samples;
-  frames = opus_packet_get_nb_frames(payload, payload_length_bytes);
+  frames = opus_packet_get_nb_frames(payload, (opus_int32)payload_length_bytes);
   if (frames < 0) {
     /* Invalid payload data. */
     return 0;
@@ -378,7 +378,7 @@
 }
 
 int WebRtcOpus_FecDurationEst(const uint8_t* payload,
-                              int payload_length_bytes) {
+                              size_t payload_length_bytes) {
   int samples;
   if (WebRtcOpus_PacketHasFec(payload, payload_length_bytes) != 1) {
     return 0;
@@ -393,13 +393,13 @@
 }
 
 int WebRtcOpus_PacketHasFec(const uint8_t* payload,
-                            int payload_length_bytes) {
+                            size_t payload_length_bytes) {
   int frames, channels, payload_length_ms;
   int n;
   opus_int16 frame_sizes[48];
   const unsigned char *frame_data[48];
 
-  if (payload == NULL || payload_length_bytes <= 0)
+  if (payload == NULL || payload_length_bytes == 0)
     return 0;
 
   /* In CELT_ONLY mode, packets should not have FEC. */
@@ -432,8 +432,8 @@
   }
 
   /* The following is to parse the LBRR flags. */
-  if (opus_packet_parse(payload, payload_length_bytes, NULL, frame_data,
-                        frame_sizes, NULL) < 0) {
+  if (opus_packet_parse(payload, (opus_int32)payload_length_bytes, NULL,
+                        frame_data, frame_sizes, NULL) < 0) {
     return 0;
   }
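
Inside the C implementation, the new size_t parameters are narrowed back to opus_int32 at each libopus call. The plain casts are fine for any realistic packet; a defensive helper along these lines would make the assumption explicit (hypothetical, not part of this change):

  #include <climits>
  #include <cstddef>

  // Hypothetical defensive narrowing: libopus takes opus_int32 lengths,
  // so clamp a size_t byte count before casting. Real Opus payloads are
  // far below INT_MAX, so the clamp should never fire.
  inline int ClampLengthToInt(size_t payload_length_bytes) {
    return payload_length_bytes > static_cast<size_t>(INT_MAX)
               ? INT_MAX
               : static_cast<int>(payload_length_bytes);
  }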
 
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc b/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
index b39de49..926bcaf 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
@@ -24,8 +24,8 @@
   void SetUp() override;
   void TearDown() override;
   virtual float EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
-                             int max_bytes, int* encoded_bytes);
-  virtual float DecodeABlock(const uint8_t* bit_stream, int encoded_bytes,
+                             size_t max_bytes, size_t* encoded_bytes);
+  virtual float DecodeABlock(const uint8_t* bit_stream, size_t encoded_bytes,
                              int16_t* out_data);
   WebRtcOpusEncInst* opus_encoder_;
   WebRtcOpusDecInst* opus_decoder_;
@@ -58,19 +58,19 @@
 }
 
 float OpusSpeedTest::EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
-                                  int max_bytes, int* encoded_bytes) {
+                                  size_t max_bytes, size_t* encoded_bytes) {
   clock_t clocks = clock();
   int value = WebRtcOpus_Encode(opus_encoder_, in_data,
                                 input_length_sample_, max_bytes,
                                 bit_stream);
   clocks = clock() - clocks;
   EXPECT_GT(value, 0);
-  *encoded_bytes = value;
+  *encoded_bytes = static_cast<size_t>(value);
   return 1000.0 * clocks / CLOCKS_PER_SEC;
 }
 
 float OpusSpeedTest::DecodeABlock(const uint8_t* bit_stream,
-                                  int encoded_bytes, int16_t* out_data) {
+                                  size_t encoded_bytes, int16_t* out_data) {
   int value;
   int16_t audio_type;
   clock_t clocks = clock();
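
EncodeABlock() above shows the shape every speed test now follows: time the call with clock(), validate the signed return, then publish the byte count through the size_t out-parameter. Condensed into a standalone sketch (|encode| stands in for WebRtcOpus_Encode with its leading arguments bound; the helper is illustrative only):

  #include <cstddef>
  #include <ctime>

  // Times one encode call; converts the int result to size_t only after
  // checking for success. Returns the elapsed time in milliseconds.
  template <typename EncodeFn>
  float TimedEncode(EncodeFn encode, size_t* encoded_bytes) {
    clock_t clocks = clock();
    int value = encode();
    clocks = clock() - clocks;
    *encoded_bytes = value > 0 ? static_cast<size_t>(value) : 0;
    return 1000.0f * clocks / CLOCKS_PER_SEC;
  }
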
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc b/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
index e218a6b..2208f74 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
@@ -25,11 +25,11 @@
 // Maximum number of bytes in output bitstream.
 const size_t kMaxBytes = 1000;
 // Sample rate of Opus, in kHz.
-const int kOpusRateKhz = 48;
+const size_t kOpusRateKhz = 48;
 // Number of samples-per-channel in a 20 ms frame, sampled at 48 kHz.
-const int kOpus20msFrameSamples = kOpusRateKhz * 20;
+const size_t kOpus20msFrameSamples = kOpusRateKhz * 20;
 // Number of samples-per-channel in a 10 ms frame, sampled at 48 kHz.
-const int kOpus10msFrameSamples = kOpusRateKhz * 10;
+const size_t kOpus10msFrameSamples = kOpusRateKhz * 10;
 
 class OpusTest : public TestWithParam<::testing::tuple<int, int>> {
  protected:
@@ -45,7 +45,7 @@
 
   int EncodeDecode(WebRtcOpusEncInst* encoder,
                    const int16_t* input_audio,
-                   int input_samples,
+                   size_t input_samples,
                    WebRtcOpusDecInst* decoder,
                    int16_t* output_audio,
                    int16_t* audio_type);
@@ -58,7 +58,7 @@
 
   AudioLoop speech_data_;
   uint8_t bitstream_[kMaxBytes];
-  int encoded_bytes_;
+  size_t encoded_bytes_;
   int channels_;
   int application_;
 };
@@ -97,15 +97,14 @@
 
 int OpusTest::EncodeDecode(WebRtcOpusEncInst* encoder,
                            const int16_t* input_audio,
-                           int input_samples,
+                           size_t input_samples,
                            WebRtcOpusDecInst* decoder,
                            int16_t* output_audio,
                            int16_t* audio_type) {
-  encoded_bytes_ = WebRtcOpus_Encode(encoder,
-                                    input_audio,
-                                    input_samples, kMaxBytes,
-                                    bitstream_);
-  EXPECT_GE(encoded_bytes_, 0);
+  int encoded_bytes_int = WebRtcOpus_Encode(encoder, input_audio, input_samples,
+                                            kMaxBytes, bitstream_);
+  EXPECT_GE(encoded_bytes_int, 0);
+  encoded_bytes_ = static_cast<size_t>(encoded_bytes_int);
   return WebRtcOpus_Decode(decoder, bitstream_,
                            encoded_bytes_, output_audio,
                            audio_type);
@@ -139,13 +138,14 @@
 
   for (int i = 0; i < 100; ++i) {
     EXPECT_EQ(kOpus20msFrameSamples,
-              EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
-                           kOpus20msFrameSamples, opus_decoder_,
-                           output_data_decode, &audio_type));
+              static_cast<size_t>(EncodeDecode(
+                  opus_encoder_, speech_data_.GetNextBlock(),
+                  kOpus20msFrameSamples, opus_decoder_, output_data_decode,
+                  &audio_type)));
     // If not DTX, it should never enter DTX mode. If DTX, we do not care,
     // since whether it enters DTX depends on the signal type.
     if (!dtx) {
-      EXPECT_GT(encoded_bytes_, 1);
+      EXPECT_GT(encoded_bytes_, 1U);
       EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
       EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
       EXPECT_EQ(0, audio_type);  // Speech.
@@ -156,11 +156,11 @@
   // However, DTX may happen after a while.
   for (int i = 0; i < 30; ++i) {
     EXPECT_EQ(kOpus20msFrameSamples,
-              EncodeDecode(opus_encoder_, silence,
-                           kOpus20msFrameSamples, opus_decoder_,
-                           output_data_decode, &audio_type));
+              static_cast<size_t>(EncodeDecode(
+                  opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
+                  output_data_decode, &audio_type)));
     if (!dtx) {
-      EXPECT_GT(encoded_bytes_, 1);
+      EXPECT_GT(encoded_bytes_, 1U);
       EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
       EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
       EXPECT_EQ(0, audio_type);  // Speech.
@@ -180,17 +180,17 @@
     // DTX mode is maintained for 19 frames.
     for (int i = 0; i < 19; ++i) {
       EXPECT_EQ(kOpus20msFrameSamples,
-                EncodeDecode(opus_encoder_, silence,
-                             kOpus20msFrameSamples, opus_decoder_,
-                             output_data_decode, &audio_type));
+                static_cast<size_t>(EncodeDecode(
+                    opus_encoder_, silence, kOpus20msFrameSamples,
+                    opus_decoder_, output_data_decode, &audio_type)));
       if (dtx) {
-        EXPECT_EQ(0, encoded_bytes_)  // Send 0 byte.
+        EXPECT_EQ(0U, encoded_bytes_)  // Send 0 bytes.
             << "Opus should have entered DTX mode.";
         EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
         EXPECT_EQ(1, opus_decoder_->in_dtx_mode);
         EXPECT_EQ(2, audio_type);  // Comfort noise.
       } else {
-        EXPECT_GT(encoded_bytes_, 1);
+        EXPECT_GT(encoded_bytes_, 1U);
         EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
         EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
         EXPECT_EQ(0, audio_type);  // Speech.
@@ -199,27 +199,27 @@
 
     // Quit DTX after 19 frames.
     EXPECT_EQ(kOpus20msFrameSamples,
-              EncodeDecode(opus_encoder_, silence,
-                           kOpus20msFrameSamples, opus_decoder_,
-                           output_data_decode, &audio_type));
+              static_cast<size_t>(EncodeDecode(
+                  opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
+                  output_data_decode, &audio_type)));
 
-    EXPECT_GT(encoded_bytes_, 1);
+    EXPECT_GT(encoded_bytes_, 1U);
     EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
     EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
     EXPECT_EQ(0, audio_type);  // Speech.
 
     // Enters DTX again immediately.
     EXPECT_EQ(kOpus20msFrameSamples,
-              EncodeDecode(opus_encoder_, silence,
-                           kOpus20msFrameSamples, opus_decoder_,
-                           output_data_decode, &audio_type));
+              static_cast<size_t>(EncodeDecode(
+                  opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
+                  output_data_decode, &audio_type)));
     if (dtx) {
-      EXPECT_EQ(1, encoded_bytes_);  // Send 1 byte.
+      EXPECT_EQ(1U, encoded_bytes_);  // Send 1 byte.
       EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
       EXPECT_EQ(1, opus_decoder_->in_dtx_mode);
       EXPECT_EQ(2, audio_type);  // Comfort noise.
     } else {
-      EXPECT_GT(encoded_bytes_, 1);
+      EXPECT_GT(encoded_bytes_, 1U);
       EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
       EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
       EXPECT_EQ(0, audio_type);  // Speech.
@@ -230,10 +230,10 @@
   if (dtx) {
     // Verify that encoder/decoder can jump out from DTX mode.
     EXPECT_EQ(kOpus20msFrameSamples,
-              EncodeDecode(opus_encoder_, silence,
-                           kOpus20msFrameSamples, opus_decoder_,
-                           output_data_decode, &audio_type));
-    EXPECT_GT(encoded_bytes_, 1);
+              static_cast<size_t>(EncodeDecode(
+                  opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
+                  output_data_decode, &audio_type)));
+    EXPECT_GT(encoded_bytes_, 1U);
     EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
     EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
     EXPECT_EQ(0, audio_type);  // Speech.
@@ -311,9 +311,10 @@
   int16_t audio_type;
   int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
   EXPECT_EQ(kOpus20msFrameSamples,
-            EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
-                         kOpus20msFrameSamples, opus_decoder_,
-                         output_data_decode, &audio_type));
+            static_cast<size_t>(EncodeDecode(
+                opus_encoder_, speech_data_.GetNextBlock(),
+                kOpus20msFrameSamples, opus_decoder_, output_data_decode,
+                &audio_type)));
 
   // Free memory.
   delete[] output_data_decode;
@@ -370,16 +371,17 @@
   int16_t audio_type;
   int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
   EXPECT_EQ(kOpus20msFrameSamples,
-            EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
-                         kOpus20msFrameSamples, opus_decoder_,
-                         output_data_decode, &audio_type));
+            static_cast<size_t>(EncodeDecode(
+                opus_encoder_, speech_data_.GetNextBlock(),
+                kOpus20msFrameSamples, opus_decoder_, output_data_decode,
+                &audio_type)));
 
   EXPECT_EQ(0, WebRtcOpus_DecoderInit(opus_decoder_));
 
   EXPECT_EQ(kOpus20msFrameSamples,
-            WebRtcOpus_Decode(opus_decoder_, bitstream_,
-                              encoded_bytes_, output_data_decode,
-                              &audio_type));
+            static_cast<size_t>(WebRtcOpus_Decode(
+                opus_decoder_, bitstream_, encoded_bytes_, output_data_decode,
+                &audio_type)));
 
   // Free memory.
   delete[] output_data_decode;
@@ -508,14 +510,16 @@
   int16_t audio_type;
   int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
   EXPECT_EQ(kOpus20msFrameSamples,
-            EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
-                         kOpus20msFrameSamples, opus_decoder_,
-                         output_data_decode, &audio_type));
+            static_cast<size_t>(EncodeDecode(
+                opus_encoder_, speech_data_.GetNextBlock(),
+                kOpus20msFrameSamples, opus_decoder_, output_data_decode,
+                &audio_type)));
 
   // Call decoder PLC.
   int16_t* plc_buffer = new int16_t[kOpus20msFrameSamples * channels_];
   EXPECT_EQ(kOpus20msFrameSamples,
-            WebRtcOpus_DecodePlc(opus_decoder_, plc_buffer, 1));
+            static_cast<size_t>(WebRtcOpus_DecodePlc(
+                opus_decoder_, plc_buffer, 1)));
 
   // Free memory.
   delete[] plc_buffer;
@@ -535,24 +539,26 @@
   EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
 
   // 10 ms. We use only the first 10 ms of a 20 ms block.
-  encoded_bytes_ = WebRtcOpus_Encode(opus_encoder_,
-                                     speech_data_.GetNextBlock(),
-                                     kOpus10msFrameSamples, kMaxBytes,
-                                     bitstream_);
-  EXPECT_GE(encoded_bytes_, 0);
+  int encoded_bytes_int = WebRtcOpus_Encode(opus_encoder_,
+                                            speech_data_.GetNextBlock(),
+                                            kOpus10msFrameSamples,
+                                            kMaxBytes, bitstream_);
+  EXPECT_GE(encoded_bytes_int, 0);
   EXPECT_EQ(kOpus10msFrameSamples,
-            WebRtcOpus_DurationEst(opus_decoder_, bitstream_,
-                                   encoded_bytes_));
+            static_cast<size_t>(WebRtcOpus_DurationEst(
+                opus_decoder_, bitstream_,
+                static_cast<size_t>(encoded_bytes_int))));
 
   // 20 ms
-  encoded_bytes_ = WebRtcOpus_Encode(opus_encoder_,
-                                     speech_data_.GetNextBlock(),
-                                     kOpus20msFrameSamples, kMaxBytes,
-                                     bitstream_);
-  EXPECT_GE(encoded_bytes_, 0);
+  encoded_bytes_int = WebRtcOpus_Encode(opus_encoder_,
+                                        speech_data_.GetNextBlock(),
+                                        kOpus20msFrameSamples,
+                                        kMaxBytes, bitstream_);
+  EXPECT_GE(encoded_bytes_int, 0);
   EXPECT_EQ(kOpus20msFrameSamples,
-            WebRtcOpus_DurationEst(opus_decoder_, bitstream_,
-                                   encoded_bytes_));
+            static_cast<size_t>(WebRtcOpus_DurationEst(
+                opus_decoder_, bitstream_,
+                static_cast<size_t>(encoded_bytes_int))));
 
   // Free memory.
   EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
@@ -595,11 +601,13 @@
   encoded_bytes_ = opus_repacketizer_out(rp, bitstream_, kMaxBytes);
 
   EXPECT_EQ(kOpus20msFrameSamples * kPackets,
-            WebRtcOpus_DurationEst(opus_decoder_, bitstream_, encoded_bytes_));
+            static_cast<size_t>(WebRtcOpus_DurationEst(
+                opus_decoder_, bitstream_, encoded_bytes_)));
 
   EXPECT_EQ(kOpus20msFrameSamples * kPackets,
-            WebRtcOpus_Decode(opus_decoder_, bitstream_, encoded_bytes_,
-                              output_data_decode.get(), &audio_type));
+            static_cast<size_t>(WebRtcOpus_Decode(
+                opus_decoder_, bitstream_, encoded_bytes_,
+                output_data_decode.get(), &audio_type)));
 
   // Free memory.
   opus_repacketizer_destroy(rp);
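
The DTX assertions in this file encode a small protocol: a 0-byte result means the encoder stayed silent in DTX, a 1-byte result is a comfort-noise update, and anything larger is active speech. With the byte count unsigned, that classification reads naturally (hypothetical helper mirroring the test's expectations):

  #include <cstddef>

  // Packet classification implied by the DTX tests above: 0 bytes = DTX
  // (nothing sent), 1 byte = comfort-noise update, >1 byte = speech.
  enum class OpusPacketKind { kDtx, kComfortNoise, kSpeech };

  inline OpusPacketKind ClassifyBySize(size_t encoded_bytes) {
    if (encoded_bytes == 0)
      return OpusPacketKind::kDtx;
    if (encoded_bytes == 1)
      return OpusPacketKind::kComfortNoise;
    return OpusPacketKind::kSpeech;
  }
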
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc b/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
index 0c246c3..4ca6fe9 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
@@ -23,10 +23,10 @@
   return AudioEncoderPcm::Config::IsOk();
 }
 
-int16_t AudioEncoderPcm16B::EncodeCall(const int16_t* audio,
-                                       size_t input_len,
-                                       uint8_t* encoded) {
-  return WebRtcPcm16b_Encode(audio, static_cast<int16_t>(input_len), encoded);
+size_t AudioEncoderPcm16B::EncodeCall(const int16_t* audio,
+                                      size_t input_len,
+                                      uint8_t* encoded) {
+  return WebRtcPcm16b_Encode(audio, input_len, encoded);
 }
 
 int AudioEncoderPcm16B::BytesPerSample() const {
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h b/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h
index f02cf92..6a0fb43 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h
@@ -31,9 +31,9 @@
       : AudioEncoderPcm(config, config.sample_rate_hz) {}
 
  protected:
-  int16_t EncodeCall(const int16_t* audio,
-                     size_t input_len,
-                     uint8_t* encoded) override;
+  size_t EncodeCall(const int16_t* audio,
+                    size_t input_len,
+                    uint8_t* encoded) override;
 
   int BytesPerSample() const override;
 };
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h b/webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h
index 1cdf92d..d65d08a 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h
@@ -14,6 +14,8 @@
  * Define the fixed-point numeric formats
  */
 
+#include <stddef.h>
+
 #include "webrtc/typedefs.h"
 
 #ifdef __cplusplus
@@ -36,9 +38,9 @@
  *                                Always equal to twice the len input parameter.
  */
 
-int16_t WebRtcPcm16b_Encode(const int16_t* speech,
-                            int16_t len,
-                            uint8_t* encoded);
+size_t WebRtcPcm16b_Encode(const int16_t* speech,
+                           size_t len,
+                           uint8_t* encoded);
 
 /****************************************************************************
  * WebRtcPcm16b_Decode(...)
@@ -55,9 +57,9 @@
  * Returned value               : Samples in speech
  */
 
-int16_t WebRtcPcm16b_Decode(const uint8_t* encoded,
-                            int16_t len,
-                            int16_t* speech);
+size_t WebRtcPcm16b_Decode(const uint8_t* encoded,
+                           size_t len,
+                           int16_t* speech);
 
 #ifdef __cplusplus
 }
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c b/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
index b6de0b5..120c790 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
@@ -12,10 +12,10 @@
 
 #include "webrtc/typedefs.h"
 
-int16_t WebRtcPcm16b_Encode(const int16_t* speech,
-                            int16_t len,
-                            uint8_t* encoded) {
-  int i;
+size_t WebRtcPcm16b_Encode(const int16_t* speech,
+                           size_t len,
+                           uint8_t* encoded) {
+  size_t i;
   for (i = 0; i < len; ++i) {
     uint16_t s = speech[i];
     encoded[2 * i] = s >> 8;
@@ -24,10 +24,10 @@
   return 2 * len;
 }
 
-int16_t WebRtcPcm16b_Decode(const uint8_t* encoded,
-                            int16_t len,
-                            int16_t* speech) {
-  int i;
+size_t WebRtcPcm16b_Decode(const uint8_t* encoded,
+                           size_t len,
+                           int16_t* speech) {
+  size_t i;
   for (i = 0; i < len / 2; ++i)
     speech[i] = encoded[2 * i] << 8 | encoded[2 * i + 1];
   return len / 2;
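
WebRtcPcm16b_Encode() and _Decode() now state their invariant in the types: encoding |len| samples yields exactly 2 * |len| bytes, and decoding yields them back, with no risk of the old int16_t return overflowing on large buffers. A round-trip sketch (buffer sizing is the caller's responsibility: |bytes| must hold 2 * |len| bytes and |out| must hold |len| samples):

  #include <stddef.h>
  #include <stdint.h>

  #include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"

  // Encodes |len| samples to big-endian bytes and decodes them back.
  // Returns the decoded sample count, which equals |len| by construction.
  size_t Pcm16bRoundTrip(const int16_t* samples, size_t len,
                         uint8_t* bytes, int16_t* out) {
    size_t encoded_bytes = WebRtcPcm16b_Encode(samples, len, bytes);
    return WebRtcPcm16b_Decode(bytes, encoded_bytes, out);
  }
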
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
index 16ba290..dccaf43 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
@@ -41,11 +41,11 @@
   return 2 * speech_encoder_->MaxEncodedBytes();
 }
 
-int AudioEncoderCopyRed::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderCopyRed::Num10MsFramesInNextPacket() const {
   return speech_encoder_->Num10MsFramesInNextPacket();
 }
 
-int AudioEncoderCopyRed::Max10MsFramesInAPacket() const {
+size_t AudioEncoderCopyRed::Max10MsFramesInAPacket() const {
   return speech_encoder_->Max10MsFramesInAPacket();
 }
 
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
index 78e1e9a..644255b 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
@@ -40,8 +40,8 @@
   int NumChannels() const override;
   size_t MaxEncodedBytes() const override;
   int RtpTimestampRateHz() const override;
-  int Num10MsFramesInNextPacket() const override;
-  int Max10MsFramesInAPacket() const override;
+  size_t Num10MsFramesInNextPacket() const override;
+  size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
   void SetTargetBitrate(int bits_per_second) override;
   void SetProjectedPacketLossRate(double fraction) override;
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
index 4debdfa..a1ddf4b 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
@@ -113,13 +113,13 @@
 }
 
 TEST_F(AudioEncoderCopyRedTest, CheckFrameSizePropagation) {
-  EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(17));
-  EXPECT_EQ(17, red_->Num10MsFramesInNextPacket());
+  EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(17U));
+  EXPECT_EQ(17U, red_->Num10MsFramesInNextPacket());
 }
 
 TEST_F(AudioEncoderCopyRedTest, CheckMaxFrameSizePropagation) {
-  EXPECT_CALL(mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(17));
-  EXPECT_EQ(17, red_->Max10MsFramesInAPacket());
+  EXPECT_CALL(mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(17U));
+  EXPECT_EQ(17U, red_->Max10MsFramesInAPacket());
 }
 
 TEST_F(AudioEncoderCopyRedTest, CheckSetBitratePropagation) {
diff --git a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
index c7cafdf..3395721 100644
--- a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
+++ b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
@@ -65,7 +65,8 @@
   memcpy(&in_data_[loop_length_samples_], &in_data_[0],
          input_length_sample_ * channels_ * sizeof(int16_t));
 
-  max_bytes_ = input_length_sample_ * channels_ * sizeof(int16_t);
+  max_bytes_ =
+      static_cast<size_t>(input_length_sample_ * channels_ * sizeof(int16_t));
   out_data_.reset(new int16_t[output_length_sample_ * channels_]);
   bit_stream_.reset(new uint8_t[max_bytes_]);
 
diff --git a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
index 35ac69e..2736c29 100644
--- a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
+++ b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
@@ -36,14 +36,14 @@
   // 3. assigns |encoded_bytes| the length of the bit stream (in bytes),
   // 4. returns the time (in milliseconds) spent on actual encoding.
   virtual float EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
-                             int max_bytes, int* encoded_bytes) = 0;
+                             size_t max_bytes, size_t* encoded_bytes) = 0;
 
   // DecodeABlock(...) does the following:
   // 1. decodes the bit stream in |bit_stream| with a length of |encoded_bytes|
   // (in bytes),
   // 2. saves the decoded audio in |out_data|,
   // 3. returns the time (in milliseconds) spent on actual decoding.
-  virtual float DecodeABlock(const uint8_t* bit_stream, int encoded_bytes,
+  virtual float DecodeABlock(const uint8_t* bit_stream, size_t encoded_bytes,
                              int16_t* out_data) = 0;
 
   // Encodes and decodes audio of |audio_duration| (in seconds) and
@@ -67,9 +67,9 @@
   rtc::scoped_ptr<uint8_t[]> bit_stream_;
 
   // Maximum number of bytes in output bitstream for a frame of audio.
-  int max_bytes_;
+  size_t max_bytes_;
 
-  int encoded_bytes_;
+  size_t encoded_bytes_;
   float encoding_time_ms_;
   float decoding_time_ms_;
   FILE* out_file_;
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc b/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc
index dc59984..b5a86d0 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc
@@ -93,7 +93,8 @@
       AudioFrame output_frame;
       EXPECT_TRUE(acm_->Get10MsAudio(&output_frame));
       EXPECT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
-      const int samples_per_block = output_freq_hz_ * 10 / 1000;
+      const size_t samples_per_block =
+          static_cast<size_t>(output_freq_hz_ * 10 / 1000);
       EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);
       if (expected_output_channels_ != kArbitraryChannels) {
         if (output_frame.speech_type_ == webrtc::AudioFrame::kPLC) {
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc b/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc
index dd570e6..2a0bbe1 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc
@@ -160,7 +160,8 @@
       AudioFrame output_frame;
       EXPECT_EQ(0, acm_->PlayoutData10Ms(output_freq_hz_, &output_frame));
       EXPECT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
-      const int samples_per_block = output_freq_hz_ * 10 / 1000;
+      const size_t samples_per_block =
+          static_cast<size_t>(output_freq_hz_ * 10 / 1000);
       EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);
       if (exptected_output_channels_ != kArbitraryChannels) {
         if (output_frame.speech_type_ == webrtc::AudioFrame::kPLC) {
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc b/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
index 4c11197..1cefeb6 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
@@ -344,7 +344,7 @@
 
 int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
   enum NetEqOutputType type;
-  int samples_per_channel;
+  size_t samples_per_channel;
   int num_channels;
   bool return_silence = false;
 
@@ -394,7 +394,7 @@
   }
 
   // NetEq always returns 10 ms of audio.
-  current_sample_rate_hz_ = samples_per_channel * 100;
+  current_sample_rate_hz_ = static_cast<int>(samples_per_channel * 100);
 
   // Update if resampling is required.
   bool need_resampling = (desired_freq_hz != -1) &&
@@ -403,18 +403,19 @@
   if (need_resampling && !resampled_last_output_frame_) {
     // Prime the resampler with the last frame.
     int16_t temp_output[AudioFrame::kMaxDataSizeSamples];
-    samples_per_channel =
+    int samples_per_channel_int =
         resampler_.Resample10Msec(last_audio_buffer_.get(),
                                   current_sample_rate_hz_,
                                   desired_freq_hz,
                                   num_channels,
                                   AudioFrame::kMaxDataSizeSamples,
                                   temp_output);
-    if (samples_per_channel < 0) {
+    if (samples_per_channel_int < 0) {
       LOG(LERROR) << "AcmReceiver::GetAudio - "
                      "Resampling last_audio_buffer_ failed.";
       return -1;
     }
+    samples_per_channel = static_cast<size_t>(samples_per_channel_int);
   }
 
   // The audio in |audio_buffer_| is transferred to |audio_frame_| below, either
@@ -422,17 +423,18 @@
   // TODO(henrik.lundin) Glitches in the output may appear if the output rate
   // from NetEq changes. See WebRTC issue 3923.
   if (need_resampling) {
-    samples_per_channel =
+    int samples_per_channel_int =
         resampler_.Resample10Msec(audio_buffer_.get(),
                                   current_sample_rate_hz_,
                                   desired_freq_hz,
                                   num_channels,
                                   AudioFrame::kMaxDataSizeSamples,
                                   audio_frame->data_);
-    if (samples_per_channel < 0) {
+    if (samples_per_channel_int < 0) {
       LOG(LERROR) << "AcmReceiver::GetAudio - Resampling audio_buffer_ failed.";
       return -1;
     }
+    samples_per_channel = static_cast<size_t>(samples_per_channel_int);
     resampled_last_output_frame_ = true;
   } else {
     resampled_last_output_frame_ = false;
@@ -448,7 +450,7 @@
 
   audio_frame->num_channels_ = num_channels;
   audio_frame->samples_per_channel_ = samples_per_channel;
-  audio_frame->sample_rate_hz_ = samples_per_channel * 100;
+  audio_frame->sample_rate_hz_ = static_cast<int>(samples_per_channel * 100);
 
   // Should set |vad_activity| before calling SetAudioFrameActivityAndType().
   audio_frame->vad_activity_ = previous_audio_activity_;
@@ -787,10 +789,11 @@
     frame->sample_rate_hz_ = current_sample_rate_hz_;
   }
 
-  frame->samples_per_channel_ = frame->sample_rate_hz_ / 100;  // Always 10 ms.
+  frame->samples_per_channel_ =
+      static_cast<size_t>(frame->sample_rate_hz_ / 100);  // Always 10 ms.
   frame->speech_type_ = AudioFrame::kCNG;
   frame->vad_activity_ = AudioFrame::kVadPassive;
-  int samples = frame->samples_per_channel_ * frame->num_channels_;
+  size_t samples = frame->samples_per_channel_ * frame->num_channels_;
   memset(frame->data_, 0, samples * sizeof(int16_t));
   return true;
 }
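
GetAudio() above repeats one conversion idiom twice: Resample10Msec() keeps returning int so that -1 can signal failure, and the result is cast to size_t only on the success path. Factored out as a sketch (the callable stands in for ACMResampler::Resample10Msec with its arguments bound; illustrative only):

  #include <cstddef>

  // Runs a resample call that reports its output length as int (-1 on
  // error) and converts to size_t only after the sign check.
  template <typename ResampleFn>
  bool ResampleToSizeT(ResampleFn resample, size_t* samples_per_channel) {
    int samples_int = resample();
    if (samples_int < 0)
      return false;  // Caller logs and returns -1, as GetAudio() does.
    *samples_per_channel = static_cast<size_t>(samples_int);
    return true;
  }
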
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc b/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc
index 97d87b1..2650725 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc
@@ -29,9 +29,9 @@
                                  int in_freq_hz,
                                  int out_freq_hz,
                                  int num_audio_channels,
-                                 int out_capacity_samples,
+                                 size_t out_capacity_samples,
                                  int16_t* out_audio) {
-  int in_length = in_freq_hz * num_audio_channels / 100;
+  size_t in_length = static_cast<size_t>(in_freq_hz * num_audio_channels / 100);
   int out_length = out_freq_hz * num_audio_channels / 100;
   if (in_freq_hz == out_freq_hz) {
     if (out_capacity_samples < in_length) {
@@ -39,7 +39,7 @@
       return -1;
     }
     memcpy(out_audio, in_audio, in_length * sizeof(int16_t));
-    return in_length / num_audio_channels;
+    return static_cast<int>(in_length / num_audio_channels);
   }
 
   if (resampler_.InitializeIfNeeded(in_freq_hz, out_freq_hz,
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_resampler.h b/webrtc/modules/audio_coding/main/acm2/acm_resampler.h
index a8fc6b6..a19b0c4 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_resampler.h
+++ b/webrtc/modules/audio_coding/main/acm2/acm_resampler.h
@@ -26,7 +26,7 @@
                      int in_freq_hz,
                      int out_freq_hz,
                      int num_audio_channels,
-                     int out_capacity_samples,
+                     size_t out_capacity_samples,
                      int16_t* out_audio);
 
  private:
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc b/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
index b96db6b..91df16f 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
@@ -29,7 +29,8 @@
     : clock_(0),
       audio_source_(audio_source),
       source_rate_hz_(source_rate_hz),
-      input_block_size_samples_(source_rate_hz_ * kBlockSizeMs / 1000),
+      input_block_size_samples_(
+          static_cast<size_t>(source_rate_hz_ * kBlockSizeMs / 1000)),
       codec_registered_(false),
       test_duration_ms_(test_duration_ms),
       frame_type_(kAudioFrameSpeech),
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test.h b/webrtc/modules/audio_coding/main/acm2/acm_send_test.h
index 4c4db5b..09fe9e6 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test.h
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test.h
@@ -63,7 +63,7 @@
   rtc::scoped_ptr<AudioCoding> acm_;
   InputAudioFile* audio_source_;
   int source_rate_hz_;
-  const int input_block_size_samples_;
+  const size_t input_block_size_samples_;
   AudioFrame input_frame_;
   bool codec_registered_;
   int test_duration_ms_;
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
index 1819d59..74e98d9 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
@@ -31,7 +31,8 @@
       acm_(webrtc::AudioCodingModule::Create(0, &clock_)),
       audio_source_(audio_source),
       source_rate_hz_(source_rate_hz),
-      input_block_size_samples_(source_rate_hz_ * kBlockSizeMs / 1000),
+      input_block_size_samples_(
+          static_cast<size_t>(source_rate_hz_ * kBlockSizeMs / 1000)),
       codec_registered_(false),
       test_duration_ms_(test_duration_ms),
       frame_type_(kAudioFrameSpeech),
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h
index 8cdc298..008e264 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h
@@ -71,7 +71,7 @@
   rtc::scoped_ptr<AudioCodingModule> acm_;
   InputAudioFile* audio_source_;
   int source_rate_hz_;
-  const int input_block_size_samples_;
+  const size_t input_block_size_samples_;
   AudioFrame input_frame_;
   bool codec_registered_;
   int test_duration_ms_;
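
Both send-test constructors now cast the computed block size to size_t in the initializer list; the product is non-negative by construction, so the cast only silences the signed-to-unsigned conversion. The arithmetic as a checked sketch (assuming kBlockSizeMs is 10, as the 10 ms framing in these tests suggests):

  #include <cstddef>

  // 10 ms worth of samples at |rate_hz|; e.g. 48000 Hz -> 480 samples.
  constexpr size_t BlockSizeSamples(int rate_hz, int block_size_ms) {
    return static_cast<size_t>(rate_hz * block_size_ms / 1000);
  }
  static_assert(BlockSizeSamples(48000, 10) == 480, "48 kHz, 10 ms block");
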
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
index 32d60a7..46980d3 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
@@ -76,22 +76,24 @@
 }
 
 // Stereo-to-mono can be used as in-place.
-int DownMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) {
+int DownMix(const AudioFrame& frame,
+            size_t length_out_buff,
+            int16_t* out_buff) {
   if (length_out_buff < frame.samples_per_channel_) {
     return -1;
   }
-  for (int n = 0; n < frame.samples_per_channel_; ++n)
+  for (size_t n = 0; n < frame.samples_per_channel_; ++n)
     out_buff[n] = (frame.data_[2 * n] + frame.data_[2 * n + 1]) >> 1;
   return 0;
 }
 
 // Mono-to-stereo can be used as in-place.
-int UpMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) {
+int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) {
   if (length_out_buff < frame.samples_per_channel_) {
     return -1;
   }
-  for (int n = frame.samples_per_channel_; n > 0; --n) {
-    int i = n - 1;
+  for (size_t n = frame.samples_per_channel_; n != 0; --n) {
+    size_t i = n - 1;
     int16_t sample = frame.data_[i];
     out_buff[2 * i + 1] = sample;
     out_buff[2 * i] = sample;
@@ -338,11 +340,10 @@
 
 int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
                                                InputData* input_data) {
-  if (audio_frame.samples_per_channel_ <= 0) {
+  if (audio_frame.samples_per_channel_ == 0) {
     assert(false);
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
-                 "Cannot Add 10 ms audio, payload length is negative or "
-                 "zero");
+                 "Cannot Add 10 ms audio, payload length is zero");
     return -1;
   }
 
@@ -354,7 +355,7 @@
   }
 
   // The length must match the frequency. We currently just support raw PCM.
-  if ((audio_frame.sample_rate_hz_ / 100) !=
+  if (static_cast<size_t>(audio_frame.sample_rate_hz_ / 100) !=
       audio_frame.samples_per_channel_) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                  "Cannot Add 10 ms audio, input frequency and length doesn't"
@@ -477,17 +478,19 @@
     // The result of the resampler is written to output frame.
     dest_ptr_audio = preprocess_frame_.data_;
 
-    preprocess_frame_.samples_per_channel_ = resampler_.Resample10Msec(
+    int samples_per_channel = resampler_.Resample10Msec(
         src_ptr_audio, in_frame.sample_rate_hz_,
         codec_manager_.CurrentEncoder()->SampleRateHz(),
         preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples,
         dest_ptr_audio);
 
-    if (preprocess_frame_.samples_per_channel_ < 0) {
+    if (samples_per_channel < 0) {
       WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                    "Cannot add 10 ms audio, resampling failed");
       return -1;
     }
+    preprocess_frame_.samples_per_channel_ =
+        static_cast<size_t>(samples_per_channel);
     preprocess_frame_.sample_rate_hz_ =
         codec_manager_.CurrentEncoder()->SampleRateHz();
   }
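
The UpMix() rewrite above is the one place in this file where the switch to size_t changes loop structure: an unsigned index must not be tested with i >= 0 (always true), so the loop counts |n| down to zero and derives the index from it. The same shape in isolation (a sketch; the real UpMix() also checks the output buffer length):

  #include <cstddef>
  #include <cstdint>

  // Mono-to-stereo up-mix, safe for in-place use because it walks the
  // buffer from the back. Note |n != 0|: with size_t, a conventional
  // |i >= 0| countdown would never terminate.
  void UpMixSketch(const int16_t* mono, size_t samples_per_channel,
                   int16_t* stereo /* holds 2 * samples_per_channel */) {
    for (size_t n = samples_per_channel; n != 0; --n) {
      const size_t i = n - 1;
      const int16_t sample = mono[i];  // Read first; |stereo| may alias.
      stereo[2 * i + 1] = sample;
      stereo[2 * i] = sample;
    }
  }
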
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h
index beb49bc..c451854 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h
@@ -248,7 +248,7 @@
   struct InputData {
     uint32_t input_timestamp;
     const int16_t* audio;
-    uint16_t length_per_channel;
+    size_t length_per_channel;
     uint8_t audio_channel;
     // If a re-mix is required (up or down), this buffer will store a re-mixed
     // version of the input.
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
index eea51a3..418ddd1 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
@@ -272,7 +272,8 @@
   EXPECT_TRUE(acm_->Get10MsAudio(&audio_frame));
   EXPECT_EQ(0u, audio_frame.timestamp_);
   EXPECT_GT(audio_frame.num_channels_, 0);
-  EXPECT_EQ(kSampleRateHz / 100, audio_frame.samples_per_channel_);
+  EXPECT_EQ(static_cast<size_t>(kSampleRateHz / 100),
+            audio_frame.samples_per_channel_);
   EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
 }
 
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
index 0af6af8..e5371d0 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
@@ -314,7 +314,8 @@
   EXPECT_EQ(id_, audio_frame.id_);
   EXPECT_EQ(0u, audio_frame.timestamp_);
   EXPECT_GT(audio_frame.num_channels_, 0);
-  EXPECT_EQ(kSampleRateHz / 100, audio_frame.samples_per_channel_);
+  EXPECT_EQ(static_cast<size_t>(kSampleRateHz / 100),
+            audio_frame.samples_per_channel_);
   EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
 }
 
diff --git a/webrtc/modules/audio_coding/main/acm2/codec_manager.cc b/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
index cad6ee9..7b9c7ed 100644
--- a/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
+++ b/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
@@ -326,10 +326,10 @@
   // Make up a CodecInst.
   send_codec_inst_.channels = external_speech_encoder->NumChannels();
   send_codec_inst_.plfreq = external_speech_encoder->SampleRateHz();
-  send_codec_inst_.pacsize =
-      rtc::CheckedDivExact(external_speech_encoder->Max10MsFramesInAPacket() *
-                               send_codec_inst_.plfreq,
-                           100);
+  send_codec_inst_.pacsize = rtc::CheckedDivExact(
+      static_cast<int>(external_speech_encoder->Max10MsFramesInAPacket() *
+                       send_codec_inst_.plfreq),
+      100);
   send_codec_inst_.pltype = -1;  // Not valid.
   send_codec_inst_.rate = -1;    // Not valid.
   static const char kName[] = "external";
diff --git a/webrtc/modules/audio_coding/main/test/PCMFile.cc b/webrtc/modules/audio_coding/main/test/PCMFile.cc
index 4b08f75..d0ae783 100644
--- a/webrtc/modules/audio_coding/main/test/PCMFile.cc
+++ b/webrtc/modules/audio_coding/main/test/PCMFile.cc
@@ -150,7 +150,7 @@
       }
     } else {
       int16_t* stereo_audio = new int16_t[2 * audio_frame.samples_per_channel_];
-      for (int k = 0; k < audio_frame.samples_per_channel_; k++) {
+      for (size_t k = 0; k < audio_frame.samples_per_channel_; k++) {
         stereo_audio[k << 1] = audio_frame.data_[k];
         stereo_audio[(k << 1) + 1] = audio_frame.data_[k];
       }
@@ -172,7 +172,7 @@
   }
 }
 
-void PCMFile::Write10MsData(int16_t* playout_buffer, uint16_t length_smpls) {
+void PCMFile::Write10MsData(int16_t* playout_buffer, size_t length_smpls) {
   if (fwrite(playout_buffer, sizeof(uint16_t), length_smpls, pcm_file_) !=
       length_smpls) {
     return;
diff --git a/webrtc/modules/audio_coding/main/test/PCMFile.h b/webrtc/modules/audio_coding/main/test/PCMFile.h
index c4487b8..8353898 100644
--- a/webrtc/modules/audio_coding/main/test/PCMFile.h
+++ b/webrtc/modules/audio_coding/main/test/PCMFile.h
@@ -36,7 +36,7 @@
 
   int32_t Read10MsData(AudioFrame& audio_frame);
 
-  void Write10MsData(int16_t *playout_buffer, uint16_t length_smpls);
+  void Write10MsData(int16_t *playout_buffer, size_t length_smpls);
   void Write10MsData(AudioFrame& audio_frame);
 
   uint16_t PayloadLength10Ms() const;
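
Write10MsData() is a case where size_t was the right type all along: fwrite() takes and returns size_t, so the old uint16_t parameter only added a narrowing at every call. The call shape in isolation (a sketch; the real helper writes to its member FILE and simply returns on a short write):

  #include <cstddef>
  #include <cstdint>
  #include <cstdio>

  // Writes |length_smpls| 16-bit samples; fwrite() natively traffics in
  // size_t, so no conversions are needed anywhere in the call.
  void Write10Ms(FILE* out, const int16_t* playout_buffer,
                 size_t length_smpls) {
    if (fwrite(playout_buffer, sizeof(int16_t), length_smpls, out) !=
        length_smpls) {
      return;  // Short write; give up quietly, as the test helper does.
    }
  }
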
diff --git a/webrtc/modules/audio_coding/main/test/SpatialAudio.cc b/webrtc/modules/audio_coding/main/test/SpatialAudio.cc
index b28c510..134d975 100644
--- a/webrtc/modules/audio_coding/main/test/SpatialAudio.cc
+++ b/webrtc/modules/audio_coding/main/test/SpatialAudio.cc
@@ -159,13 +159,13 @@
 
   while (!_inFile.EndOfFile()) {
     _inFile.Read10MsData(audioFrame);
-    for (int n = 0; n < audioFrame.samples_per_channel_; n++) {
+    for (size_t n = 0; n < audioFrame.samples_per_channel_; n++) {
       audioFrame.data_[n] = (int16_t) floor(
           audioFrame.data_[n] * leftPanning + 0.5);
     }
     CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));
 
-    for (int n = 0; n < audioFrame.samples_per_channel_; n++) {
+    for (size_t n = 0; n < audioFrame.samples_per_channel_; n++) {
       audioFrame.data_[n] = (int16_t) floor(
           audioFrame.data_[n] * rightToLeftRatio + 0.5);
     }
diff --git a/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc b/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
index ffbbc8c..0bac401 100644
--- a/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
+++ b/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
@@ -32,9 +32,9 @@
 namespace {
 
 double FrameRms(AudioFrame& frame) {
-  int samples = frame.num_channels_ * frame.samples_per_channel_;
+  size_t samples = frame.num_channels_ * frame.samples_per_channel_;
   double rms = 0;
-  for (int n = 0; n < samples; ++n)
+  for (size_t n = 0; n < samples; ++n)
     rms += frame.data_[n] * frame.data_[n];
   rms /= samples;
   rms = sqrt(rms);
@@ -132,9 +132,9 @@
     in_audio_frame.sample_rate_hz_ = codec.plfreq;
     in_audio_frame.num_channels_ = codec.channels;
     in_audio_frame.samples_per_channel_ = codec.plfreq / 100;  // 10 ms.
-    int samples = in_audio_frame.num_channels_ *
+    size_t samples = in_audio_frame.num_channels_ *
         in_audio_frame.samples_per_channel_;
-    for (int n = 0; n < samples; ++n) {
+    for (size_t n = 0; n < samples; ++n) {
       in_audio_frame.data_[n] = kAmp;
     }
 
diff --git a/webrtc/modules/audio_coding/main/test/opus_test.cc b/webrtc/modules/audio_coding/main/test/opus_test.cc
index c61d25a..79124aa 100644
--- a/webrtc/modules/audio_coding/main/test/opus_test.cc
+++ b/webrtc/modules/audio_coding/main/test/opus_test.cc
@@ -270,14 +270,14 @@
 
     if (loop_encode > 0) {
       const int kMaxBytes = 1000;  // Maximum number of bytes for one packet.
-      int16_t bitstream_len_byte;
+      size_t bitstream_len_byte;
       uint8_t bitstream[kMaxBytes];
       for (int i = 0; i < loop_encode; i++) {
         int bitstream_len_byte_int = WebRtcOpus_Encode(
             (channels == 1) ? opus_mono_encoder_ : opus_stereo_encoder_,
             &audio[read_samples], frame_length, kMaxBytes, bitstream);
         ASSERT_GE(bitstream_len_byte_int, 0);
-        bitstream_len_byte = static_cast<int16_t>(bitstream_len_byte_int);
+        bitstream_len_byte = static_cast<size_t>(bitstream_len_byte_int);
 
         // Simulate packet loss by setting |packet_loss_| to "true" in
         // |percent_loss| percent of the loops.
@@ -341,7 +341,8 @@
         audio_frame.samples_per_channel_ * audio_frame.num_channels_);
 
     // Write stand-alone speech to file.
-    out_file_standalone_.Write10MsData(out_audio, decoded_samples * channels);
+    out_file_standalone_.Write10MsData(
+        out_audio, static_cast<size_t>(decoded_samples) * channels);
 
     if (audio_frame.timestamp_ > start_time_stamp) {
       // Number of channels should be the same for both stand-alone and
diff --git a/webrtc/modules/audio_coding/neteq/accelerate.cc b/webrtc/modules/audio_coding/neteq/accelerate.cc
index ad74238..1c36fa8 100644
--- a/webrtc/modules/audio_coding/neteq/accelerate.cc
+++ b/webrtc/modules/audio_coding/neteq/accelerate.cc
@@ -18,11 +18,11 @@
                                             size_t input_length,
                                             bool fast_accelerate,
                                             AudioMultiVector* output,
-                                            int16_t* length_change_samples) {
+                                            size_t* length_change_samples) {
   // Input length must be (almost) 30 ms.
-  static const int k15ms = 120;  // 15 ms = 120 samples at 8 kHz sample rate.
-  if (num_channels_ == 0 || static_cast<int>(input_length) / num_channels_ <
-      (2 * k15ms - 1) * fs_mult_) {
+  static const size_t k15ms = 120;  // 15 ms = 120 samples at 8 kHz sample rate.
+  if (num_channels_ == 0 ||
+      input_length / num_channels_ < (2 * k15ms - 1) * fs_mult_) {
     // Length of input data too short to do accelerate. Simply move all data
     // from input to output.
     output->PushBackInterleaved(input, input_length);
@@ -34,7 +34,7 @@
 
 void Accelerate::SetParametersForPassiveSpeech(size_t /*len*/,
                                                int16_t* best_correlation,
-                                               int* /*peak_index*/) const {
+                                               size_t* /*peak_index*/) const {
   // When the signal does not contain any active speech, the correlation does
   // not matter. Simply set it to zero.
   *best_correlation = 0;
diff --git a/webrtc/modules/audio_coding/neteq/accelerate.h b/webrtc/modules/audio_coding/neteq/accelerate.h
index 684f74b..1238b77 100644
--- a/webrtc/modules/audio_coding/neteq/accelerate.h
+++ b/webrtc/modules/audio_coding/neteq/accelerate.h
@@ -45,14 +45,14 @@
                       size_t input_length,
                       bool fast_accelerate,
                       AudioMultiVector* output,
-                      int16_t* length_change_samples);
+                      size_t* length_change_samples);
 
  protected:
   // Sets the parameters |best_correlation| and |peak_index| to suitable
   // values when the signal contains no active speech.
   void SetParametersForPassiveSpeech(size_t len,
                                      int16_t* best_correlation,
-                                     int* peak_index) const override;
+                                     size_t* peak_index) const override;
 
   // Checks the criteria for performing the time-stretching operation and,
   // if possible, performs the time-stretching.
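
The length gate in Accelerate::Process() now divides two unsigned quantities directly instead of routing the input length through a static_cast<int> first. The check in isolation (a sketch; fs_mult is the sample rate divided by 8000, per the k15ms comment above):

  #include <cstddef>

  // True when |input_length| holds (almost) 30 ms per channel: at least
  // (2 * 120 - 1) * fs_mult samples, one sample short of a full 30 ms.
  bool LongEnoughToAccelerate(size_t input_length, size_t num_channels,
                              size_t fs_mult) {
    const size_t k15ms = 120;  // 15 ms = 120 samples at 8 kHz.
    return num_channels != 0 &&
           input_length / num_channels >= (2 * k15ms - 1) * fs_mult;
  }
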
diff --git a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
index 53dc033..769f0b0 100644
--- a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
+++ b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
@@ -53,10 +53,9 @@
                                      SpeechType* speech_type) {
   DCHECK_EQ(sample_rate_hz, 8000);
   int16_t temp_type = 1;  // Default is speech.
-  int16_t ret = WebRtcG711_DecodeU(encoded, static_cast<int16_t>(encoded_len),
-                                   decoded, &temp_type);
+  size_t ret = WebRtcG711_DecodeU(encoded, encoded_len, decoded, &temp_type);
   *speech_type = ConvertSpeechType(temp_type);
-  return ret;
+  return static_cast<int>(ret);
 }
 
 int AudioDecoderPcmU::PacketDuration(const uint8_t* encoded,
@@ -85,10 +84,9 @@
                                      SpeechType* speech_type) {
   DCHECK_EQ(sample_rate_hz, 8000);
   int16_t temp_type = 1;  // Default is speech.
-  int16_t ret = WebRtcG711_DecodeA(encoded, static_cast<int16_t>(encoded_len),
-                                   decoded, &temp_type);
+  size_t ret = WebRtcG711_DecodeA(encoded, encoded_len, decoded, &temp_type);
   *speech_type = ConvertSpeechType(temp_type);
-  return ret;
+  return static_cast<int>(ret);
 }
 
 int AudioDecoderPcmA::PacketDuration(const uint8_t* encoded,
@@ -120,10 +118,9 @@
   DCHECK(sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
          sample_rate_hz == 32000 || sample_rate_hz == 48000)
       << "Unsupported sample rate " << sample_rate_hz;
-  int16_t ret =
-      WebRtcPcm16b_Decode(encoded, static_cast<int16_t>(encoded_len), decoded);
+  size_t ret = WebRtcPcm16b_Decode(encoded, encoded_len, decoded);
   *speech_type = ConvertSpeechType(1);
-  return ret;
+  return static_cast<int>(ret);
 }
 
 int AudioDecoderPcm16B::PacketDuration(const uint8_t* encoded,
@@ -132,7 +129,7 @@
   return static_cast<int>(encoded_len / (2 * Channels()));
 }
 
-AudioDecoderPcm16BMultiCh::AudioDecoderPcm16BMultiCh(int num_channels)
+AudioDecoderPcm16BMultiCh::AudioDecoderPcm16BMultiCh(size_t num_channels)
     : channels_(num_channels) {
   DCHECK(num_channels > 0);
 }
@@ -163,14 +160,13 @@
                                      SpeechType* speech_type) {
   DCHECK_EQ(sample_rate_hz, 8000);
   int16_t temp_type = 1;  // Default is speech.
-  int ret = WebRtcIlbcfix_Decode(dec_state_, encoded,
-                                 static_cast<int16_t>(encoded_len), decoded,
+  int ret = WebRtcIlbcfix_Decode(dec_state_, encoded, encoded_len, decoded,
                                  &temp_type);
   *speech_type = ConvertSpeechType(temp_type);
   return ret;
 }
 
-int AudioDecoderIlbc::DecodePlc(int num_frames, int16_t* decoded) {
+size_t AudioDecoderIlbc::DecodePlc(size_t num_frames, int16_t* decoded) {
   return WebRtcIlbcfix_NetEqPlc(dec_state_, decoded, num_frames);
 }
 
@@ -204,11 +200,10 @@
                                      SpeechType* speech_type) {
   DCHECK_EQ(sample_rate_hz, 16000);
   int16_t temp_type = 1;  // Default is speech.
-  int16_t ret =
-      WebRtcG722_Decode(dec_state_, encoded, static_cast<int16_t>(encoded_len),
-                        decoded, &temp_type);
+  size_t ret =
+      WebRtcG722_Decode(dec_state_, encoded, encoded_len, decoded, &temp_type);
   *speech_type = ConvertSpeechType(temp_type);
-  return ret;
+  return static_cast<int>(ret);
 }
 
 int AudioDecoderG722::Init() {
@@ -246,29 +241,24 @@
   uint8_t* encoded_deinterleaved = new uint8_t[encoded_len];
   SplitStereoPacket(encoded, encoded_len, encoded_deinterleaved);
   // Decode left and right.
-  int16_t ret = WebRtcG722_Decode(dec_state_left_, encoded_deinterleaved,
-                                  static_cast<int16_t>(encoded_len / 2),
-                                  decoded, &temp_type);
-  if (ret >= 0) {
-    int decoded_len = ret;
-    ret = WebRtcG722_Decode(dec_state_right_,
-                            &encoded_deinterleaved[encoded_len / 2],
-                            static_cast<int16_t>(encoded_len / 2),
-                            &decoded[decoded_len], &temp_type);
-    if (ret == decoded_len) {
-      ret += decoded_len;  // Return total number of samples.
-      // Interleave output.
-      for (int k = ret / 2; k < ret; k++) {
-          int16_t temp = decoded[k];
-          memmove(&decoded[2 * k - ret + 2], &decoded[2 * k - ret + 1],
-                  (ret - k - 1) * sizeof(int16_t));
-          decoded[2 * k - ret + 1] = temp;
-      }
+  size_t decoded_len = WebRtcG722_Decode(dec_state_left_, encoded_deinterleaved,
+                                         encoded_len / 2, decoded, &temp_type);
+  size_t ret = WebRtcG722_Decode(
+      dec_state_right_, &encoded_deinterleaved[encoded_len / 2],
+      encoded_len / 2, &decoded[decoded_len], &temp_type);
+  if (ret == decoded_len) {
+    ret += decoded_len;  // Return total number of samples.
+    // Interleave output.
+    for (size_t k = ret / 2; k < ret; k++) {
+        int16_t temp = decoded[k];
+        memmove(&decoded[2 * k - ret + 2], &decoded[2 * k - ret + 1],
+                (ret - k - 1) * sizeof(int16_t));
+        decoded[2 * k - ret + 1] = temp;
     }
   }
   *speech_type = ConvertSpeechType(temp_type);
   delete [] encoded_deinterleaved;
-  return ret;
+  return static_cast<int>(ret);
 }
 
 size_t AudioDecoderG722Stereo::Channels() const {
@@ -312,7 +302,8 @@
 
 // Opus
 #ifdef WEBRTC_CODEC_OPUS
-AudioDecoderOpus::AudioDecoderOpus(int num_channels) : channels_(num_channels) {
+AudioDecoderOpus::AudioDecoderOpus(size_t num_channels)
+    : channels_(num_channels) {
   DCHECK(num_channels == 1 || num_channels == 2);
   WebRtcOpus_DecoderCreate(&dec_state_, static_cast<int>(channels_));
 }
@@ -328,8 +319,7 @@
                                      SpeechType* speech_type) {
   DCHECK_EQ(sample_rate_hz, 48000);
   int16_t temp_type = 1;  // Default is speech.
-  int ret = WebRtcOpus_Decode(dec_state_, encoded,
-                              static_cast<int16_t>(encoded_len), decoded,
+  int ret = WebRtcOpus_Decode(dec_state_, encoded, encoded_len, decoded,
                               &temp_type);
   if (ret > 0)
     ret *= static_cast<int>(channels_);  // Return total number of samples.
@@ -350,8 +340,7 @@
 
   DCHECK_EQ(sample_rate_hz, 48000);
   int16_t temp_type = 1;  // Default is speech.
-  int ret = WebRtcOpus_DecodeFec(dec_state_, encoded,
-                                 static_cast<int16_t>(encoded_len), decoded,
+  int ret = WebRtcOpus_DecodeFec(dec_state_, encoded, encoded_len, decoded,
                                  &temp_type);
   if (ret > 0)
     ret *= static_cast<int>(channels_);  // Return total number of samples.
@@ -365,8 +354,7 @@
 
 int AudioDecoderOpus::PacketDuration(const uint8_t* encoded,
                                      size_t encoded_len) const {
-  return WebRtcOpus_DurationEst(dec_state_,
-                                encoded, static_cast<int>(encoded_len));
+  return WebRtcOpus_DurationEst(dec_state_, encoded, encoded_len);
 }
 
 int AudioDecoderOpus::PacketDurationRedundant(const uint8_t* encoded,
@@ -376,13 +364,13 @@
     return PacketDuration(encoded, encoded_len);
   }
 
-  return WebRtcOpus_FecDurationEst(encoded, static_cast<int>(encoded_len));
+  return WebRtcOpus_FecDurationEst(encoded, encoded_len);
 }
 
 bool AudioDecoderOpus::PacketHasFec(const uint8_t* encoded,
                                     size_t encoded_len) const {
   int fec;
-  fec = WebRtcOpus_PacketHasFec(encoded, static_cast<int>(encoded_len));
+  fec = WebRtcOpus_PacketHasFec(encoded, encoded_len);
   return (fec == 1);
 }
 
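A note on the pattern above: the decoder hunks all follow the same convention,
lengths are size_t internally, with a single static_cast<int> where the
AudioDecoder interface still returns int. A minimal sketch, with a
hypothetical FakeDecode standing in for the WebRtcXxx_Decode functions:

    #include <stddef.h>
    #include <stdint.h>

    // Hypothetical stand-in for a C codec entry point: one decoded sample
    // per encoded byte, length reported as size_t.
    size_t FakeDecode(const uint8_t* encoded, size_t encoded_len,
                      int16_t* decoded) {
      for (size_t i = 0; i < encoded_len; ++i)
        decoded[i] = static_cast<int16_t>(encoded[i]);
      return encoded_len;
    }

    int DecodeInternal(const uint8_t* encoded, size_t encoded_len,
                       int16_t* decoded) {
      size_t ret = FakeDecode(encoded, encoded_len, decoded);
      // Narrow once at the API boundary instead of truncating encoded_len
      // to int16_t at every call site, as the old code did.
      return static_cast<int>(ret);
    }
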
diff --git a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h
index 202d79d..427a0a6 100644
--- a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h
+++ b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h
@@ -122,7 +122,7 @@
 // of channels is derived from the type.
 class AudioDecoderPcm16BMultiCh : public AudioDecoderPcm16B {
  public:
-  explicit AudioDecoderPcm16BMultiCh(int num_channels);
+  explicit AudioDecoderPcm16BMultiCh(size_t num_channels);
   size_t Channels() const override;
 
  private:
@@ -137,7 +137,7 @@
   AudioDecoderIlbc();
   ~AudioDecoderIlbc() override;
   bool HasDecodePlc() const override;
-  int DecodePlc(int num_frames, int16_t* decoded) override;
+  size_t DecodePlc(size_t num_frames, int16_t* decoded) override;
   int Init() override;
   size_t Channels() const override;
 
@@ -209,7 +209,7 @@
 #ifdef WEBRTC_CODEC_OPUS
 class AudioDecoderOpus : public AudioDecoder {
  public:
-  explicit AudioDecoderOpus(int num_channels);
+  explicit AudioDecoderOpus(size_t num_channels);
   ~AudioDecoderOpus() override;
 
   int Init() override;
diff --git a/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc b/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
index 3983c07..a2ef9d1 100644
--- a/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
@@ -141,7 +141,7 @@
              input_len_samples);
     rtc::scoped_ptr<int16_t[]> interleaved_input(
         new int16_t[channels_ * samples_per_10ms]);
-    for (int i = 0; i < audio_encoder_->Num10MsFramesInNextPacket(); ++i) {
+    for (size_t i = 0; i < audio_encoder_->Num10MsFramesInNextPacket(); ++i) {
       EXPECT_EQ(0u, encoded_info_.encoded_bytes);
 
       // Duplicate the mono input signal to however many channels the test
@@ -348,7 +348,7 @@
                                       output.get(), &speech_type);
     EXPECT_EQ(frame_size_, dec_len);
     // Simply call DecodePlc and verify that we get 0 as return value.
-    EXPECT_EQ(0, decoder_->DecodePlc(1, output.get()));
+    EXPECT_EQ(0U, decoder_->DecodePlc(1, output.get()));
   }
 };
 
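On the 0 -> 0U change above: it is forced by DecodePlc now returning size_t,
since EXPECT_EQ compares its operands with their actual types and a signed
literal would trip -Wsign-compare. A sketch with a hypothetical helper (gtest
include path as used elsewhere in the tree):

    #include <stddef.h>
    #include "testing/gtest/include/gtest/gtest.h"

    size_t FramesGenerated() { return 0; }  // Hypothetical API under test.

    TEST(SignCompareExample, UnsignedLiteral) {
      // 0U keeps both sides of the typed comparison unsigned.
      EXPECT_EQ(0U, FramesGenerated());
    }
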
diff --git a/webrtc/modules/audio_coding/neteq/background_noise.cc b/webrtc/modules/audio_coding/neteq/background_noise.cc
index a59f444..d3df269 100644
--- a/webrtc/modules/audio_coding/neteq/background_noise.cc
+++ b/webrtc/modules/audio_coding/neteq/background_noise.cc
@@ -21,6 +21,9 @@
 
 namespace webrtc {
 
+// static
+const size_t BackgroundNoise::kMaxLpcOrder;
+
 BackgroundNoise::BackgroundNoise(size_t num_channels)
     : num_channels_(num_channels),
       channel_parameters_(new ChannelParameters[num_channels_]),
@@ -150,7 +153,7 @@
 void BackgroundNoise::SetFilterState(size_t channel, const int16_t* input,
                                      size_t length) {
   assert(channel < num_channels_);
-  length = std::min(length, static_cast<size_t>(kMaxLpcOrder));
+  length = std::min(length, kMaxLpcOrder);
   memcpy(channel_parameters_[channel].filter_state, input,
          length * sizeof(int16_t));
 }
@@ -165,7 +168,7 @@
 }
 
 int32_t BackgroundNoise::CalculateAutoCorrelation(
-    const int16_t* signal, int length, int32_t* auto_correlation) const {
+    const int16_t* signal, size_t length, int32_t* auto_correlation) const {
   int16_t signal_max = WebRtcSpl_MaxAbsValueW16(signal, length);
   int correlation_scale = kLogVecLen -
       WebRtcSpl_NormW32(signal_max * signal_max);
@@ -247,7 +250,7 @@
   residual_energy = residual_energy << norm_shift;
 
   // Calculate scale and shift factor.
-  parameters.scale = WebRtcSpl_SqrtFloor(residual_energy);
+  parameters.scale = static_cast<int16_t>(WebRtcSpl_SqrtFloor(residual_energy));
   // Add 13 to the |scale_shift_|, since the random numbers table is in
   // Q13.
   // TODO(hlundin): Move the "13" to where the |scale_shift_| is used?
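
On the new out-of-line "const size_t BackgroundNoise::kMaxLpcOrder;"
definition above: std::min(length, kMaxLpcOrder) now binds a const reference
to the constant, which odr-uses it and so requires a definition in exactly
one translation unit (pre-C++17 rules). The pattern in isolation, with
illustrative names:

    #include <algorithm>
    #include <stddef.h>

    struct Limits {
      static const size_t kMax = 8;  // Declaration with initializer.
    };
    const size_t Limits::kMax;  // Definition; required once a reference
                                // is bound, e.g. by std::min below.

    size_t Clamp(size_t length) {
      return std::min(length, Limits::kMax);
    }
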
diff --git a/webrtc/modules/audio_coding/neteq/background_noise.h b/webrtc/modules/audio_coding/neteq/background_noise.h
index baf1818..9ad12b7 100644
--- a/webrtc/modules/audio_coding/neteq/background_noise.h
+++ b/webrtc/modules/audio_coding/neteq/background_noise.h
@@ -29,7 +29,7 @@
  public:
   // TODO(hlundin): For 48 kHz support, increase kMaxLpcOrder to 10.
   // Will work anyway, but probably sound a little worse.
-  static const int kMaxLpcOrder = 8;  // 32000 / 8000 + 4.
+  static const size_t kMaxLpcOrder = 8;  // 32000 / 8000 + 4.
 
   explicit BackgroundNoise(size_t num_channels);
   virtual ~BackgroundNoise();
@@ -76,9 +76,9 @@
 
  private:
   static const int kThresholdIncrement = 229;  // 0.0035 in Q16.
-  static const int kVecLen = 256;
+  static const size_t kVecLen = 256;
   static const int kLogVecLen = 8;  // log2(kVecLen).
-  static const int kResidualLength = 64;
+  static const size_t kResidualLength = 64;
   static const int16_t kLogResidualLength = 6;  // log2(kResidualLength)
 
   struct ChannelParameters {
@@ -112,7 +112,7 @@
   };
 
   int32_t CalculateAutoCorrelation(const int16_t* signal,
-                                   int length,
+                                   size_t length,
                                    int32_t* auto_correlation) const;
 
   // Increments the energy threshold by a factor 1 + |kThresholdIncrement|.
diff --git a/webrtc/modules/audio_coding/neteq/buffer_level_filter.cc b/webrtc/modules/audio_coding/neteq/buffer_level_filter.cc
index 93f9a55..9054791 100644
--- a/webrtc/modules/audio_coding/neteq/buffer_level_filter.cc
+++ b/webrtc/modules/audio_coding/neteq/buffer_level_filter.cc
@@ -23,16 +23,16 @@
   level_factor_ = 253;
 }
 
-void BufferLevelFilter::Update(int buffer_size_packets,
+void BufferLevelFilter::Update(size_t buffer_size_packets,
                                int time_stretched_samples,
-                               int packet_len_samples) {
+                               size_t packet_len_samples) {
   // Filter:
   // |filtered_current_level_| = |level_factor_| * |filtered_current_level_| +
   //                            (1 - |level_factor_|) * |buffer_size_packets|
   // |level_factor_| and |filtered_current_level_| are in Q8.
   // |buffer_size_packets| is in Q0.
   filtered_current_level_ = ((level_factor_ * filtered_current_level_) >> 8) +
-      ((256 - level_factor_) * buffer_size_packets);
+      ((256 - level_factor_) * static_cast<int>(buffer_size_packets));
 
   // Account for time-scale operations (accelerate and pre-emptive expand).
   if (time_stretched_samples && packet_len_samples > 0) {
@@ -42,7 +42,7 @@
     // Make sure that the filtered value remains non-negative.
     filtered_current_level_ = std::max(0,
         filtered_current_level_ -
-        (time_stretched_samples << 8) / packet_len_samples);
+        (time_stretched_samples << 8) / static_cast<int>(packet_len_samples));
   }
 }
 
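For reference, the Q8 smoothing that Update() performs, as a standalone
sketch (illustrative names, not the real class; level_factor_q8 is 253 in the
code above, roughly 0.988):

    #include <stddef.h>

    // filtered (Q8) <- alpha * filtered + (1 - alpha) * input, alpha in Q8.
    int FilterLevelQ8(int filtered_q8, size_t buffer_size_packets,
                      int level_factor_q8) {
      return ((level_factor_q8 * filtered_q8) >> 8) +
             ((256 - level_factor_q8) * static_cast<int>(buffer_size_packets));
    }
    // With a steady input of 4 packets this converges to 4 << 8 = 1024,
    // i.e. the buffer level itself in Q8.
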
diff --git a/webrtc/modules/audio_coding/neteq/buffer_level_filter.h b/webrtc/modules/audio_coding/neteq/buffer_level_filter.h
index 2d2a888..add3cc4 100644
--- a/webrtc/modules/audio_coding/neteq/buffer_level_filter.h
+++ b/webrtc/modules/audio_coding/neteq/buffer_level_filter.h
@@ -11,6 +11,8 @@
 #ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_BUFFER_LEVEL_FILTER_H_
 #define WEBRTC_MODULES_AUDIO_CODING_NETEQ_BUFFER_LEVEL_FILTER_H_
 
+#include <stddef.h>
+
 #include "webrtc/base/constructormagic.h"
 
 namespace webrtc {
@@ -26,8 +28,8 @@
   // corresponding number of packets, and is subtracted from the filtered
   // value (thus bypassing the filter operation). |packet_len_samples| is the
   // number of audio samples carried in each incoming packet.
-  virtual void Update(int buffer_size_packets, int time_stretched_samples,
-                      int packet_len_samples);
+  virtual void Update(size_t buffer_size_packets, int time_stretched_samples,
+                      size_t packet_len_samples);
 
   // Set the current target buffer level (obtained from
   // DelayManager::base_target_level()). Used to select the appropriate
diff --git a/webrtc/modules/audio_coding/neteq/comfort_noise.cc b/webrtc/modules/audio_coding/neteq/comfort_noise.cc
index da9683b..3fe6607 100644
--- a/webrtc/modules/audio_coding/neteq/comfort_noise.cc
+++ b/webrtc/modules/audio_coding/neteq/comfort_noise.cc
@@ -79,8 +79,7 @@
   CNG_dec_inst* cng_inst = cng_decoder->CngDecoderInstance();
   // The expression &(*output)[0][0] is a pointer to the first element in
   // the first channel.
-  if (WebRtcCng_Generate(cng_inst, &(*output)[0][0],
-                         static_cast<int16_t>(number_of_samples),
+  if (WebRtcCng_Generate(cng_inst, &(*output)[0][0], number_of_samples,
                          new_period) < 0) {
     // Error returned.
     output->Zeros(requested_length);
diff --git a/webrtc/modules/audio_coding/neteq/decision_logic.cc b/webrtc/modules/audio_coding/neteq/decision_logic.cc
index 5fb054c..eb10e65 100644
--- a/webrtc/modules/audio_coding/neteq/decision_logic.cc
+++ b/webrtc/modules/audio_coding/neteq/decision_logic.cc
@@ -24,7 +24,7 @@
 namespace webrtc {
 
 DecisionLogic* DecisionLogic::Create(int fs_hz,
-                                     int output_size_samples,
+                                     size_t output_size_samples,
                                      NetEqPlayoutMode playout_mode,
                                      DecoderDatabase* decoder_database,
                                      const PacketBuffer& packet_buffer,
@@ -56,7 +56,7 @@
 }
 
 DecisionLogic::DecisionLogic(int fs_hz,
-                             int output_size_samples,
+                             size_t output_size_samples,
                              NetEqPlayoutMode playout_mode,
                              DecoderDatabase* decoder_database,
                              const PacketBuffer& packet_buffer,
@@ -95,7 +95,7 @@
   timescale_hold_off_ = kMinTimescaleInterval;
 }
 
-void DecisionLogic::SetSampleRate(int fs_hz, int output_size_samples) {
+void DecisionLogic::SetSampleRate(int fs_hz, size_t output_size_samples) {
   // TODO(hlundin): Change to an enumerator and skip assert.
   assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz ==  32000 || fs_hz == 48000);
   fs_mult_ = fs_hz / 8000;
@@ -104,7 +104,7 @@
 
 Operations DecisionLogic::GetDecision(const SyncBuffer& sync_buffer,
                                       const Expand& expand,
-                                      int decoder_frame_length,
+                                      size_t decoder_frame_length,
                                       const RTPHeader* packet_header,
                                       Modes prev_mode,
                                       bool play_dtmf, bool* reset_decoder) {
@@ -123,9 +123,9 @@
     }
   }
 
-  const int samples_left = static_cast<int>(
-      sync_buffer.FutureLength() - expand.overlap_length());
-  const int cur_size_samples =
+  const size_t samples_left =
+      sync_buffer.FutureLength() - expand.overlap_length();
+  const size_t cur_size_samples =
       samples_left + packet_buffer_.NumSamplesInBuffer(decoder_database_,
                                                        decoder_frame_length);
   LOG(LS_VERBOSE) << "Buffers: " << packet_buffer_.NumPacketsInBuffer() <<
@@ -153,9 +153,10 @@
   }
 }
 
-void DecisionLogic::FilterBufferLevel(int buffer_size_samples,
+void DecisionLogic::FilterBufferLevel(size_t buffer_size_samples,
                                       Modes prev_mode) {
-  const int elapsed_time_ms = output_size_samples_ / (8 * fs_mult_);
+  const int elapsed_time_ms =
+      static_cast<int>(output_size_samples_ / (8 * fs_mult_));
   delay_manager_->UpdateCounters(elapsed_time_ms);
 
   // Do not update buffer history if currently playing CNG since it will bias
@@ -164,7 +165,7 @@
     buffer_level_filter_->SetTargetBufferLevel(
         delay_manager_->base_target_level());
 
-    int buffer_size_packets = 0;
+    size_t buffer_size_packets = 0;
     if (packet_length_samples_ > 0) {
       // Calculate size in packets.
       buffer_size_packets = buffer_size_samples / packet_length_samples_;
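
On the new static_cast in FilterBufferLevel(): it narrows a size_t sample
count to the int milliseconds that DelayManager expects; the arithmetic is
samples divided by samples-per-millisecond at fs = 8000 * fs_mult. In
isolation (illustrative helper):

    #include <stddef.h>

    int ElapsedTimeMs(size_t output_size_samples, int fs_mult) {
      // 8 * fs_mult samples per millisecond at fs = 8000 * fs_mult Hz.
      return static_cast<int>(output_size_samples / (8 * fs_mult));
    }
    // E.g. 480 samples at 48 kHz (fs_mult = 6): 480 / 48 = 10 ms.
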
diff --git a/webrtc/modules/audio_coding/neteq/decision_logic.h b/webrtc/modules/audio_coding/neteq/decision_logic.h
index 672ce93..cb3dba0 100644
--- a/webrtc/modules/audio_coding/neteq/decision_logic.h
+++ b/webrtc/modules/audio_coding/neteq/decision_logic.h
@@ -34,7 +34,7 @@
   // Static factory function which creates different types of objects depending
   // on the |playout_mode|.
   static DecisionLogic* Create(int fs_hz,
-                               int output_size_samples,
+                               size_t output_size_samples,
                                NetEqPlayoutMode playout_mode,
                                DecoderDatabase* decoder_database,
                                const PacketBuffer& packet_buffer,
@@ -43,7 +43,7 @@
 
   // Constructor.
   DecisionLogic(int fs_hz,
-                int output_size_samples,
+                size_t output_size_samples,
                 NetEqPlayoutMode playout_mode,
                 DecoderDatabase* decoder_database,
                 const PacketBuffer& packet_buffer,
@@ -60,7 +60,7 @@
   void SoftReset();
 
   // Sets the sample rate and the output block size.
-  void SetSampleRate(int fs_hz, int output_size_samples);
+  void SetSampleRate(int fs_hz, size_t output_size_samples);
 
   // Returns the operation that should be done next. |sync_buffer| and |expand|
   // are provided for reference. |decoder_frame_length| is the number of samples
@@ -75,7 +75,7 @@
   // return value.
   Operations GetDecision(const SyncBuffer& sync_buffer,
                          const Expand& expand,
-                         int decoder_frame_length,
+                         size_t decoder_frame_length,
                          const RTPHeader* packet_header,
                          Modes prev_mode,
                          bool play_dtmf,
@@ -101,12 +101,12 @@
 
   // Accessors and mutators.
   void set_sample_memory(int32_t value) { sample_memory_ = value; }
-  int generated_noise_samples() const { return generated_noise_samples_; }
-  void set_generated_noise_samples(int value) {
+  size_t generated_noise_samples() const { return generated_noise_samples_; }
+  void set_generated_noise_samples(size_t value) {
     generated_noise_samples_ = value;
   }
-  int packet_length_samples() const { return packet_length_samples_; }
-  void set_packet_length_samples(int value) {
+  size_t packet_length_samples() const { return packet_length_samples_; }
+  void set_packet_length_samples(size_t value) {
     packet_length_samples_ = value;
   }
   void set_prev_time_scale(bool value) { prev_time_scale_ = value; }
@@ -134,7 +134,7 @@
   // Should be implemented by derived classes.
   virtual Operations GetDecisionSpecialized(const SyncBuffer& sync_buffer,
                                             const Expand& expand,
-                                            int decoder_frame_length,
+                                            size_t decoder_frame_length,
                                             const RTPHeader* packet_header,
                                             Modes prev_mode,
                                             bool play_dtmf,
@@ -142,18 +142,18 @@
 
   // Updates the |buffer_level_filter_| with the current buffer level
   // |buffer_size_packets|.
-  void FilterBufferLevel(int buffer_size_packets, Modes prev_mode);
+  void FilterBufferLevel(size_t buffer_size_packets, Modes prev_mode);
 
   DecoderDatabase* decoder_database_;
   const PacketBuffer& packet_buffer_;
   DelayManager* delay_manager_;
   BufferLevelFilter* buffer_level_filter_;
   int fs_mult_;
-  int output_size_samples_;
+  size_t output_size_samples_;
   CngState cng_state_;  // Remember if comfort noise is interrupted by other
                         // event (e.g., DTMF).
-  int generated_noise_samples_;
-  int packet_length_samples_;
+  size_t generated_noise_samples_;
+  size_t packet_length_samples_;
   int sample_memory_;
   bool prev_time_scale_;
   int timescale_hold_off_;
diff --git a/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc b/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc
index 08a4c4c..ddea644 100644
--- a/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc
+++ b/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc
@@ -22,7 +22,7 @@
 Operations DecisionLogicFax::GetDecisionSpecialized(
     const SyncBuffer& sync_buffer,
     const Expand& expand,
-    int decoder_frame_length,
+    size_t decoder_frame_length,
     const RTPHeader* packet_header,
     Modes prev_mode,
     bool play_dtmf,
diff --git a/webrtc/modules/audio_coding/neteq/decision_logic_fax.h b/webrtc/modules/audio_coding/neteq/decision_logic_fax.h
index d9f8db9..861e2fa 100644
--- a/webrtc/modules/audio_coding/neteq/decision_logic_fax.h
+++ b/webrtc/modules/audio_coding/neteq/decision_logic_fax.h
@@ -23,7 +23,7 @@
  public:
   // Constructor.
   DecisionLogicFax(int fs_hz,
-                   int output_size_samples,
+                   size_t output_size_samples,
                    NetEqPlayoutMode playout_mode,
                    DecoderDatabase* decoder_database,
                    const PacketBuffer& packet_buffer,
@@ -46,7 +46,7 @@
   // remain true if it was true before the call).
   Operations GetDecisionSpecialized(const SyncBuffer& sync_buffer,
                                     const Expand& expand,
-                                    int decoder_frame_length,
+                                    size_t decoder_frame_length,
                                     const RTPHeader* packet_header,
                                     Modes prev_mode,
                                     bool play_dtmf,
diff --git a/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc b/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
index e985ee0..d3f6fa6 100644
--- a/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
+++ b/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
@@ -27,7 +27,7 @@
 Operations DecisionLogicNormal::GetDecisionSpecialized(
     const SyncBuffer& sync_buffer,
     const Expand& expand,
-    int decoder_frame_length,
+    size_t decoder_frame_length,
     const RTPHeader* packet_header,
     Modes prev_mode,
     bool play_dtmf,
@@ -149,7 +149,7 @@
 Operations DecisionLogicNormal::FuturePacketAvailable(
     const SyncBuffer& sync_buffer,
     const Expand& expand,
-    int decoder_frame_length,
+    size_t decoder_frame_length,
     Modes prev_mode,
     uint32_t target_timestamp,
     uint32_t available_timestamp,
@@ -172,9 +172,9 @@
     }
   }
 
-  const int samples_left = static_cast<int>(sync_buffer.FutureLength() -
-      expand.overlap_length());
-  const int cur_size_samples = samples_left +
+  const size_t samples_left =
+      sync_buffer.FutureLength() - expand.overlap_length();
+  const size_t cur_size_samples = samples_left +
       packet_buffer_.NumPacketsInBuffer() * decoder_frame_length;
 
   // If previous was comfort noise, then no merge is needed.
@@ -205,7 +205,8 @@
   // fs_mult_ * 8 = fs / 1000.)
   if (prev_mode == kModeExpand ||
       (decoder_frame_length < output_size_samples_ &&
-       cur_size_samples > kAllowMergeWithoutExpandMs * fs_mult_ * 8)) {
+       cur_size_samples >
+           static_cast<size_t>(kAllowMergeWithoutExpandMs * fs_mult_ * 8))) {
     return kMerge;
   } else if (play_dtmf) {
     // Play DTMF instead of expand.
diff --git a/webrtc/modules/audio_coding/neteq/decision_logic_normal.h b/webrtc/modules/audio_coding/neteq/decision_logic_normal.h
index 047663f..7867407 100644
--- a/webrtc/modules/audio_coding/neteq/decision_logic_normal.h
+++ b/webrtc/modules/audio_coding/neteq/decision_logic_normal.h
@@ -23,7 +23,7 @@
  public:
   // Constructor.
   DecisionLogicNormal(int fs_hz,
-                      int output_size_samples,
+                      size_t output_size_samples,
                       NetEqPlayoutMode playout_mode,
                       DecoderDatabase* decoder_database,
                       const PacketBuffer& packet_buffer,
@@ -50,7 +50,7 @@
   // remain true if it was true before the call).
   Operations GetDecisionSpecialized(const SyncBuffer& sync_buffer,
                                     const Expand& expand,
-                                    int decoder_frame_length,
+                                    size_t decoder_frame_length,
                                     const RTPHeader* packet_header,
                                     Modes prev_mode,
                                     bool play_dtmf,
@@ -61,7 +61,7 @@
   virtual Operations FuturePacketAvailable(
       const SyncBuffer& sync_buffer,
       const Expand& expand,
-      int decoder_frame_length,
+      size_t decoder_frame_length,
       Modes prev_mode,
       uint32_t target_timestamp,
       uint32_t available_timestamp,
diff --git a/webrtc/modules/audio_coding/neteq/delay_manager.cc b/webrtc/modules/audio_coding/neteq/delay_manager.cc
index a935561..e7f76f6 100644
--- a/webrtc/modules/audio_coding/neteq/delay_manager.cc
+++ b/webrtc/modules/audio_coding/neteq/delay_manager.cc
@@ -22,7 +22,7 @@
 
 namespace webrtc {
 
-DelayManager::DelayManager(int max_packets_in_buffer,
+DelayManager::DelayManager(size_t max_packets_in_buffer,
                            DelayPeakDetector* peak_detector)
     : first_packet_received_(false),
       max_packets_in_buffer_(max_packets_in_buffer),
@@ -239,7 +239,8 @@
   }
 
   // Shift to Q8, then 75%.
-  int max_buffer_packets_q8 = (3 * (max_packets_in_buffer_ << 8)) / 4;
+  int max_buffer_packets_q8 =
+      static_cast<int>((3 * (max_packets_in_buffer_ << 8)) / 4);
   target_level_ = std::min(target_level_, max_buffer_packets_q8);
 
   // Sanity check, at least 1 packet (in Q8).
@@ -389,7 +390,8 @@
   // |max_packets_in_buffer_|.
   if ((maximum_delay_ms_ > 0 && delay_ms > maximum_delay_ms_) ||
       (packet_len_ms_ > 0 &&
-          delay_ms > 3 * max_packets_in_buffer_ * packet_len_ms_ / 4)) {
+       delay_ms >
+           static_cast<int>(3 * max_packets_in_buffer_ * packet_len_ms_ / 4))) {
     return false;
   }
   minimum_delay_ms_ = delay_ms;
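
On the 75% cap above: it keeps the Q8 target level at or below three quarters
of the buffer capacity, and the cast is now explicit because
max_packets_in_buffer_ is size_t. As a standalone sketch:

    #include <algorithm>
    #include <stddef.h>

    int CapTargetLevelQ8(int target_level_q8, size_t max_packets_in_buffer) {
      // 75% of capacity, shifted to Q8, narrowed once for the comparison.
      int max_buffer_packets_q8 =
          static_cast<int>((3 * (max_packets_in_buffer << 8)) / 4);
      return std::min(target_level_q8, max_buffer_packets_q8);
    }
    // E.g. a 50-packet buffer caps the target at 9600 in Q8, i.e. 37.5
    // packets.
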
diff --git a/webrtc/modules/audio_coding/neteq/delay_manager.h b/webrtc/modules/audio_coding/neteq/delay_manager.h
index 33c4a40..b0d3f2e 100644
--- a/webrtc/modules/audio_coding/neteq/delay_manager.h
+++ b/webrtc/modules/audio_coding/neteq/delay_manager.h
@@ -32,7 +32,7 @@
   // buffer can hold no more than |max_packets_in_buffer| packets (i.e., this
   // is the number of packet slots in the buffer). Supply a PeakDetector
   // object to the DelayManager.
-  DelayManager(int max_packets_in_buffer, DelayPeakDetector* peak_detector);
+  DelayManager(size_t max_packets_in_buffer, DelayPeakDetector* peak_detector);
 
   virtual ~DelayManager();
 
@@ -132,7 +132,7 @@
   void LimitTargetLevel();
 
   bool first_packet_received_;
-  const int max_packets_in_buffer_;  // Capacity of the packet buffer.
+  const size_t max_packets_in_buffer_;  // Capacity of the packet buffer.
   IATVector iat_vector_;  // Histogram of inter-arrival times.
   int iat_factor_;  // Forgetting factor for updating the IAT histogram (Q15).
   int packet_iat_count_ms_;  // Milliseconds elapsed since last packet.
diff --git a/webrtc/modules/audio_coding/neteq/dsp_helper.cc b/webrtc/modules/audio_coding/neteq/dsp_helper.cc
index 3e5c61d..4188914 100644
--- a/webrtc/modules/audio_coding/neteq/dsp_helper.cc
+++ b/webrtc/modules/audio_coding/neteq/dsp_helper.cc
@@ -99,13 +99,13 @@
   return end_factor;
 }
 
-void DspHelper::PeakDetection(int16_t* data, int data_length,
-                              int num_peaks, int fs_mult,
-                              int* peak_index, int16_t* peak_value) {
-  int16_t min_index = 0;
-  int16_t max_index = 0;
+void DspHelper::PeakDetection(int16_t* data, size_t data_length,
+                              size_t num_peaks, int fs_mult,
+                              size_t* peak_index, int16_t* peak_value) {
+  size_t min_index = 0;
+  size_t max_index = 0;
 
-  for (int i = 0; i <= num_peaks - 1; i++) {
+  for (size_t i = 0; i < num_peaks; i++) {  // Avoid size_t underflow at 0.
     if (num_peaks == 1) {
       // Single peak.  The parabola fit assumes that an extra point is
       // available; worst case it gets a zero on the high end of the signal.
@@ -148,7 +148,7 @@
 }
 
 void DspHelper::ParabolicFit(int16_t* signal_points, int fs_mult,
-                             int* peak_index, int16_t* peak_value) {
+                             size_t* peak_index, int16_t* peak_value) {
   uint16_t fit_index[13];
   if (fs_mult == 1) {
     fit_index[0] = 0;
@@ -235,16 +235,16 @@
   }
 }
 
-int DspHelper::MinDistortion(const int16_t* signal, int min_lag,
-                             int max_lag, int length,
-                             int32_t* distortion_value) {
-  int best_index = 0;
+size_t DspHelper::MinDistortion(const int16_t* signal, size_t min_lag,
+                                size_t max_lag, size_t length,
+                                int32_t* distortion_value) {
+  size_t best_index = 0;
   int32_t min_distortion = WEBRTC_SPL_WORD32_MAX;
-  for (int i = min_lag; i <= max_lag; i++) {
+  for (size_t i = min_lag; i <= max_lag; i++) {
     int32_t sum_diff = 0;
     const int16_t* data1 = signal;
     const int16_t* data2 = signal - i;
-    for (int j = 0; j < length; j++) {
+    for (size_t j = 0; j < length; j++) {
       sum_diff += WEBRTC_SPL_ABS_W32(data1[j] - data2[j]);
     }
     // Compare with previous minimum.
@@ -293,15 +293,15 @@
 }
 
 int DspHelper::DownsampleTo4kHz(const int16_t* input, size_t input_length,
-                                int output_length, int input_rate_hz,
+                                size_t output_length, int input_rate_hz,
                                 bool compensate_delay, int16_t* output) {
   // Set filter parameters depending on input frequency.
   // NOTE: The phase delay values are wrong compared to the true phase delay
   // of the filters. However, the error is preserved (through the +1 term) for
   // consistency.
   const int16_t* filter_coefficients;  // Filter coefficients.
-  int16_t filter_length;  // Number of coefficients.
-  int16_t filter_delay;  // Phase delay in samples.
+  size_t filter_length;  // Number of coefficients.
+  size_t filter_delay;  // Phase delay in samples.
   int16_t factor;  // Conversion rate (inFsHz / 8000).
   switch (input_rate_hz) {
     case 8000: {
@@ -345,9 +345,8 @@
 
   // Returns -1 if input signal is too short; 0 otherwise.
   return WebRtcSpl_DownsampleFast(
-      &input[filter_length - 1], static_cast<int>(input_length) -
-      (filter_length - 1), output, output_length, filter_coefficients,
-      filter_length, factor, filter_delay);
+      &input[filter_length - 1], input_length - filter_length + 1, output,
+      output_length, filter_coefficients, filter_length, factor, filter_delay);
 }
 
 }  // namespace webrtc
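
For readers without the SPL sources at hand, a plain-C++ restatement of the
sum-abs-diff search that MinDistortion() performs (not bit-exact; |signal|
must be preceded by at least max_lag samples of history):

    #include <stddef.h>
    #include <stdint.h>
    #include <cstdlib>
    #include <limits>

    size_t FindBestLag(const int16_t* signal, size_t min_lag, size_t max_lag,
                       size_t length, int32_t* distortion_value) {
      size_t best_lag = min_lag;
      int32_t min_distortion = std::numeric_limits<int32_t>::max();
      for (size_t lag = min_lag; lag <= max_lag; ++lag) {
        const int16_t* delayed = signal - lag;  // Displaced copy of itself.
        int32_t sum_diff = 0;
        for (size_t j = 0; j < length; ++j)
          sum_diff += std::abs(signal[j] - delayed[j]);
        if (sum_diff < min_distortion) {
          min_distortion = sum_diff;
          best_lag = lag;
        }
      }
      *distortion_value = min_distortion;
      return best_lag;
    }
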
diff --git a/webrtc/modules/audio_coding/neteq/dsp_helper.h b/webrtc/modules/audio_coding/neteq/dsp_helper.h
index f903256..c40d10a 100644
--- a/webrtc/modules/audio_coding/neteq/dsp_helper.h
+++ b/webrtc/modules/audio_coding/neteq/dsp_helper.h
@@ -78,9 +78,9 @@
   // locations and values are written to the arrays |peak_index| and
   // |peak_value|, respectively. Both arrays must hold at least |num_peaks|
   // elements.
-  static void PeakDetection(int16_t* data, int data_length,
-                            int num_peaks, int fs_mult,
-                            int* peak_index, int16_t* peak_value);
+  static void PeakDetection(int16_t* data, size_t data_length,
+                            size_t num_peaks, int fs_mult,
+                            size_t* peak_index, int16_t* peak_value);
 
   // Estimates the height and location of a maximum. The three values in the
   // array |signal_points| are used as basis for a parabolic fit, which is then
@@ -89,14 +89,15 @@
   // |peak_index| and |peak_value| is given in the full sample rate, as
   // indicated by the sample rate multiplier |fs_mult|.
   static void ParabolicFit(int16_t* signal_points, int fs_mult,
-                           int* peak_index, int16_t* peak_value);
+                           size_t* peak_index, int16_t* peak_value);
 
   // Calculates the sum-abs-diff for |signal| when compared to a displaced
   // version of itself. Returns the displacement lag that results in the minimum
   // distortion. The resulting distortion is written to |distortion_value|.
   // The values of |min_lag| and |max_lag| are boundaries for the search.
-  static int MinDistortion(const int16_t* signal, int min_lag,
-                           int max_lag, int length, int32_t* distortion_value);
+  static size_t MinDistortion(const int16_t* signal, size_t min_lag,
+                              size_t max_lag, size_t length,
+                              int32_t* distortion_value);
 
   // Mixes |length| samples from |input1| and |input2| together and writes the
   // result to |output|. The gain for |input1| starts at |mix_factor| (Q14) and
@@ -122,7 +123,7 @@
   // filters if |compensate_delay| is true. Returns -1 if the input is too short
   // to produce |output_length| samples, otherwise 0.
   static int DownsampleTo4kHz(const int16_t* input, size_t input_length,
-                              int output_length, int input_rate_hz,
+                              size_t output_length, int input_rate_hz,
                               bool compensate_delay, int16_t* output);
 
  private:
diff --git a/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc b/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc
index 45601c0..f4d5190 100644
--- a/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc
+++ b/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc
@@ -149,18 +149,18 @@
 }
 
 // Generate num_samples of DTMF signal and write to |output|.
-int DtmfToneGenerator::Generate(int num_samples,
+int DtmfToneGenerator::Generate(size_t num_samples,
                                 AudioMultiVector* output) {
   if (!initialized_) {
     return kNotInitialized;
   }
 
-  if (num_samples < 0 || !output) {
+  if (!output) {
     return kParameterError;
   }
 
   output->AssertSize(num_samples);
-  for (int i = 0; i < num_samples; ++i) {
+  for (size_t i = 0; i < num_samples; ++i) {
     // Use recursion formula y[n] = a * y[n - 1] - y[n - 2].
     int16_t temp_val_low = ((coeff1_ * sample_history1_[1] + 8192) >> 14)
         - sample_history1_[0];
@@ -186,7 +186,7 @@
     output->CopyChannel(0, channel);
   }
 
-  return num_samples;
+  return static_cast<int>(num_samples);
 }
 
 bool DtmfToneGenerator::initialized() const {
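
On the tone recursion above: Generate() runs a fixed-point sine oscillator,
y[n] = a * y[n-1] - y[n-2] with a = 2 * cos(2*pi*f/fs) in Q14. A sketch of a
single oscillator (the real generator runs two, one per DTMF frequency group,
and seeds the history from sine tables in Init()):

    #include <cmath>
    #include <cstddef>
    #include <cstdint>

    void GenerateToneQ14(double freq_hz, double fs_hz,
                         int16_t* out, size_t num_samples) {
      const double w = 2.0 * 3.14159265358979 * freq_hz / fs_hz;
      const int32_t coeff_q14 =
          static_cast<int32_t>(16384.0 * 2.0 * std::cos(w));
      int32_t y_n2 = 0;                                           // y[n-2]
      int32_t y_n1 = static_cast<int32_t>(8192.0 * std::sin(w));  // y[n-1]
      for (size_t i = 0; i < num_samples; ++i) {
        // Rounded Q14 multiply, as in the (x + 8192) >> 14 lines above.
        int32_t y = ((coeff_q14 * y_n1 + 8192) >> 14) - y_n2;
        y_n2 = y_n1;
        y_n1 = y;
        out[i] = static_cast<int16_t>(y);
      }
    }
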
diff --git a/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h b/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h
index 4e51e53..767f66c 100644
--- a/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h
+++ b/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h
@@ -30,7 +30,7 @@
   virtual ~DtmfToneGenerator() {}
   virtual int Init(int fs, int event, int attenuation);
   virtual void Reset();
-  virtual int Generate(int num_samples, AudioMultiVector* output);
+  virtual int Generate(size_t num_samples, AudioMultiVector* output);
   virtual bool initialized() const;
 
  private:
diff --git a/webrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc b/webrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
index ccd7fa6..a55e6c9 100644
--- a/webrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
@@ -171,8 +171,6 @@
   // Initialize with valid parameters.
   ASSERT_EQ(0, tone_gen.Init(fs, event, attenuation));
   EXPECT_TRUE(tone_gen.initialized());
-  // Negative number of samples.
-  EXPECT_EQ(DtmfToneGenerator::kParameterError, tone_gen.Generate(-1, &signal));
   // NULL pointer to destination.
   EXPECT_EQ(DtmfToneGenerator::kParameterError,
             tone_gen.Generate(kNumSamples, NULL));
diff --git a/webrtc/modules/audio_coding/neteq/expand.cc b/webrtc/modules/audio_coding/neteq/expand.cc
index d01465a..c163fee 100644
--- a/webrtc/modules/audio_coding/neteq/expand.cc
+++ b/webrtc/modules/audio_coding/neteq/expand.cc
@@ -47,7 +47,7 @@
       expand_duration_samples_(0),
       channel_parameters_(new ChannelParameters[num_channels_]) {
   assert(fs == 8000 || fs == 16000 || fs == 32000 || fs == 48000);
-  assert(fs <= kMaxSampleRate);  // Should not be possible.
+  assert(fs <= static_cast<int>(kMaxSampleRate));  // Should not be possible.
   assert(num_channels_ > 0);
   memset(expand_lags_, 0, sizeof(expand_lags_));
   Reset();
@@ -72,7 +72,7 @@
   int16_t temp_data[kTempDataSize];  // TODO(hlundin) Remove this.
   int16_t* voiced_vector_storage = temp_data;
   int16_t* voiced_vector = &voiced_vector_storage[overlap_length_];
-  static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
+  static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
   int16_t unvoiced_array_memory[kNoiseLpcOrder + kMaxSampleRate / 8000 * 125];
   int16_t* unvoiced_vector = unvoiced_array_memory + kUnvoicedLpcOrder;
   int16_t* noise_vector = unvoiced_array_memory + kNoiseLpcOrder;
@@ -87,7 +87,7 @@
   } else {
     // This is not the first expansion, parameters are already estimated.
     // Extract a noise segment.
-    int16_t rand_length = max_lag_;
+    size_t rand_length = max_lag_;
     // This only applies to SWB where length could be larger than 256.
     assert(rand_length <= kMaxSampleRate / 8000 * 120 + 30);
     GenerateRandomVector(2, rand_length, random_vector);
@@ -119,7 +119,7 @@
       WebRtcSpl_ScaleAndAddVectorsWithRound(
           &parameters.expand_vector0[expansion_vector_position], 3,
           &parameters.expand_vector1[expansion_vector_position], 1, 2,
-          voiced_vector_storage, static_cast<int>(temp_length));
+          voiced_vector_storage, temp_length);
     } else if (current_lag_index_ == 2) {
       // Mix 1/2 of expand_vector0 with 1/2 of expand_vector1.
       assert(expansion_vector_position + temp_length <=
@@ -129,7 +129,7 @@
       WebRtcSpl_ScaleAndAddVectorsWithRound(
           &parameters.expand_vector0[expansion_vector_position], 1,
           &parameters.expand_vector1[expansion_vector_position], 1, 1,
-          voiced_vector_storage, static_cast<int>(temp_length));
+          voiced_vector_storage, temp_length);
     }
 
     // Get tapering window parameters. Values are in Q15.
@@ -196,10 +196,10 @@
     WebRtcSpl_AffineTransformVector(scaled_random_vector, random_vector,
                                     parameters.ar_gain, add_constant,
                                     parameters.ar_gain_scale,
-                                    static_cast<int>(current_lag));
+                                    current_lag);
     WebRtcSpl_FilterARFastQ12(scaled_random_vector, unvoiced_vector,
                               parameters.ar_filter, kUnvoicedLpcOrder + 1,
-                              static_cast<int>(current_lag));
+                              current_lag);
     memcpy(parameters.ar_filter_state,
            &(unvoiced_vector[current_lag - kUnvoicedLpcOrder]),
            sizeof(int16_t) * kUnvoicedLpcOrder);
@@ -212,7 +212,8 @@
     //  (>= 31 .. <= 63) * fs_mult  => go from 1 to 0 in about 16 ms;
     //   >= 64 * fs_mult            => go from 1 to 0 in about 32 ms.
     // temp_shift = getbits(max_lag_) - 5.
-    int temp_shift = (31 - WebRtcSpl_NormW32(max_lag_)) - 5;
+    int temp_shift =
+        (31 - WebRtcSpl_NormW32(rtc::checked_cast<int32_t>(max_lag_))) - 5;
     int16_t mix_factor_increment = 256 >> temp_shift;
     if (stop_muting_) {
       mix_factor_increment = 0;
@@ -237,7 +238,7 @@
       WebRtcSpl_ScaleAndAddVectorsWithRound(
           voiced_vector + temp_length, parameters.current_voice_mix_factor,
           unvoiced_vector + temp_length, temp_scale, 14,
-          temp_data + temp_length, static_cast<int>(current_lag - temp_length));
+          temp_data + temp_length, current_lag - temp_length);
     }
 
     // Select muting slope depending on how many consecutive expands we have
@@ -258,7 +259,7 @@
       // Mute to the previous level, then continue with the muting.
       WebRtcSpl_AffineTransformVector(temp_data, temp_data,
                                       parameters.mute_factor, 8192,
-                                      14, static_cast<int>(current_lag));
+                                      14, current_lag);
 
       if (!stop_muting_) {
         DspHelper::MuteSignal(temp_data, parameters.mute_slope, current_lag);
@@ -351,26 +352,26 @@
   int32_t auto_correlation[kUnvoicedLpcOrder + 1];
   int16_t reflection_coeff[kUnvoicedLpcOrder];
   int16_t correlation_vector[kMaxSampleRate / 8000 * 102];
-  int best_correlation_index[kNumCorrelationCandidates];
+  size_t best_correlation_index[kNumCorrelationCandidates];
   int16_t best_correlation[kNumCorrelationCandidates];
-  int16_t best_distortion_index[kNumCorrelationCandidates];
+  size_t best_distortion_index[kNumCorrelationCandidates];
   int16_t best_distortion[kNumCorrelationCandidates];
   int32_t correlation_vector2[(99 * kMaxSampleRate / 8000) + 1];
   int32_t best_distortion_w32[kNumCorrelationCandidates];
-  static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
+  static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
   int16_t unvoiced_array_memory[kNoiseLpcOrder + kMaxSampleRate / 8000 * 125];
   int16_t* unvoiced_vector = unvoiced_array_memory + kUnvoicedLpcOrder;
 
   int fs_mult = fs_hz_ / 8000;
 
   // Pre-calculate common multiplications with fs_mult.
-  int fs_mult_4 = fs_mult * 4;
-  int fs_mult_20 = fs_mult * 20;
-  int fs_mult_120 = fs_mult * 120;
-  int fs_mult_dist_len = fs_mult * kDistortionLength;
-  int fs_mult_lpc_analysis_len = fs_mult * kLpcAnalysisLength;
+  size_t fs_mult_4 = static_cast<size_t>(fs_mult * 4);
+  size_t fs_mult_20 = static_cast<size_t>(fs_mult * 20);
+  size_t fs_mult_120 = static_cast<size_t>(fs_mult * 120);
+  size_t fs_mult_dist_len = fs_mult * kDistortionLength;
+  size_t fs_mult_lpc_analysis_len = fs_mult * kLpcAnalysisLength;
 
-  const size_t signal_length = 256 * fs_mult;
+  const size_t signal_length = static_cast<size_t>(256 * fs_mult);
   const int16_t* audio_history =
       &(*sync_buffer_)[0][sync_buffer_->Size() - signal_length];
 
@@ -379,7 +380,7 @@
 
   // Calculate correlation in downsampled domain (4 kHz sample rate).
   int correlation_scale;
-  int correlation_length = 51;  // TODO(hlundin): Legacy bit-exactness.
+  size_t correlation_length = 51;  // TODO(hlundin): Legacy bit-exactness.
   // If it is decided to break bit-exactness |correlation_length| should be
   // initialized to the return value of Correlation().
   Correlation(audio_history, signal_length, correlation_vector,
@@ -398,11 +399,11 @@
 
   // Calculate distortion around the |kNumCorrelationCandidates| best lags.
   int distortion_scale = 0;
-  for (int i = 0; i < kNumCorrelationCandidates; i++) {
-    int16_t min_index = std::max(fs_mult_20,
-                                 best_correlation_index[i] - fs_mult_4);
-    int16_t max_index = std::min(fs_mult_120 - 1,
-                                 best_correlation_index[i] + fs_mult_4);
+  for (size_t i = 0; i < kNumCorrelationCandidates; i++) {
+    size_t min_index = std::max(fs_mult_20,
+                                best_correlation_index[i] - fs_mult_4);
+    size_t max_index = std::min(fs_mult_120 - 1,
+                                best_correlation_index[i] + fs_mult_4);
     best_distortion_index[i] = DspHelper::MinDistortion(
         &(audio_history[signal_length - fs_mult_dist_len]), min_index,
         max_index, fs_mult_dist_len, &best_distortion_w32[i]);
@@ -416,8 +417,8 @@
   // Find the maximizing index |i| of the cost function
   // f[i] = best_correlation[i] / best_distortion[i].
   int32_t best_ratio = std::numeric_limits<int32_t>::min();
-  int best_index = std::numeric_limits<int>::max();
-  for (int i = 0; i < kNumCorrelationCandidates; ++i) {
+  size_t best_index = std::numeric_limits<size_t>::max();
+  for (size_t i = 0; i < kNumCorrelationCandidates; ++i) {
     int32_t ratio;
     if (best_distortion[i] > 0) {
       ratio = (best_correlation[i] << 16) / best_distortion[i];
@@ -432,19 +433,20 @@
     }
   }
 
-  int distortion_lag = best_distortion_index[best_index];
-  int correlation_lag = best_correlation_index[best_index];
+  size_t distortion_lag = best_distortion_index[best_index];
+  size_t correlation_lag = best_correlation_index[best_index];
   max_lag_ = std::max(distortion_lag, correlation_lag);
 
   // Calculate the exact best correlation in the range between
   // |correlation_lag| and |distortion_lag|.
   correlation_length =
-      std::max(std::min(distortion_lag + 10, fs_mult_120), 60 * fs_mult);
+      std::max(std::min(distortion_lag + 10, fs_mult_120),
+               static_cast<size_t>(60 * fs_mult));
 
-  int start_index = std::min(distortion_lag, correlation_lag);
-  int correlation_lags =
-      WEBRTC_SPL_ABS_W16((distortion_lag-correlation_lag)) + 1;
-  assert(correlation_lags <= 99 * fs_mult + 1);  // Cannot be larger.
+  size_t start_index = std::min(distortion_lag, correlation_lag);
+  size_t correlation_lags = static_cast<size_t>(
+      WEBRTC_SPL_ABS_W16((distortion_lag - correlation_lag)) + 1);
+  assert(correlation_lags <= static_cast<size_t>(99 * fs_mult + 1));
 
   for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) {
     ChannelParameters& parameters = channel_parameters_[channel_ix];
@@ -454,7 +456,7 @@
                        - correlation_lags],
                        correlation_length + start_index + correlation_lags - 1);
     correlation_scale = (31 - WebRtcSpl_NormW32(signal_max * signal_max)) +
-        (31 - WebRtcSpl_NormW32(correlation_length)) - 31;
+        (31 - WebRtcSpl_NormW32(static_cast<int32_t>(correlation_length))) - 31;
     correlation_scale = std::max(0, correlation_scale);
 
     // Calculate the correlation, store in |correlation_vector2|.
@@ -465,7 +467,8 @@
         correlation_length, correlation_lags, correlation_scale, -1);
 
     // Find maximizing index.
-    best_index = WebRtcSpl_MaxIndexW32(correlation_vector2, correlation_lags);
+    best_index = static_cast<size_t>(
+        WebRtcSpl_MaxIndexW32(correlation_vector2, correlation_lags));
     int32_t max_correlation = correlation_vector2[best_index];
     // Compensate index with start offset.
     best_index = best_index + start_index;
@@ -508,7 +511,7 @@
 
     // Extract the two vectors expand_vector0 and expand_vector1 from
     // |audio_history|.
-    int16_t expansion_length = static_cast<int16_t>(max_lag_ + overlap_length_);
+    size_t expansion_length = max_lag_ + overlap_length_;
     const int16_t* vector1 = &(audio_history[signal_length - expansion_length]);
     const int16_t* vector2 = vector1 - distortion_lag;
     // Normalize the second vector to the same energy as the first.
@@ -527,15 +530,15 @@
       // Calculate scaled_energy1 / scaled_energy2 in Q13.
       int32_t energy_ratio = WebRtcSpl_DivW32W16(
           WEBRTC_SPL_SHIFT_W32(energy1, -scaled_energy1),
-          energy2 >> scaled_energy2);
+          static_cast<int16_t>(energy2 >> scaled_energy2));
       // Calculate sqrt ratio in Q13 (sqrt of en1/en2 in Q26).
-      amplitude_ratio = WebRtcSpl_SqrtFloor(energy_ratio << 13);
+      amplitude_ratio =
+          static_cast<int16_t>(WebRtcSpl_SqrtFloor(energy_ratio << 13));
       // Copy the two vectors and give them the same energy.
       parameters.expand_vector0.Clear();
       parameters.expand_vector0.PushBack(vector1, expansion_length);
       parameters.expand_vector1.Clear();
-      if (parameters.expand_vector1.Size() <
-          static_cast<size_t>(expansion_length)) {
+      if (parameters.expand_vector1.Size() < expansion_length) {
         parameters.expand_vector1.Extend(
             expansion_length - parameters.expand_vector1.Size());
       }
@@ -626,7 +629,7 @@
 
     if (channel_ix == 0) {
       // Extract a noise segment.
-      int16_t noise_length;
+      size_t noise_length;
       if (distortion_lag < 40) {
         noise_length = 2 * distortion_lag + 30;
       } else {
@@ -768,7 +771,7 @@
                          int* output_scale) const {
   // Set parameters depending on sample rate.
   const int16_t* filter_coefficients;
-  int16_t num_coefficients;
+  size_t num_coefficients;
   int16_t downsampling_factor;
   if (fs_hz_ == 8000) {
     num_coefficients = 3;
@@ -790,14 +793,14 @@
 
   // Correlate from lag 10 to lag 60 in downsampled domain.
   // (Corresponds to 20-120 for narrow-band, 40-240 for wide-band, and so on.)
-  static const int kCorrelationStartLag = 10;
-  static const int kNumCorrelationLags = 54;
-  static const int kCorrelationLength = 60;
+  static const size_t kCorrelationStartLag = 10;
+  static const size_t kNumCorrelationLags = 54;
+  static const size_t kCorrelationLength = 60;
   // Downsample to 4 kHz sample rate.
-  static const int kDownsampledLength = kCorrelationStartLag
+  static const size_t kDownsampledLength = kCorrelationStartLag
       + kNumCorrelationLags + kCorrelationLength;
   int16_t downsampled_input[kDownsampledLength];
-  static const int kFilterDelay = 0;
+  static const size_t kFilterDelay = 0;
   WebRtcSpl_DownsampleFast(
       input + input_length - kDownsampledLength * downsampling_factor,
       kDownsampledLength * downsampling_factor, downsampled_input,
@@ -859,9 +862,9 @@
                                      bool too_many_expands,
                                      size_t num_noise_samples,
                                      int16_t* buffer) {
-  static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
+  static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
   int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125];
-  assert(num_noise_samples <= static_cast<size_t>(kMaxSampleRate / 8000 * 125));
+  assert(num_noise_samples <= (kMaxSampleRate / 8000 * 125));
   int16_t* noise_samples = &buffer[kNoiseLpcOrder];
   if (background_noise_->initialized()) {
     // Use background noise parameters.
@@ -879,12 +882,12 @@
         scaled_random_vector, random_vector,
         background_noise_->Scale(channel), dc_offset,
         background_noise_->ScaleShift(channel),
-        static_cast<int>(num_noise_samples));
+        num_noise_samples);
 
     WebRtcSpl_FilterARFastQ12(scaled_random_vector, noise_samples,
                               background_noise_->Filter(channel),
                               kNoiseLpcOrder + 1,
-                              static_cast<int>(num_noise_samples));
+                              num_noise_samples);
 
     background_noise_->SetFilterState(
         channel,
@@ -931,7 +934,7 @@
         // kBgnFade has reached 0.
         WebRtcSpl_AffineTransformVector(noise_samples, noise_samples,
                                         bgn_mute_factor, 8192, 14,
-                                        static_cast<int>(num_noise_samples));
+                                        num_noise_samples);
       }
     }
     // Update mute_factor in BackgroundNoise class.
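
Several hunks above now pass size_t lengths straight into
WebRtcSpl_ScaleAndAddVectorsWithRound. For reference, a hedged plain-C++
stand-in for what that routine computes (the real implementation lives in
common_audio and may differ in detail):

    #include <cstddef>
    #include <cstdint>

    // out[i] = (in1[i] * gain1 + in2[i] * gain2 + round) >> right_shift.
    // E.g. gains 3 and 1 with right_shift 2, as in the expand_vector mix
    // above, blends 3/4 of one vector with 1/4 of the other.
    void ScaleAndAddWithRound(const int16_t* in1, int16_t gain1,
                              const int16_t* in2, int16_t gain2,
                              int right_shift, int16_t* out, size_t length) {
      const int32_t round = 1 << (right_shift - 1);
      for (size_t i = 0; i < length; ++i) {
        out[i] = static_cast<int16_t>(
            (gain1 * in1[i] + gain2 * in2[i] + round) >> right_shift);
      }
    }
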
diff --git a/webrtc/modules/audio_coding/neteq/expand.h b/webrtc/modules/audio_coding/neteq/expand.h
index 3fbafdb..37e58d6 100644
--- a/webrtc/modules/audio_coding/neteq/expand.h
+++ b/webrtc/modules/audio_coding/neteq/expand.h
@@ -64,7 +64,7 @@
 
   // Accessors and mutators.
   virtual size_t overlap_length() const;
-  int16_t max_lag() const { return max_lag_; }
+  size_t max_lag() const { return max_lag_; }
 
  protected:
   static const int kMaxConsecutiveExpands = 200;
@@ -96,11 +96,11 @@
   int consecutive_expands_;
 
  private:
-  static const int kUnvoicedLpcOrder = 6;
-  static const int kNumCorrelationCandidates = 3;
-  static const int kDistortionLength = 20;
-  static const int kLpcAnalysisLength = 160;
-  static const int kMaxSampleRate = 48000;
+  static const size_t kUnvoicedLpcOrder = 6;
+  static const size_t kNumCorrelationCandidates = 3;
+  static const size_t kDistortionLength = 20;
+  static const size_t kLpcAnalysisLength = 160;
+  static const size_t kMaxSampleRate = 48000;
   static const int kNumLags = 3;
 
   struct ChannelParameters {
@@ -132,7 +132,7 @@
   BackgroundNoise* const background_noise_;
   StatisticsCalculator* const statistics_;
   const size_t overlap_length_;
-  int16_t max_lag_;
+  size_t max_lag_;
   size_t expand_lags_[kNumLags];
   int lag_index_direction_;
   int current_lag_index_;
diff --git a/webrtc/modules/audio_coding/neteq/interface/neteq.h b/webrtc/modules/audio_coding/neteq/interface/neteq.h
index 88bf208..865a8b3 100644
--- a/webrtc/modules/audio_coding/neteq/interface/neteq.h
+++ b/webrtc/modules/audio_coding/neteq/interface/neteq.h
@@ -45,7 +45,7 @@
                                     // decoding (in Q14).
   int32_t clockdrift_ppm;  // Average clock-drift in parts-per-million
                            // (positive or negative).
-  int added_zero_samples;  // Number of zero samples added in "off" mode.
+  size_t added_zero_samples;  // Number of zero samples added in "off" mode.
 };
 
 enum NetEqOutputType {
@@ -87,7 +87,7 @@
 
     int sample_rate_hz;  // Initial value. Will change with input data.
     bool enable_audio_classifier;
-    int max_packets_in_buffer;
+    size_t max_packets_in_buffer;
     int max_delay_ms;
     BackgroundNoiseMode background_noise_mode;
     NetEqPlayoutMode playout_mode;
@@ -165,7 +165,7 @@
   // The speech type is written to |type|, if |type| is not NULL.
   // Returns kOK on success, or kFail in case of an error.
   virtual int GetAudio(size_t max_length, int16_t* output_audio,
-                       int* samples_per_channel, int* num_channels,
+                       size_t* samples_per_channel, int* num_channels,
                        NetEqOutputType* type) = 0;
 
   // Associates |rtp_payload_type| with |codec| and stores the information in
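
For callers, the GetAudio() signature change is picked up as below
(hypothetical call site; neteq, the buffer size, and the error handling are
illustrative):

    const size_t kMaxFrameSamples = 48 * 10 * 2;  // 10 ms stereo at 48 kHz.
    int16_t audio[kMaxFrameSamples];
    size_t samples_per_channel = 0;  // Was int before this change.
    int num_channels = 0;
    NetEqOutputType type;
    if (neteq->GetAudio(kMaxFrameSamples, audio, &samples_per_channel,
                        &num_channels, &type) != NetEq::kOK) {
      // Handle the error.
    }
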
diff --git a/webrtc/modules/audio_coding/neteq/merge.cc b/webrtc/modules/audio_coding/neteq/merge.cc
index 2c515c1..b6fb2d8 100644
--- a/webrtc/modules/audio_coding/neteq/merge.cc
+++ b/webrtc/modules/audio_coding/neteq/merge.cc
@@ -31,25 +31,25 @@
     : fs_hz_(fs_hz),
       num_channels_(num_channels),
       fs_mult_(fs_hz_ / 8000),
-      timestamps_per_call_(fs_hz_ / 100),
+      timestamps_per_call_(static_cast<size_t>(fs_hz_ / 100)),
       expand_(expand),
       sync_buffer_(sync_buffer),
       expanded_(num_channels_) {
   assert(num_channels_ > 0);
 }
 
-int Merge::Process(int16_t* input, size_t input_length,
-                   int16_t* external_mute_factor_array,
-                   AudioMultiVector* output) {
+size_t Merge::Process(int16_t* input, size_t input_length,
+                      int16_t* external_mute_factor_array,
+                      AudioMultiVector* output) {
   // TODO(hlundin): Change to an enumerator and skip assert.
   assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ ==  32000 ||
          fs_hz_ == 48000);
   assert(fs_hz_ <= kMaxSampleRate);  // Should not be possible.
 
-  int old_length;
-  int expand_period;
+  size_t old_length;
+  size_t expand_period;
   // Get expansion data to overlap and mix with.
-  int expanded_length = GetExpandedSignal(&old_length, &expand_period);
+  size_t expanded_length = GetExpandedSignal(&old_length, &expand_period);
 
   // Transfer input signal to an AudioMultiVector.
   AudioMultiVector input_vector(num_channels_);
@@ -57,7 +57,7 @@
   size_t input_length_per_channel = input_vector.Size();
   assert(input_length_per_channel == input_length / num_channels_);
 
-  int16_t best_correlation_index = 0;
+  size_t best_correlation_index = 0;
   size_t output_length = 0;
 
   for (size_t channel = 0; channel < num_channels_; ++channel) {
@@ -65,8 +65,8 @@
     int16_t* expanded_channel = &expanded_[channel][0];
     int16_t expanded_max, input_max;
     int16_t new_mute_factor = SignalScaling(
-        input_channel, static_cast<int>(input_length_per_channel),
-        expanded_channel, &expanded_max, &input_max);
+        input_channel, input_length_per_channel, expanded_channel,
+        &expanded_max, &input_max);
 
     // Adjust muting factor (product of "main" muting factor and expand muting
     // factor).
@@ -84,13 +84,13 @@
       // Downsample, correlate, and find strongest correlation period for the
       // master (i.e., first) channel only.
       // Downsample to 4kHz sample rate.
-      Downsample(input_channel, static_cast<int>(input_length_per_channel),
-                 expanded_channel, expanded_length);
+      Downsample(input_channel, input_length_per_channel, expanded_channel,
+                 expanded_length);
 
       // Calculate the lag of the strongest correlation period.
       best_correlation_index = CorrelateAndPeakSearch(
           expanded_max, input_max, old_length,
-          static_cast<int>(input_length_per_channel), expand_period);
+          input_length_per_channel, expand_period);
     }
 
     static const int kTempDataSize = 3600;
@@ -99,11 +99,11 @@
 
     // Mute the new decoded data if needed (and unmute it linearly).
     // This is the overlapping part of expanded_signal.
-    int interpolation_length = std::min(
+    size_t interpolation_length = std::min(
         kMaxCorrelationLength * fs_mult_,
         expanded_length - best_correlation_index);
     interpolation_length = std::min(interpolation_length,
-                                    static_cast<int>(input_length_per_channel));
+                                    input_length_per_channel);
     if (*external_mute_factor < 16384) {
       // Set a suitable muting slope (Q20). 0.004 for NB, 0.002 for WB,
       // and so on.
@@ -153,14 +153,14 @@
 
   // Return the newly added length. |old_length| samples were borrowed from
   // |sync_buffer_|.
-  return static_cast<int>(output_length) - old_length;
+  return output_length - old_length;
 }
 
-int Merge::GetExpandedSignal(int* old_length, int* expand_period) {
+size_t Merge::GetExpandedSignal(size_t* old_length, size_t* expand_period) {
   // Check how much data is left over from earlier.
-  *old_length = static_cast<int>(sync_buffer_->FutureLength());
+  *old_length = sync_buffer_->FutureLength();
   // Should never be less than overlap_length.
-  assert(*old_length >= static_cast<int>(expand_->overlap_length()));
+  assert(*old_length >= expand_->overlap_length());
   // Use expand to generate data to merge the overlap with.
   expand_->SetParametersForMergeAfterExpand();
 
@@ -171,7 +171,7 @@
     // but shift them towards the end of the buffer. This is ok, since all of
     // the buffer will be expand data anyway, so as long as the beginning is
     // left untouched, we're fine.
-    int16_t length_diff = *old_length - 210 * kMaxSampleRate / 8000;
+    size_t length_diff = *old_length - 210 * kMaxSampleRate / 8000;
     sync_buffer_->InsertZerosAtIndex(length_diff, sync_buffer_->next_index());
     *old_length = 210 * kMaxSampleRate / 8000;
     // This is the truncated length.
@@ -181,34 +181,34 @@
 
   AudioMultiVector expanded_temp(num_channels_);
   expand_->Process(&expanded_temp);
-  *expand_period = static_cast<int>(expanded_temp.Size());  // Samples per
-                                                            // channel.
+  *expand_period = expanded_temp.Size();  // Samples per channel.
 
   expanded_.Clear();
   // Copy what is left over from earlier into the expanded vector.
   expanded_.PushBackFromIndex(*sync_buffer_, sync_buffer_->next_index());
-  assert(expanded_.Size() == static_cast<size_t>(*old_length));
+  assert(expanded_.Size() == *old_length);
   assert(expanded_temp.Size() > 0);
   // Do an "ugly" copy-and-paste from the expanded vector in order to
   // generate more data to correlate (but not interpolate) with.
-  const int required_length = (120 + 80 + 2) * fs_mult_;
-  if (expanded_.Size() < static_cast<size_t>(required_length)) {
-    while (expanded_.Size() < static_cast<size_t>(required_length)) {
+  const size_t required_length = static_cast<size_t>((120 + 80 + 2) * fs_mult_);
+  if (expanded_.Size() < required_length) {
+    while (expanded_.Size() < required_length) {
       // Append one more pitch period each time.
       expanded_.PushBack(expanded_temp);
     }
     // Trim the length to exactly |required_length|.
     expanded_.PopBack(expanded_.Size() - required_length);
   }
-  assert(expanded_.Size() >= static_cast<size_t>(required_length));
+  assert(expanded_.Size() >= required_length);
   return required_length;
 }
 
-int16_t Merge::SignalScaling(const int16_t* input, int input_length,
+int16_t Merge::SignalScaling(const int16_t* input, size_t input_length,
                              const int16_t* expanded_signal,
                              int16_t* expanded_max, int16_t* input_max) const {
   // Adjust muting factor if new vector is more or less of the BGN energy.
-  const int mod_input_length = std::min(64 * fs_mult_, input_length);
+  const size_t mod_input_length =
+      std::min(static_cast<size_t>(64 * fs_mult_), input_length);
   *expanded_max = WebRtcSpl_MaxAbsValueW16(expanded_signal, mod_input_length);
   *input_max = WebRtcSpl_MaxAbsValueW16(input, mod_input_length);
 
@@ -260,13 +260,13 @@
 
 // TODO(hlundin): There are some parameter values in this method that seem
 // strange. Compare with Expand::Correlation.
-void Merge::Downsample(const int16_t* input, int input_length,
-                       const int16_t* expanded_signal, int expanded_length) {
+void Merge::Downsample(const int16_t* input, size_t input_length,
+                       const int16_t* expanded_signal, size_t expanded_length) {
   const int16_t* filter_coefficients;
-  int num_coefficients;
+  size_t num_coefficients;
   int decimation_factor = fs_hz_ / 4000;
-  static const int kCompensateDelay = 0;
-  int length_limit = fs_hz_ / 100;  // 10 ms in samples.
+  static const size_t kCompensateDelay = 0;
+  size_t length_limit = static_cast<size_t>(fs_hz_ / 100);  // 10 ms in samples.
   if (fs_hz_ == 8000) {
     filter_coefficients = DspHelper::kDownsample8kHzTbl;
     num_coefficients = 3;
@@ -280,7 +280,7 @@
     filter_coefficients = DspHelper::kDownsample48kHzTbl;
     num_coefficients = 7;
   }
-  int signal_offset = num_coefficients - 1;
+  size_t signal_offset = num_coefficients - 1;
   WebRtcSpl_DownsampleFast(&expanded_signal[signal_offset],
                            expanded_length - signal_offset,
                            expanded_downsampled_, kExpandDownsampLength,
@@ -288,10 +288,10 @@
                            decimation_factor, kCompensateDelay);
   if (input_length <= length_limit) {
     // Not quite long enough, so we have to cheat a bit.
-    int16_t temp_len = input_length - signal_offset;
+    size_t temp_len = input_length - signal_offset;
     // TODO(hlundin): Should |downsamp_temp_len| be corrected for round-off
     // errors? I.e., (temp_len + decimation_factor - 1) / decimation_factor?
-    int16_t downsamp_temp_len = temp_len / decimation_factor;
+    size_t downsamp_temp_len = temp_len / decimation_factor;
     WebRtcSpl_DownsampleFast(&input[signal_offset], temp_len,
                              input_downsampled_, downsamp_temp_len,
                              filter_coefficients, num_coefficients,
@@ -307,12 +307,12 @@
   }
 }
 
-int16_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
-                                      int start_position, int input_length,
-                                      int expand_period) const {
+size_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
+                                     size_t start_position, size_t input_length,
+                                     size_t expand_period) const {
   // Calculate correlation without any normalization.
-  const int max_corr_length = kMaxCorrelationLength;
-  int stop_position_downsamp =
+  const size_t max_corr_length = kMaxCorrelationLength;
+  size_t stop_position_downsamp =
       std::min(max_corr_length, expand_->max_lag() / (fs_mult_ * 2) + 1);
   int correlation_shift = 0;
   if (expanded_max * input_max > 26843546) {
@@ -325,8 +325,8 @@
                              stop_position_downsamp, correlation_shift, 1);
 
   // Normalize correlation to 14 bits and copy to a 16-bit array.
-  const int pad_length = static_cast<int>(expand_->overlap_length() - 1);
-  const int correlation_buffer_size = 2 * pad_length + kMaxCorrelationLength;
+  const size_t pad_length = expand_->overlap_length() - 1;
+  const size_t correlation_buffer_size = 2 * pad_length + kMaxCorrelationLength;
   rtc::scoped_ptr<int16_t[]> correlation16(
       new int16_t[correlation_buffer_size]);
   memset(correlation16.get(), 0, correlation_buffer_size * sizeof(int16_t));
@@ -342,21 +342,20 @@
   // (1) w16_bestIndex + input_length <
   //     timestamps_per_call_ + expand_->overlap_length();
   // (2) w16_bestIndex + input_length < start_position.
-  int start_index = timestamps_per_call_ +
-      static_cast<int>(expand_->overlap_length());
+  size_t start_index = timestamps_per_call_ + expand_->overlap_length();
   start_index = std::max(start_position, start_index);
   start_index = (input_length > start_index) ? 0 : (start_index - input_length);
   // Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.)
-  int start_index_downsamp = start_index / (fs_mult_ * 2);
+  size_t start_index_downsamp = start_index / (fs_mult_ * 2);
 
   // Calculate a modified |stop_position_downsamp| to account for the increased
   // start index |start_index_downsamp| and the effective array length.
-  int modified_stop_pos =
+  size_t modified_stop_pos =
       std::min(stop_position_downsamp,
                kMaxCorrelationLength + pad_length - start_index_downsamp);
-  int best_correlation_index;
+  size_t best_correlation_index;
   int16_t best_correlation;
-  static const int kNumCorrelationCandidates = 1;
+  static const size_t kNumCorrelationCandidates = 1;
   DspHelper::PeakDetection(&correlation_ptr[start_index_downsamp],
                            modified_stop_pos, kNumCorrelationCandidates,
                            fs_mult_, &best_correlation_index,
@@ -368,16 +367,16 @@
   // least 10 ms + overlap. (This should never happen thanks to the above
   // modification of peak-finding starting point.)
   while (((best_correlation_index + input_length) <
-      static_cast<int>(timestamps_per_call_ + expand_->overlap_length())) ||
-      ((best_correlation_index + input_length) < start_position)) {
+          (timestamps_per_call_ + expand_->overlap_length())) ||
+         ((best_correlation_index + input_length) < start_position)) {
     assert(false);  // Should never happen.
     best_correlation_index += expand_period;  // Jump one lag ahead.
   }
   return best_correlation_index;
 }
 
-int Merge::RequiredFutureSamples() {
-  return static_cast<int>(fs_hz_ / 100 * num_channels_);  // 10 ms.
+size_t Merge::RequiredFutureSamples() {
+  return fs_hz_ / 100 * num_channels_;  // 10 ms.
 }
 
 
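A note on the merge.cc hunks above: with Merge::Process now returning size_t,
the final statement, return output_length - old_length;, quietly depends on
output_length >= old_length, since unsigned subtraction wraps around instead
of going negative. A minimal sketch (illustrative only, not part of this
change, with a hypothetical helper name) of making that invariant explicit:

#include <cassert>
#include <cstddef>
#include <cstdio>

// Hypothetical helper mirroring the tail of Merge::Process.
size_t NewlyAddedLength(size_t output_length, size_t old_length) {
  // The merge always produces at least the borrowed samples; assert it so a
  // violation fails loudly instead of wrapping to a near-SIZE_MAX value.
  assert(output_length >= old_length);
  return output_length - old_length;
}

int main() {
  printf("%zu\n", NewlyAddedLength(480, 120));  // Prints 360.
  // NewlyAddedLength(120, 480) would trip the assert; without it, the
  // subtraction would wrap to SIZE_MAX - 359.
  return 0;
}
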
diff --git a/webrtc/modules/audio_coding/neteq/merge.h b/webrtc/modules/audio_coding/neteq/merge.h
index 1b60aec..727e9a6 100644
--- a/webrtc/modules/audio_coding/neteq/merge.h
+++ b/webrtc/modules/audio_coding/neteq/merge.h
@@ -46,11 +46,11 @@
   // de-interleaving |input|. The values in |external_mute_factor_array| (Q14)
   // will be used to scale the audio, and is updated in the process. The array
   // must have |num_channels_| elements.
-  virtual int Process(int16_t* input, size_t input_length,
-                      int16_t* external_mute_factor_array,
-                      AudioMultiVector* output);
+  virtual size_t Process(int16_t* input, size_t input_length,
+                         int16_t* external_mute_factor_array,
+                         AudioMultiVector* output);
 
-  virtual int RequiredFutureSamples();
+  virtual size_t RequiredFutureSamples();
 
  protected:
   const int fs_hz_;
@@ -58,38 +58,38 @@
 
  private:
   static const int kMaxSampleRate = 48000;
-  static const int kExpandDownsampLength = 100;
-  static const int kInputDownsampLength = 40;
-  static const int kMaxCorrelationLength = 60;
+  static const size_t kExpandDownsampLength = 100;
+  static const size_t kInputDownsampLength = 40;
+  static const size_t kMaxCorrelationLength = 60;
 
   // Calls |expand_| to get more expansion data to merge with. The data is
   // written to |expanded_signal_|. Returns the length of the expanded data,
   // while |expand_period| will be the number of samples in one expansion period
   // (typically one pitch period). The value of |old_length| will be the number
   // of samples that were taken from the |sync_buffer_|.
-  int GetExpandedSignal(int* old_length, int* expand_period);
+  size_t GetExpandedSignal(size_t* old_length, size_t* expand_period);
 
   // Analyzes |input| and |expanded_signal| to find maximum values. Returns
   // a muting factor (Q14) to be used on the new data.
-  int16_t SignalScaling(const int16_t* input, int input_length,
+  int16_t SignalScaling(const int16_t* input, size_t input_length,
                         const int16_t* expanded_signal,
                         int16_t* expanded_max, int16_t* input_max) const;
 
   // Downsamples |input| (|input_length| samples) and |expanded_signal| to
   // 4 kHz sample rate. The downsampled signals are written to
   // |input_downsampled_| and |expanded_downsampled_|, respectively.
-  void Downsample(const int16_t* input, int input_length,
-                  const int16_t* expanded_signal, int expanded_length);
+  void Downsample(const int16_t* input, size_t input_length,
+                  const int16_t* expanded_signal, size_t expanded_length);
 
   // Calculates cross-correlation between |input_downsampled_| and
   // |expanded_downsampled_|, and finds the correlation maximum. The maximizing
   // lag is returned.
-  int16_t CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
-                                 int start_position, int input_length,
-                                 int expand_period) const;
+  size_t CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
+                                size_t start_position, size_t input_length,
+                                size_t expand_period) const;
 
   const int fs_mult_;  // fs_hz_ / 8000.
-  const int timestamps_per_call_;
+  const size_t timestamps_per_call_;
   Expand* expand_;
   SyncBuffer* sync_buffer_;
   int16_t expanded_downsampled_[kExpandDownsampLength];
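
For readers unfamiliar with the Q14 factors threaded through these
signatures: SignalScaling() returns a muting factor in Q14, a fixed-point
format in which 16384 represents 1.0 (hence the *external_mute_factor < 16384
test in merge.cc). Applying such a factor works roughly like this sketch
(illustration only, hypothetical function name):

#include <cstdint>

// Scale |sample| by a Q14 factor (16384 == 1.0). The 32-bit intermediate
// keeps the product from overflowing; shifting right by 14 removes the
// Q14 scaling again.
int16_t ApplyMuteFactorQ14(int16_t sample, int16_t factor_q14) {
  return static_cast<int16_t>(
      (static_cast<int32_t>(sample) * factor_q14) >> 14);
}
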
diff --git a/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h b/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h
index 93261ab..d26e2a1 100644
--- a/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h
+++ b/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h
@@ -26,7 +26,7 @@
       Decode,
       int(const uint8_t*, size_t, int, size_t, int16_t*, SpeechType*));
   MOCK_CONST_METHOD0(HasDecodePlc, bool());
-  MOCK_METHOD2(DecodePlc, int(int, int16_t*));
+  MOCK_METHOD2(DecodePlc, size_t(size_t, int16_t*));
   MOCK_METHOD0(Init, int());
   MOCK_METHOD5(IncomingPacket, int(const uint8_t*, size_t, uint16_t, uint32_t,
                                    uint32_t));
diff --git a/webrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h b/webrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
index ebc6acd..82dee2a 100644
--- a/webrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
+++ b/webrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
@@ -25,8 +25,8 @@
   MOCK_METHOD0(Reset,
       void());
   MOCK_METHOD3(Update,
-      void(int buffer_size_packets, int time_stretched_samples,
-           int packet_len_samples));
+      void(size_t buffer_size_packets, int time_stretched_samples,
+           size_t packet_len_samples));
   MOCK_METHOD1(SetTargetBufferLevel,
       void(int target_buffer_level));
   MOCK_CONST_METHOD0(filtered_current_level,
diff --git a/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h b/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h
index 1d2dc8e..6fb8585 100644
--- a/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h
+++ b/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h
@@ -19,7 +19,8 @@
 
 class MockDelayManager : public DelayManager {
  public:
-  MockDelayManager(int max_packets_in_buffer, DelayPeakDetector* peak_detector)
+  MockDelayManager(size_t max_packets_in_buffer,
+                   DelayPeakDetector* peak_detector)
       : DelayManager(max_packets_in_buffer, peak_detector) {}
   virtual ~MockDelayManager() { Die(); }
   MOCK_METHOD0(Die, void());
diff --git a/webrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h b/webrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
index 881e900..a1c370e 100644
--- a/webrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
+++ b/webrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
@@ -26,7 +26,7 @@
   MOCK_METHOD0(Reset,
       void());
   MOCK_METHOD2(Generate,
-      int(int num_samples, AudioMultiVector* output));
+      int(size_t num_samples, AudioMultiVector* output));
   MOCK_CONST_METHOD0(initialized,
       bool());
 };
diff --git a/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h b/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
index d8c8856..f239b4a 100644
--- a/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
+++ b/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
@@ -36,10 +36,9 @@
                      int sample_rate_hz,
                      int16_t* decoded,
                      SpeechType* speech_type) override {
-    int16_t ret = WebRtcPcm16b_Decode(
-        encoded, static_cast<int16_t>(encoded_len), decoded);
+    size_t ret = WebRtcPcm16b_Decode(encoded, encoded_len, decoded);
     *speech_type = ConvertSpeechType(1);
-    return ret;
+    return static_cast<int>(ret);
   }
   size_t Channels() const override { return 1; }
 
@@ -79,7 +78,7 @@
   MOCK_CONST_METHOD0(HasDecodePlc,
       bool());
   MOCK_METHOD2(DecodePlc,
-      int(int num_frames, int16_t* decoded));
+      size_t(size_t num_frames, int16_t* decoded));
   MOCK_METHOD0(Init,
       int());
   MOCK_METHOD5(IncomingPacket,
diff --git a/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h b/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h
index 0eb7edc..97e54d8 100644
--- a/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h
+++ b/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h
@@ -41,7 +41,7 @@
   MOCK_CONST_METHOD0(NextRtpHeader,
       const RTPHeader*());
   MOCK_METHOD1(GetNextPacket,
-      Packet*(int* discard_count));
+      Packet*(size_t* discard_count));
   MOCK_METHOD0(DiscardNextPacket,
       int());
   MOCK_METHOD2(DiscardOldPackets,
@@ -49,7 +49,7 @@
   MOCK_METHOD1(DiscardAllOldPackets,
       int(uint32_t timestamp_limit));
   MOCK_CONST_METHOD0(NumPacketsInBuffer,
-      int());
+      size_t());
   MOCK_METHOD1(IncrementWaitingTimes,
       void(int));
   MOCK_CONST_METHOD0(current_memory_bytes,
diff --git a/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
index 6f57a4a..3c945f9 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
@@ -169,7 +169,7 @@
 class NetEqExternalVsInternalDecoderTest : public NetEqExternalDecoderUnitTest,
                                            public ::testing::Test {
  protected:
-  static const int kMaxBlockSize = 480;  // 10 ms @ 48 kHz.
+  static const size_t kMaxBlockSize = 480;  // 10 ms @ 48 kHz.
 
   NetEqExternalVsInternalDecoderTest()
       : NetEqExternalDecoderUnitTest(kDecoderPCM16Bswb32kHz,
@@ -188,7 +188,7 @@
 
   void GetAndVerifyOutput() override {
     NetEqOutputType output_type;
-    int samples_per_channel;
+    size_t samples_per_channel;
     int num_channels;
     // Get audio from internal decoder instance.
     EXPECT_EQ(NetEq::kOK,
@@ -198,12 +198,13 @@
                                         &num_channels,
                                         &output_type));
     EXPECT_EQ(1, num_channels);
-    EXPECT_EQ(kOutputLengthMs * sample_rate_hz_ / 1000, samples_per_channel);
+    EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
+              samples_per_channel);
 
     // Get audio from external decoder instance.
     samples_per_channel = GetOutputAudio(kMaxBlockSize, output_, &output_type);
 
-    for (int i = 0; i < samples_per_channel; ++i) {
+    for (size_t i = 0; i < samples_per_channel; ++i) {
       ASSERT_EQ(output_[i], output_internal_[i]) <<
           "Diff in sample " << i << ".";
     }
@@ -240,7 +241,7 @@
 class LargeTimestampJumpTest : public NetEqExternalDecoderUnitTest,
                                public ::testing::Test {
  protected:
-  static const int kMaxBlockSize = 480;  // 10 ms @ 48 kHz.
+  static const size_t kMaxBlockSize = 480;  // 10 ms @ 48 kHz.
 
   enum TestStates {
     kInitialPhase,
@@ -293,7 +294,7 @@
   }
 
   void GetAndVerifyOutput() override {
-    int num_samples;
+    size_t num_samples;
     NetEqOutputType output_type;
     num_samples = GetOutputAudio(kMaxBlockSize, output_, &output_type);
     UpdateState(output_type);
@@ -303,7 +304,7 @@
       return;
     }
 
-    for (int i = 0; i < num_samples; ++i) {
+    for (size_t i = 0; i < num_samples; ++i) {
       if (output_[i] != 0)
         return;
     }
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl.cc b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
index 636ae87..d890acb 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
@@ -16,6 +16,7 @@
 #include <algorithm>
 
 #include "webrtc/base/logging.h"
+#include "webrtc/base/safe_conversions.h"
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
 #include "webrtc/modules/audio_coding/neteq/accelerate.h"
@@ -104,7 +105,7 @@
   }
   fs_hz_ = fs;
   fs_mult_ = fs / 8000;
-  output_size_samples_ = kOutputSizeMs * 8 * fs_mult_;
+  output_size_samples_ = static_cast<size_t>(kOutputSizeMs * 8 * fs_mult_);
   decoder_frame_length_ = 3 * output_size_samples_;
   WebRtcSpl_Init();
   if (create_components) {
@@ -154,7 +155,7 @@
 }
 
 int NetEqImpl::GetAudio(size_t max_length, int16_t* output_audio,
-                        int* samples_per_channel, int* num_channels,
+                        size_t* samples_per_channel, int* num_channels,
                         NetEqOutputType* type) {
   CriticalSectionScoped lock(crit_sect_.get());
   LOG(LS_VERBOSE) << "GetAudio";
@@ -305,10 +306,10 @@
 int NetEqImpl::NetworkStatistics(NetEqNetworkStatistics* stats) {
   CriticalSectionScoped lock(crit_sect_.get());
   assert(decoder_database_.get());
-  const int total_samples_in_buffers =
+  const size_t total_samples_in_buffers =
       packet_buffer_->NumSamplesInBuffer(decoder_database_.get(),
                                          decoder_frame_length_) +
-      static_cast<int>(sync_buffer_->FutureLength());
+      sync_buffer_->FutureLength();
   assert(delay_manager_.get());
   assert(decision_logic_.get());
   stats_.GetNetworkStatistics(fs_hz_, total_samples_in_buffers,
@@ -603,7 +604,7 @@
   }
 
   // Insert packets in buffer.
-  int temp_bufsize = packet_buffer_->NumPacketsInBuffer();
+  size_t temp_bufsize = packet_buffer_->NumPacketsInBuffer();
   ret = packet_buffer_->InsertPacketList(
       &packet_list,
       *decoder_database_,
@@ -665,7 +666,8 @@
     if ((temp_bufsize > 0) &&
         (temp_bufsize != decision_logic_->packet_length_samples())) {
       decision_logic_->set_packet_length_samples(temp_bufsize);
-      delay_manager_->SetPacketAudioLength((1000 * temp_bufsize) / fs_hz_);
+      delay_manager_->SetPacketAudioLength(
+          static_cast<int>((1000 * temp_bufsize) / fs_hz_));
     }
 
     // Update statistics.
@@ -688,7 +690,7 @@
 
 int NetEqImpl::GetAudioInternal(size_t max_length,
                                 int16_t* output,
-                                int* samples_per_channel,
+                                size_t* samples_per_channel,
                                 int* num_channels) {
   PacketList packet_list;
   DtmfEvent dtmf_event;
@@ -712,7 +714,7 @@
   assert(vad_.get());
   bool sid_frame_available =
       (operation == kRfc3389Cng && !packet_list.empty());
-  vad_->Update(decoded_buffer_.get(), length, speech_type,
+  vad_->Update(decoded_buffer_.get(), static_cast<size_t>(length), speech_type,
                sid_frame_available, fs_hz_);
 
   algorithm_buffer_->Clear();
@@ -811,12 +813,11 @@
     LOG(LS_WARNING) << "Output array is too short. " << max_length << " < " <<
         output_size_samples_ << " * " << sync_buffer_->Channels();
     num_output_samples = max_length;
-    num_output_samples_per_channel = static_cast<int>(
-        max_length / sync_buffer_->Channels());
+    num_output_samples_per_channel = max_length / sync_buffer_->Channels();
   }
-  const int samples_from_sync =
-      static_cast<int>(sync_buffer_->GetNextAudioInterleaved(
-          num_output_samples_per_channel, output));
+  const size_t samples_from_sync =
+      sync_buffer_->GetNextAudioInterleaved(num_output_samples_per_channel,
+                                            output);
   *num_channels = static_cast<int>(sync_buffer_->Channels());
   LOG(LS_VERBOSE) << "Sync buffer (" << *num_channels << " channel(s)):" <<
       " insert " << algorithm_buffer_->Size() << " samples, extract " <<
@@ -922,7 +923,8 @@
       last_mode_ == kModePreemptiveExpandSuccess ||
       last_mode_ == kModePreemptiveExpandLowEnergy) {
     // Subtract (samples_left + output_size_samples_) from sampleMemory.
-    decision_logic_->AddSampleMemory(-(samples_left + output_size_samples_));
+    decision_logic_->AddSampleMemory(
+        -(samples_left + rtc::checked_cast<int>(output_size_samples_)));
   }
 
   // Check if it is time to play a DTMF event.
@@ -947,8 +949,10 @@
   // Check if we already have enough samples in the |sync_buffer_|. If so,
   // change decision to normal, unless the decision was merge, accelerate, or
   // preemptive expand.
-  if (samples_left >= output_size_samples_ && *operation != kMerge &&
-      *operation != kAccelerate && *operation != kFastAccelerate &&
+  if (samples_left >= rtc::checked_cast<int>(output_size_samples_) &&
+      *operation != kMerge &&
+      *operation != kAccelerate &&
+      *operation != kFastAccelerate &&
       *operation != kPreemptiveExpand) {
     *operation = kNormal;
     return 0;
@@ -996,10 +1000,10 @@
     stats_.ResetMcu();
   }
 
-  int required_samples = output_size_samples_;
-  const int samples_10_ms = 80 * fs_mult_;
-  const int samples_20_ms = 2 * samples_10_ms;
-  const int samples_30_ms = 3 * samples_10_ms;
+  size_t required_samples = output_size_samples_;
+  const size_t samples_10_ms = static_cast<size_t>(80 * fs_mult_);
+  const size_t samples_20_ms = 2 * samples_10_ms;
+  const size_t samples_30_ms = 3 * samples_10_ms;
 
   switch (*operation) {
     case kExpand: {
@@ -1028,17 +1032,17 @@
     case kAccelerate:
     case kFastAccelerate: {
       // In order to do an accelerate we need at least 30 ms of audio data.
-      if (samples_left >= samples_30_ms) {
+      if (samples_left >= static_cast<int>(samples_30_ms)) {
         // Already have enough data, so we do not need to extract any more.
         decision_logic_->set_sample_memory(samples_left);
         decision_logic_->set_prev_time_scale(true);
         return 0;
-      } else if (samples_left >= samples_10_ms &&
+      } else if (samples_left >= static_cast<int>(samples_10_ms) &&
           decoder_frame_length_ >= samples_30_ms) {
         // Avoid decoding more data as it might overflow the playout buffer.
         *operation = kNormal;
         return 0;
-      } else if (samples_left < samples_20_ms &&
+      } else if (samples_left < static_cast<int>(samples_20_ms) &&
           decoder_frame_length_ < samples_30_ms) {
         // Build up decoded data by decoding at least 20 ms of audio data. Do
         // not perform accelerate yet, but wait until we only need to do one
@@ -1056,8 +1060,8 @@
     case kPreemptiveExpand: {
       // In order to do a preemptive expand we need at least 30 ms of decoded
       // audio data.
-      if ((samples_left >= samples_30_ms) ||
-          (samples_left >= samples_10_ms &&
+      if ((samples_left >= static_cast<int>(samples_30_ms)) ||
+          (samples_left >= static_cast<int>(samples_10_ms) &&
               decoder_frame_length_ >= samples_30_ms)) {
         // Already have enough data, so we do not need to extract any more.
         // Or, avoid decoding more data as it might overflow the playout buffer.
@@ -1066,7 +1070,7 @@
         decision_logic_->set_prev_time_scale(true);
         return 0;
       }
-      if (samples_left < samples_20_ms &&
+      if (samples_left < static_cast<int>(samples_20_ms) &&
           decoder_frame_length_ < samples_30_ms) {
         // Build up decoded data by decoding at least 20 ms of audio data.
         // Still try to perform preemptive expand.
@@ -1123,7 +1127,7 @@
 
   if (*operation == kAccelerate || *operation == kFastAccelerate) {
     // Check that we have enough data (30ms) to do accelerate.
-    if (extracted_samples + samples_left < samples_30_ms) {
+    if (extracted_samples + samples_left < static_cast<int>(samples_30_ms)) {
       // TODO(hlundin): Write test for this.
       // Not enough, do normal operation instead.
       *operation = kNormal;
@@ -1274,7 +1278,7 @@
       memset(&decoded_buffer_[*decoded_length], 0,
              decoder_frame_length_ * decoder->Channels() *
                  sizeof(decoded_buffer_[0]));
-      decode_length = decoder_frame_length_;
+      decode_length = rtc::checked_cast<int>(decoder_frame_length_);
     } else if (!packet->primary) {
       // This is a redundant payload; call the special decoder method.
       LOG(LS_VERBOSE) << "Decoding packet (redundant):" <<
@@ -1307,7 +1311,7 @@
       *decoded_length += decode_length;
       // Update |decoder_frame_length_| with number of samples per channel.
       decoder_frame_length_ =
-          decode_length / static_cast<int>(decoder->Channels());
+          static_cast<size_t>(decode_length) / decoder->Channels();
       LOG(LS_VERBOSE) << "Decoded " << decode_length << " samples ("
                       << decoder->Channels() << " channel(s) -> "
                       << decoder_frame_length_ << " samples per channel)";
@@ -1366,11 +1370,11 @@
                         AudioDecoder::SpeechType speech_type, bool play_dtmf) {
   assert(mute_factor_array_.get());
   assert(merge_.get());
-  int new_length = merge_->Process(decoded_buffer, decoded_length,
-                                   mute_factor_array_.get(),
-                                   algorithm_buffer_.get());
-  int expand_length_correction = new_length -
-      static_cast<int>(decoded_length / algorithm_buffer_->Channels());
+  size_t new_length = merge_->Process(decoded_buffer, decoded_length,
+                                      mute_factor_array_.get(),
+                                      algorithm_buffer_.get());
+  size_t expand_length_correction = new_length -
+      decoded_length / algorithm_buffer_->Channels();
 
   // Update in-call and post-call statistics.
   if (expand_->MuteFactor(0) == 0) {
@@ -1394,10 +1398,10 @@
 
 int NetEqImpl::DoExpand(bool play_dtmf) {
   while ((sync_buffer_->FutureLength() - expand_->overlap_length()) <
-      static_cast<size_t>(output_size_samples_)) {
+      output_size_samples_) {
     algorithm_buffer_->Clear();
     int return_value = expand_->Process(algorithm_buffer_.get());
-    int length = static_cast<int>(algorithm_buffer_->Size());
+    size_t length = algorithm_buffer_->Size();
 
     // Update in-call and post-call statistics.
     if (expand_->MuteFactor(0) == 0) {
@@ -1428,7 +1432,8 @@
                             AudioDecoder::SpeechType speech_type,
                             bool play_dtmf,
                             bool fast_accelerate) {
-  const size_t required_samples = 240 * fs_mult_;  // Must have 30 ms.
+  const size_t required_samples =
+      static_cast<size_t>(240 * fs_mult_);  // Must have 30 ms.
   size_t borrowed_samples_per_channel = 0;
   size_t num_channels = algorithm_buffer_->Channels();
   size_t decoded_length_per_channel = decoded_length / num_channels;
@@ -1444,7 +1449,7 @@
     decoded_length = required_samples * num_channels;
   }
 
-  int16_t samples_removed;
+  size_t samples_removed;
   Accelerate::ReturnCodes return_code =
       accelerate_->Process(decoded_buffer, decoded_length, fast_accelerate,
                            algorithm_buffer_.get(), &samples_removed);
@@ -1501,20 +1506,20 @@
                                   size_t decoded_length,
                                   AudioDecoder::SpeechType speech_type,
                                   bool play_dtmf) {
-  const size_t required_samples = 240 * fs_mult_;  // Must have 30 ms.
+  const size_t required_samples =
+      static_cast<size_t>(240 * fs_mult_);  // Must have 30 ms.
   size_t num_channels = algorithm_buffer_->Channels();
-  int borrowed_samples_per_channel = 0;
-  int old_borrowed_samples_per_channel = 0;
+  size_t borrowed_samples_per_channel = 0;
+  size_t old_borrowed_samples_per_channel = 0;
   size_t decoded_length_per_channel = decoded_length / num_channels;
   if (decoded_length_per_channel < required_samples) {
     // Must move data from the |sync_buffer_| in order to get 30 ms.
-    borrowed_samples_per_channel = static_cast<int>(required_samples -
-        decoded_length_per_channel);
+    borrowed_samples_per_channel =
+        required_samples - decoded_length_per_channel;
     // Calculate how many of these were already played out.
-    const int future_length = static_cast<int>(sync_buffer_->FutureLength());
     old_borrowed_samples_per_channel =
-        (borrowed_samples_per_channel > future_length) ?
-        (borrowed_samples_per_channel - future_length) : 0;
+        (borrowed_samples_per_channel > sync_buffer_->FutureLength()) ?
+        (borrowed_samples_per_channel - sync_buffer_->FutureLength()) : 0;
     memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
             decoded_buffer,
             sizeof(int16_t) * decoded_length);
@@ -1523,9 +1528,9 @@
     decoded_length = required_samples * num_channels;
   }
 
-  int16_t samples_added;
+  size_t samples_added;
   PreemptiveExpand::ReturnCodes return_code = preemptive_expand_->Process(
-      decoded_buffer, static_cast<int>(decoded_length),
+      decoded_buffer, decoded_length,
       old_borrowed_samples_per_channel,
       algorithm_buffer_.get(), &samples_added);
   stats_.PreemptiveExpandedSamples(samples_added);
@@ -1719,17 +1724,14 @@
 
 void NetEqImpl::DoAlternativePlc(bool increase_timestamp) {
   AudioDecoder* decoder = decoder_database_->GetActiveDecoder();
-  int length;
+  size_t length;
   if (decoder && decoder->HasDecodePlc()) {
     // Use the decoder's packet-loss concealment.
     // TODO(hlundin): Will probably need a longer buffer for multi-channel.
     int16_t decoded_buffer[kMaxFrameSize];
     length = decoder->DecodePlc(1, decoded_buffer);
-    if (length > 0) {
+    if (length > 0)
       algorithm_buffer_->PushBackInterleaved(decoded_buffer, length);
-    } else {
-      length = 0;
-    }
   } else {
     // Do simple zero-stuffing.
     length = output_size_samples_;
@@ -1746,14 +1748,14 @@
 int NetEqImpl::DtmfOverdub(const DtmfEvent& dtmf_event, size_t num_channels,
                            int16_t* output) const {
   size_t out_index = 0;
-  int overdub_length = output_size_samples_;  // Default value.
+  size_t overdub_length = output_size_samples_;  // Default value.
 
   if (sync_buffer_->dtmf_index() > sync_buffer_->next_index()) {
     // Special operation for transition from "DTMF only" to "DTMF overdub".
     out_index = std::min(
         sync_buffer_->dtmf_index() - sync_buffer_->next_index(),
-        static_cast<size_t>(output_size_samples_));
-    overdub_length = output_size_samples_ - static_cast<int>(out_index);
+        output_size_samples_);
+    overdub_length = output_size_samples_ - out_index;
   }
 
   AudioMultiVector dtmf_output(num_channels);
@@ -1765,13 +1767,14 @@
   if (dtmf_return_value == 0) {
     dtmf_return_value = dtmf_tone_generator_->Generate(overdub_length,
                                                        &dtmf_output);
-    assert((size_t) overdub_length == dtmf_output.Size());
+    assert(overdub_length == dtmf_output.Size());
   }
   dtmf_output.ReadInterleaved(overdub_length, &output[out_index]);
   return dtmf_return_value < 0 ? dtmf_return_value : 0;
 }
 
-int NetEqImpl::ExtractPackets(int required_samples, PacketList* packet_list) {
+int NetEqImpl::ExtractPackets(size_t required_samples,
+                              PacketList* packet_list) {
   bool first_packet = true;
   uint8_t prev_payload_type = 0;
   uint32_t prev_timestamp = 0;
@@ -1790,7 +1793,7 @@
   // Packet extraction loop.
   do {
     timestamp_ = header->timestamp;
-    int discard_count = 0;
+    size_t discard_count = 0;
     Packet* packet = packet_buffer_->GetNextPacket(&discard_count);
     // |header| may be invalid after the |packet_buffer_| operation.
     header = NULL;
@@ -1819,7 +1822,7 @@
         packet->header.payloadType);
     if (decoder) {
       if (packet->sync_packet) {
-        packet_duration = decoder_frame_length_;
+        packet_duration = rtc::checked_cast<int>(decoder_frame_length_);
       } else {
         if (packet->primary) {
           packet_duration = decoder->PacketDuration(packet->payload,
@@ -1838,7 +1841,7 @@
     if (packet_duration <= 0) {
       // Decoder did not return a packet duration. Assume that the packet
       // contains the same number of samples as the previous one.
-      packet_duration = decoder_frame_length_;
+      packet_duration = rtc::checked_cast<int>(decoder_frame_length_);
     }
     extracted_samples = packet->header.timestamp - first_timestamp +
         packet_duration;
@@ -1848,7 +1851,7 @@
     next_packet_available = false;
     if (header && prev_payload_type == header->payloadType) {
       int16_t seq_no_diff = header->sequenceNumber - prev_sequence_number;
-      int32_t ts_diff = header->timestamp - prev_timestamp;
+      size_t ts_diff = header->timestamp - prev_timestamp;
       if (seq_no_diff == 1 ||
           (seq_no_diff == 0 && ts_diff == decoder_frame_length_)) {
         // The next sequence number is available, or the next part of a packet
@@ -1857,7 +1860,8 @@
       }
       prev_sequence_number = header->sequenceNumber;
     }
-  } while (extracted_samples < required_samples && next_packet_available);
+  } while (extracted_samples < rtc::checked_cast<int>(required_samples) &&
+           next_packet_available);
 
   if (extracted_samples > 0) {
     // Delete old packets only when we are going to decode something. Otherwise,
@@ -1886,7 +1890,7 @@
 
   fs_hz_ = fs_hz;
   fs_mult_ = fs_hz / 8000;
-  output_size_samples_ = kOutputSizeMs * 8 * fs_mult_;
+  output_size_samples_ = static_cast<size_t>(kOutputSizeMs * 8 * fs_mult_);
   decoder_frame_length_ = 3 * output_size_samples_;  // Initialize to 30ms.
 
   last_mode_ = kModeNormal;
@@ -1931,9 +1935,7 @@
   accelerate_.reset(
       accelerate_factory_->Create(fs_hz, channels, *background_noise_));
   preemptive_expand_.reset(preemptive_expand_factory_->Create(
-      fs_hz, channels,
-      *background_noise_,
-      static_cast<int>(expand_->overlap_length())));
+      fs_hz, channels, *background_noise_, expand_->overlap_length()));
 
   // Delete ComfortNoise object and create a new one.
   comfort_noise_.reset(new ComfortNoise(fs_hz, decoder_database_.get(),
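
The new rtc::checked_cast<int>(...) calls above come from the freshly
included webrtc/base/safe_conversions.h. Conceptually it behaves like the
following sketch (an approximation for illustration; the real helper is more
elaborate): it converts between integer types but crashes instead of silently
truncating or flipping sign when the value does not fit, which is exactly the
failure mode a size_t-to-int narrowing can hit.

#include <cassert>

// Simplified stand-in for rtc::checked_cast (illustration only).
template <typename Dst, typename Src>
Dst CheckedCastSketch(Src value) {
  Dst result = static_cast<Dst>(value);
  assert(static_cast<Src>(result) == value);    // Detects truncation.
  assert((result < Dst{}) == (value < Src{}));  // Detects sign flips.
  return result;
}

// Usage mirroring the call sites above:
//   int samples = CheckedCastSketch<int>(output_size_samples_);
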
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl.h b/webrtc/modules/audio_coding/neteq/neteq_impl.h
index 55ba067..502204a 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl.h
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl.h
@@ -106,7 +106,7 @@
   // Returns kOK on success, or kFail in case of an error.
   int GetAudio(size_t max_length,
                int16_t* output_audio,
-               int* samples_per_channel,
+               size_t* samples_per_channel,
                int* num_channels,
                NetEqOutputType* type) override;
 
@@ -203,9 +203,9 @@
 
  protected:
   static const int kOutputSizeMs = 10;
-  static const int kMaxFrameSize = 2880;  // 60 ms @ 48 kHz.
+  static const size_t kMaxFrameSize = 2880;  // 60 ms @ 48 kHz.
   // TODO(hlundin): Provide a better value for kSyncBufferSize.
-  static const int kSyncBufferSize = 2 * kMaxFrameSize;
+  static const size_t kSyncBufferSize = 2 * kMaxFrameSize;
 
   // Inserts a new packet into NetEq. This is used by the InsertPacket method
   // above. Returns 0 on success, otherwise an error code.
@@ -225,7 +225,7 @@
   // Returns 0 on success, otherwise an error code.
   int GetAudioInternal(size_t max_length,
                        int16_t* output,
-                       int* samples_per_channel,
+                       size_t* samples_per_channel,
                        int* num_channels) EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
 
   // Provides a decision to the GetAudioInternal method. The decision what to
@@ -318,7 +318,7 @@
   // |required_samples| samples. The packets are inserted into |packet_list|.
   // Returns the number of samples that the packets in the list will produce, or
   // -1 in case of an error.
-  int ExtractPackets(int required_samples, PacketList* packet_list)
+  int ExtractPackets(size_t required_samples, PacketList* packet_list)
       EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
 
   // Resets various variables and objects to new values based on the sample rate
@@ -375,8 +375,8 @@
   StatisticsCalculator stats_ GUARDED_BY(crit_sect_);
   int fs_hz_ GUARDED_BY(crit_sect_);
   int fs_mult_ GUARDED_BY(crit_sect_);
-  int output_size_samples_ GUARDED_BY(crit_sect_);
-  int decoder_frame_length_ GUARDED_BY(crit_sect_);
+  size_t output_size_samples_ GUARDED_BY(crit_sect_);
+  size_t decoder_frame_length_ GUARDED_BY(crit_sect_);
   Modes last_mode_ GUARDED_BY(crit_sect_);
   rtc::scoped_ptr<int16_t[]> mute_factor_array_ GUARDED_BY(crit_sect_);
   size_t decoded_buffer_length_ GUARDED_BY(crit_sect_);
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
index 05a8de2..006a5ad 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
@@ -384,7 +384,7 @@
             neteq_->RegisterPayloadType(kDecoderPCM16B, kPayloadType));
 
   // Insert packets. The buffer should not flush.
-  for (int i = 1; i <= config_.max_packets_in_buffer; ++i) {
+  for (size_t i = 1; i <= config_.max_packets_in_buffer; ++i) {
     EXPECT_EQ(NetEq::kOK,
               neteq_->InsertPacket(
                   rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
@@ -398,7 +398,7 @@
   EXPECT_EQ(NetEq::kOK,
             neteq_->InsertPacket(
                 rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
-  EXPECT_EQ(1, packet_buffer_->NumPacketsInBuffer());
+  EXPECT_EQ(1u, packet_buffer_->NumPacketsInBuffer());
   const RTPHeader* test_header = packet_buffer_->NextRtpHeader();
   EXPECT_EQ(rtp_header.header.timestamp, test_header->timestamp);
   EXPECT_EQ(rtp_header.header.sequenceNumber, test_header->sequenceNumber);
@@ -413,7 +413,8 @@
   const uint8_t kPayloadType = 17;   // Just an arbitrary number.
   const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
   const int kSampleRateHz = 8000;
-  const int kPayloadLengthSamples = 10 * kSampleRateHz / 1000;  // 10 ms.
+  const size_t kPayloadLengthSamples =
+      static_cast<size_t>(10 * kSampleRateHz / 1000);  // 10 ms.
   const size_t kPayloadLengthBytes = kPayloadLengthSamples;
   uint8_t payload[kPayloadLengthBytes] = {0};
   WebRtcRTPHeader rtp_header;
@@ -466,9 +467,9 @@
                 rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
 
   // Pull audio once.
-  const int kMaxOutputSize = 10 * kSampleRateHz / 1000;
+  const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
   int16_t output[kMaxOutputSize];
-  int samples_per_channel;
+  size_t samples_per_channel;
   int num_channels;
   NetEqOutputType type;
   EXPECT_EQ(
@@ -480,7 +481,8 @@
   EXPECT_EQ(kOutputNormal, type);
 
   // Start with a simple check that the fake decoder is behaving as expected.
-  EXPECT_EQ(kPayloadLengthSamples, decoder_.next_value() - 1);
+  EXPECT_EQ(kPayloadLengthSamples,
+            static_cast<size_t>(decoder_.next_value() - 1));
 
   // The value of the last of the output samples is the same as the number of
   // samples played from the decoded packet. Thus, this number + the RTP
@@ -500,7 +502,7 @@
   // Check that the number of samples still to play from the sync buffer adds
   // up with what was already played out.
   EXPECT_EQ(kPayloadLengthSamples - output[samples_per_channel - 1],
-            static_cast<int>(sync_buffer->FutureLength()));
+            sync_buffer->FutureLength());
 }
 
 TEST_F(NetEqImplTest, ReorderedPacket) {
@@ -510,7 +512,8 @@
   const uint8_t kPayloadType = 17;   // Just an arbitrary number.
   const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
   const int kSampleRateHz = 8000;
-  const int kPayloadLengthSamples = 10 * kSampleRateHz / 1000;  // 10 ms.
+  const size_t kPayloadLengthSamples =
+      static_cast<size_t>(10 * kSampleRateHz / 1000);  // 10 ms.
   const size_t kPayloadLengthBytes = kPayloadLengthSamples;
   uint8_t payload[kPayloadLengthBytes] = {0};
   WebRtcRTPHeader rtp_header;
@@ -544,9 +547,9 @@
                 rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
 
   // Pull audio once.
-  const int kMaxOutputSize = 10 * kSampleRateHz / 1000;
+  const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
   int16_t output[kMaxOutputSize];
-  int samples_per_channel;
+  size_t samples_per_channel;
   int num_channels;
   NetEqOutputType type;
   EXPECT_EQ(
@@ -606,7 +609,8 @@
   const uint8_t kPayloadType = 17;   // Just an arbitrary number.
   const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
   const int kSampleRateHz = 8000;
-  const int kPayloadLengthSamples = 10 * kSampleRateHz / 1000;  // 10 ms.
+  const size_t kPayloadLengthSamples =
+      static_cast<size_t>(10 * kSampleRateHz / 1000);  // 10 ms.
   const size_t kPayloadLengthBytes = kPayloadLengthSamples;
   uint8_t payload[kPayloadLengthBytes] = {0};
   WebRtcRTPHeader rtp_header;
@@ -623,9 +627,9 @@
   EXPECT_EQ(NetEq::kUnknownRtpPayloadType, neteq_->LastError());
 
   // Pull audio once.
-  const int kMaxOutputSize = 10 * kSampleRateHz / 1000;
+  const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
   int16_t output[kMaxOutputSize];
-  int samples_per_channel;
+  size_t samples_per_channel;
   int num_channels;
   NetEqOutputType type;
   EXPECT_EQ(NetEq::kOK,
@@ -641,7 +645,7 @@
             neteq_->RegisterPayloadType(kDecoderPCM16B, kPayloadType));
 
   // Insert 10 packets.
-  for (int i = 0; i < 10; ++i) {
+  for (size_t i = 0; i < 10; ++i) {
     rtp_header.header.sequenceNumber++;
     rtp_header.header.timestamp += kPayloadLengthSamples;
     EXPECT_EQ(NetEq::kOK,
@@ -651,7 +655,7 @@
   }
 
   // Pull audio repeatedly and make sure we get normal output, i.e. not PLC.
-  for (int i = 0; i < 3; ++i) {
+  for (size_t i = 0; i < 3; ++i) {
     EXPECT_EQ(NetEq::kOK,
               neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
                                &num_channels, &type));
@@ -672,8 +676,9 @@
   const uint8_t kPayloadType = 17;   // Just an arbitrary number.
   const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
   const int kSampleRateKhz = 48;
-  const int kPayloadLengthSamples = 20 * kSampleRateKhz;  // 20 ms.
-  const int kPayloadLengthBytes = 10;
+  const size_t kPayloadLengthSamples =
+      static_cast<size_t>(20 * kSampleRateKhz);  // 20 ms.
+  const size_t kPayloadLengthBytes = 10;
   uint8_t payload[kPayloadLengthBytes] = {0};
   int16_t dummy_output[kPayloadLengthSamples] = {0};
 
@@ -736,9 +741,9 @@
             neteq_->InsertPacket(
                 rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
 
-  const int kMaxOutputSize = 10 * kSampleRateKhz;
+  const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateKhz);
   int16_t output[kMaxOutputSize];
-  int samples_per_channel;
+  size_t samples_per_channel;
   int num_channels;
   uint32_t timestamp;
   uint32_t last_timestamp;
@@ -762,7 +767,7 @@
                              &num_channels, &type));
   EXPECT_TRUE(neteq_->GetPlayoutTimestamp(&last_timestamp));
 
-  for (int i = 1; i < 6; ++i) {
+  for (size_t i = 1; i < 6; ++i) {
     ASSERT_EQ(kMaxOutputSize, samples_per_channel);
     EXPECT_EQ(1, num_channels);
     EXPECT_EQ(expected_type[i - 1], type);
@@ -783,7 +788,7 @@
             neteq_->InsertPacket(
                 rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
 
-  for (int i = 6; i < 8; ++i) {
+  for (size_t i = 6; i < 8; ++i) {
     ASSERT_EQ(kMaxOutputSize, samples_per_channel);
     EXPECT_EQ(1, num_channels);
     EXPECT_EQ(expected_type[i - 1], type);
@@ -811,7 +816,8 @@
   const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
   const int kSampleRateHz = 8000;
 
-  const int kPayloadLengthSamples = 10 * kSampleRateHz / 1000;  // 10 ms.
+  const size_t kPayloadLengthSamples =
+      static_cast<size_t>(10 * kSampleRateHz / 1000);  // 10 ms.
   const size_t kPayloadLengthBytes = 1;
   uint8_t payload[kPayloadLengthBytes]= {0};
   int16_t dummy_output[kPayloadLengthSamples * kChannels] = {0};
@@ -852,7 +858,8 @@
                                           dummy_output +
                                           kPayloadLengthSamples * kChannels),
                       SetArgPointee<4>(AudioDecoder::kSpeech),
-                      Return(kPayloadLengthSamples * kChannels)));
+                      Return(static_cast<int>(
+                          kPayloadLengthSamples * kChannels))));
 
   EXPECT_CALL(decoder_, PacketDuration(Pointee(kSecondPayloadValue),
                                        kPayloadLengthBytes))
@@ -879,9 +886,10 @@
             neteq_->InsertPacket(
                 rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
 
-  const int kMaxOutputSize = 10 * kSampleRateHz / 1000 * kChannels;
+  const size_t kMaxOutputSize =
+      static_cast<size_t>(10 * kSampleRateHz / 1000 * kChannels);
   int16_t output[kMaxOutputSize];
-  int samples_per_channel;
+  size_t samples_per_channel;
   int num_channels;
   NetEqOutputType type;
 
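The 1u literals introduced in these tests are not cosmetic: EXPECT_EQ
compares its arguments with operator==, so once NumPacketsInBuffer() returns
size_t, a plain int literal would produce signed/unsigned comparison
warnings. Matching the signedness on both sides, sketched here (assuming the
usual gtest setup and a hypothetical stand-in function), keeps the build
warning-clean:

#include <cstddef>
#include <gtest/gtest.h>

size_t NumPackets() { return 1; }  // Hypothetical stand-in for the query.

TEST(SizeTypeMigration, UnsignedExpectations) {
  EXPECT_EQ(1u, NumPackets());                      // Both sides unsigned.
  EXPECT_EQ(static_cast<size_t>(1), NumPackets());  // Equivalent spelling.
}
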
diff --git a/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
index ea88f24..5564e20 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
@@ -43,7 +43,7 @@
 class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
  protected:
   static const int kTimeStepMs = 10;
-  static const int kMaxBlockSize = 480;  // 10 ms @ 48 kHz.
+  static const size_t kMaxBlockSize = 480;  // 10 ms @ 48 kHz.
   static const uint8_t kPayloadTypeMono = 95;
   static const uint8_t kPayloadTypeMulti = 96;
 
@@ -52,7 +52,8 @@
         sample_rate_hz_(GetParam().sample_rate),
         samples_per_ms_(sample_rate_hz_ / 1000),
         frame_size_ms_(GetParam().frame_size),
-        frame_size_samples_(frame_size_ms_ * samples_per_ms_),
+        frame_size_samples_(
+            static_cast<size_t>(frame_size_ms_ * samples_per_ms_)),
         output_size_samples_(10 * samples_per_ms_),
         rtp_generator_mono_(samples_per_ms_),
         rtp_generator_(samples_per_ms_),
@@ -212,7 +213,7 @@
       }
       NetEqOutputType output_type;
       // Get audio from mono instance.
-      int samples_per_channel;
+      size_t samples_per_channel;
       int num_channels;
       EXPECT_EQ(NetEq::kOK,
                 neteq_mono_->GetAudio(kMaxBlockSize, output_,
@@ -242,8 +243,8 @@
   const int sample_rate_hz_;
   const int samples_per_ms_;
   const int frame_size_ms_;
-  const int frame_size_samples_;
-  const int output_size_samples_;
+  const size_t frame_size_samples_;
+  const size_t output_size_samples_;
   NetEq* neteq_mono_;
   NetEq* neteq_;
   test::RtpGenerator rtp_generator_mono_;
@@ -256,8 +257,8 @@
   int16_t* output_multi_channel_;
   WebRtcRTPHeader rtp_header_mono_;
   WebRtcRTPHeader rtp_header_;
-  int payload_size_bytes_;
-  int multi_payload_size_bytes_;
+  size_t payload_size_bytes_;
+  size_t multi_payload_size_bytes_;
   int last_send_time_;
   int last_arrival_time_;
   rtc::scoped_ptr<test::InputAudioFile> input_file_;
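
A recurring mechanical consequence visible in the merge.cc and neteq_impl.cc
hunks above: std::min and std::max deduce a single type from both arguments,
so mixed int/size_t calls stop compiling once one side migrates, which is why
casts like static_cast<size_t>(64 * fs_mult_) appear. A compilable sketch of
the pattern (hypothetical function, illustration only):

#include <algorithm>
#include <cstddef>

void Sketch(int fs_mult, size_t input_length) {
  // std::min<T> deduces one T from both arguments, so a mixed int/size_t
  // call is ambiguous; the int side is cast up, as in Merge::SignalScaling.
  const size_t mod_input_length =
      std::min(static_cast<size_t>(64 * fs_mult), input_length);
  (void)mod_input_length;
}
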
diff --git a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
index 7137a68..03fde53 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
@@ -37,16 +37,16 @@
 
 namespace webrtc {
 
-static bool IsAllZero(const int16_t* buf, int buf_length) {
+static bool IsAllZero(const int16_t* buf, size_t buf_length) {
   bool all_zero = true;
-  for (int n = 0; n < buf_length && all_zero; ++n)
+  for (size_t n = 0; n < buf_length && all_zero; ++n)
     all_zero = buf[n] == 0;
   return all_zero;
 }
 
-static bool IsAllNonZero(const int16_t* buf, int buf_length) {
+static bool IsAllNonZero(const int16_t* buf, size_t buf_length) {
   bool all_non_zero = true;
-  for (int n = 0; n < buf_length && all_non_zero; ++n)
+  for (size_t n = 0; n < buf_length && all_non_zero; ++n)
     all_non_zero = buf[n] != 0;
   return all_non_zero;
 }
@@ -172,7 +172,8 @@
     ASSERT_EQ(stats.preemptive_rate, ref_stats.preemptive_rate);
     ASSERT_EQ(stats.accelerate_rate, ref_stats.accelerate_rate);
     ASSERT_EQ(stats.clockdrift_ppm, ref_stats.clockdrift_ppm);
-    ASSERT_EQ(stats.added_zero_samples, ref_stats.added_zero_samples);
+    ASSERT_EQ(stats.added_zero_samples,
+              static_cast<size_t>(ref_stats.added_zero_samples));
     ASSERT_EQ(stats.secondary_decoded_rate, 0);
     ASSERT_LE(stats.speech_expand_rate, ref_stats.expand_rate);
   }
@@ -220,9 +221,9 @@
   // NetEQ must be polled for data once every 10 ms. Thus, none of the
   // constants below can be changed.
   static const int kTimeStepMs = 10;
-  static const int kBlockSize8kHz = kTimeStepMs * 8;
-  static const int kBlockSize16kHz = kTimeStepMs * 16;
-  static const int kBlockSize32kHz = kTimeStepMs * 32;
+  static const size_t kBlockSize8kHz = kTimeStepMs * 8;
+  static const size_t kBlockSize16kHz = kTimeStepMs * 16;
+  static const size_t kBlockSize32kHz = kTimeStepMs * 32;
   static const size_t kMaxBlockSize = kBlockSize32kHz;
   static const int kInitSampleRateHz = 8000;
 
@@ -232,7 +233,7 @@
   void SelectDecoders(NetEqDecoder* used_codec);
   void LoadDecoders();
   void OpenInputFile(const std::string &rtp_file);
-  void Process(int* out_len);
+  void Process(size_t* out_len);
   void DecodeAndCompare(const std::string& rtp_file,
                         const std::string& ref_file,
                         const std::string& stat_ref_file,
@@ -272,9 +273,9 @@
 
 // Defining the static consts so that they can be passed by reference.
 const int NetEqDecodingTest::kTimeStepMs;
-const int NetEqDecodingTest::kBlockSize8kHz;
-const int NetEqDecodingTest::kBlockSize16kHz;
-const int NetEqDecodingTest::kBlockSize32kHz;
+const size_t NetEqDecodingTest::kBlockSize8kHz;
+const size_t NetEqDecodingTest::kBlockSize16kHz;
+const size_t NetEqDecodingTest::kBlockSize32kHz;
 const size_t NetEqDecodingTest::kMaxBlockSize;
 const int NetEqDecodingTest::kInitSampleRateHz;
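
The out-of-class definitions just above exist because of an odr-use rule: a
static const integral member initialized in-class still needs a definition at
namespace scope if its address is taken, and gtest's EXPECT_* macros bind
their arguments to const references, which counts. Changing the declarations
to size_t therefore had to be mirrored here. A minimal sketch of the rule
(illustration only, hypothetical names):

#include <cstddef>

struct Config {
  static const size_t kBlockSize = 160;  // Declaration with initializer.
};

// Definition, no initializer: required (pre-C++17) as soon as kBlockSize
// is odr-used, e.g. bound to a const reference or has its address taken.
const size_t Config::kBlockSize;

const size_t* AddressOfBlockSize() { return &Config::kBlockSize; }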
 
@@ -334,7 +335,7 @@
   rtp_source_.reset(test::RtpFileSource::Create(rtp_file));
 }
 
-void NetEqDecodingTest::Process(int* out_len) {
+void NetEqDecodingTest::Process(size_t* out_len) {
   // Check if it is time to receive.
   while (packet_ && sim_clock_ >= packet_->time_ms()) {
     if (packet_->payload_length_bytes() > 0) {
@@ -358,7 +359,7 @@
   ASSERT_TRUE((*out_len == kBlockSize8kHz) ||
               (*out_len == kBlockSize16kHz) ||
               (*out_len == kBlockSize32kHz));
-  output_sample_rate_ = *out_len / 10 * 1000;
+  output_sample_rate_ = static_cast<int>(*out_len / 10 * 1000);
 
   // Increase time.
   sim_clock_ += kTimeStepMs;
@@ -394,7 +395,7 @@
     std::ostringstream ss;
     ss << "Lap number " << i++ << " in DecodeAndCompare while loop";
     SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
-    int out_len = 0;
+    size_t out_len = 0;
     ASSERT_NO_FATAL_FAILURE(Process(&out_len));
     ASSERT_NO_FATAL_FAILURE(ref_files.ProcessReference(out_data_, out_len));
 
@@ -498,7 +499,7 @@
   }
   // Pull out all data.
   for (size_t i = 0; i < num_frames; ++i) {
-    int out_len;
+    size_t out_len;
     int num_channels;
     NetEqOutputType type;
     ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
@@ -536,7 +537,7 @@
         rtp_info,
         reinterpret_cast<uint8_t*>(payload),
         kPayloadBytes, 0));
-    int out_len;
+    size_t out_len;
     int num_channels;
     NetEqOutputType type;
     ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
@@ -566,7 +567,7 @@
     }
 
     // Pull out data once.
-    int out_len;
+    size_t out_len;
     int num_channels;
     NetEqOutputType type;
     ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
@@ -597,7 +598,7 @@
     }
 
     // Pull out data once.
-    int out_len;
+    size_t out_len;
     int num_channels;
     NetEqOutputType type;
     ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
@@ -622,7 +623,7 @@
   const size_t kPayloadBytes = kSamples * 2;
   double next_input_time_ms = 0.0;
   double t_ms;
-  int out_len;
+  size_t out_len;
   int num_channels;
   NetEqOutputType type;
 
@@ -854,7 +855,7 @@
     out_data_[i] = 1;
   }
   int num_channels;
-  int samples_per_channel;
+  size_t samples_per_channel;
   EXPECT_EQ(NetEq::kFail,
             neteq_->GetAudio(kMaxBlockSize, out_data_,
                              &samples_per_channel, &num_channels, &type));
@@ -887,7 +888,7 @@
     out_data_[i] = 1;
   }
   int num_channels;
-  int samples_per_channel;
+  size_t samples_per_channel;
   EXPECT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_,
                                 &samples_per_channel,
                                 &num_channels, &type));
@@ -908,7 +909,7 @@
                              bool should_be_faded) = 0;
 
   void CheckBgn(int sampling_rate_hz) {
-    int16_t expected_samples_per_channel = 0;
+    size_t expected_samples_per_channel = 0;
     uint8_t payload_type = 0xFF;  // Invalid.
     if (sampling_rate_hz == 8000) {
       expected_samples_per_channel = kBlockSize8kHz;
@@ -932,7 +933,7 @@
     ASSERT_TRUE(input.Init(
         webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
         10 * sampling_rate_hz,  // Max 10 seconds loop length.
-        static_cast<size_t>(expected_samples_per_channel)));
+        expected_samples_per_channel));
 
     // Payload of 10 ms of PCM16 32 kHz.
     uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];
@@ -941,19 +942,18 @@
     rtp_info.header.payloadType = payload_type;
 
     int number_channels = 0;
-    int samples_per_channel = 0;
+    size_t samples_per_channel = 0;
 
     uint32_t receive_timestamp = 0;
     for (int n = 0; n < 10; ++n) {  // Insert a few packets and get audio.
-      int16_t enc_len_bytes = WebRtcPcm16b_Encode(
+      size_t enc_len_bytes = WebRtcPcm16b_Encode(
           input.GetNextBlock(), expected_samples_per_channel, payload);
       ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);
 
       number_channels = 0;
       samples_per_channel = 0;
       ASSERT_EQ(0,
-                neteq_->InsertPacket(rtp_info, payload,
-                                     static_cast<size_t>(enc_len_bytes),
+                neteq_->InsertPacket(rtp_info, payload, enc_len_bytes,
                                      receive_timestamp));
       ASSERT_EQ(0,
                 neteq_->GetAudio(kBlockSize32kHz,
@@ -1009,7 +1009,7 @@
       if (type == kOutputPLCtoCNG) {
         plc_to_cng = true;
         double sum_squared = 0;
-        for (int k = 0; k < number_channels * samples_per_channel; ++k)
+        for (size_t k = 0; k < number_channels * samples_per_channel; ++k)
           sum_squared += output[k] * output[k];
         TestCondition(sum_squared, n > kFadingThreshold);
       } else {
@@ -1168,7 +1168,7 @@
   // actual decoded values.
   NetEqOutputType output_type;
   int num_channels;
-  int samples_per_channel;
+  size_t samples_per_channel;
   uint32_t receive_timestamp = 0;
   for (int n = 0; n < 100; ++n) {
     ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
@@ -1246,7 +1246,7 @@
   // actual decoded values.
   NetEqOutputType output_type;
   int num_channels;
-  int samples_per_channel;
+  size_t samples_per_channel;
   uint32_t receive_timestamp = 0;
   int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1;
   for (int n = 0; n < algorithmic_frame_delay; ++n) {
@@ -1315,7 +1315,7 @@
   double next_input_time_ms = 0.0;
   int16_t decoded[kBlockSize16kHz];
   int num_channels;
-  int samples_per_channel;
+  size_t samples_per_channel;
   NetEqOutputType output_type;
   uint32_t receive_timestamp = 0;
 
@@ -1418,7 +1418,7 @@
       algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
   // Insert three speech packets. Three are needed to get the frame length
   // correct.
-  int out_len;
+  size_t out_len;
   int num_channels;
   NetEqOutputType type;
   uint8_t payload[kPayloadBytes] = {0};
@@ -1515,7 +1515,7 @@
   timestamp += kCngPeriodSamples;
 
   // Pull audio once and make sure CNG is played.
-  int out_len;
+  size_t out_len;
   int num_channels;
   NetEqOutputType type;
   ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
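
Note on the Process() change above: the block-size constants are samples per
10 ms, so dividing by 10 and scaling to milliseconds-per-second recovers the
sample rate, and the cast to int is lossless for all three legal block sizes.
A minimal standalone sketch of that mapping (the function name is invented
for illustration):

    #include <cassert>
    #include <cstddef>

    // A 10 ms block at fs Hz holds fs / 100 samples per channel.
    int SampleRateFromBlockSize(size_t samples_per_10ms_block) {
      return static_cast<int>(samples_per_10ms_block / 10 * 1000);
    }

    int main() {
      assert(SampleRateFromBlockSize(80) == 8000);    // kBlockSize8kHz
      assert(SampleRateFromBlockSize(160) == 16000);  // kBlockSize16kHz
      assert(SampleRateFromBlockSize(320) == 32000);  // kBlockSize32kHz
      return 0;
    }
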
diff --git a/webrtc/modules/audio_coding/neteq/normal.cc b/webrtc/modules/audio_coding/neteq/normal.cc
index bf455c9..ebecbf9 100644
--- a/webrtc/modules/audio_coding/neteq/normal.cc
+++ b/webrtc/modules/audio_coding/neteq/normal.cc
@@ -45,12 +45,12 @@
   output->PushBackInterleaved(input, length);
   int16_t* signal = &(*output)[0][0];
 
-  const unsigned fs_mult = fs_hz_ / 8000;
+  const int fs_mult = fs_hz_ / 8000;
   assert(fs_mult > 0);
   // fs_shift = log2(fs_mult), rounded down.
   // Note that |fs_shift| is not "exact" for 48 kHz.
   // TODO(hlundin): Investigate this further.
-  const int fs_shift = 30 - WebRtcSpl_NormW32(static_cast<int32_t>(fs_mult));
+  const int fs_shift = 30 - WebRtcSpl_NormW32(fs_mult);
 
   // Check if last RecOut call resulted in an Expand. If so, we have to take
   // care of some cross-fading and unmuting.
@@ -73,11 +73,11 @@
       int16_t* signal = &(*output)[channel_ix][0];
       size_t length_per_channel = length / output->Channels();
       // Find largest absolute value in new data.
-      int16_t decoded_max = WebRtcSpl_MaxAbsValueW16(
-        signal,  static_cast<int>(length_per_channel));
+      int16_t decoded_max =
+          WebRtcSpl_MaxAbsValueW16(signal, length_per_channel);
       // Adjust muting factor if needed (to BGN level).
-      int energy_length = std::min(static_cast<int>(fs_mult * 64),
-                                   static_cast<int>(length_per_channel));
+      size_t energy_length =
+          std::min(static_cast<size_t>(fs_mult * 64), length_per_channel);
       int scaling = 6 + fs_shift
           - WebRtcSpl_NormW32(decoded_max * decoded_max);
       scaling = std::max(scaling, 0);  // |scaling| should always be >= 0.
@@ -111,7 +111,7 @@
       }
 
       // If muted increase by 0.64 for every 20 ms (NB/WB 0.0040/0.0020 in Q14).
-      int increment = static_cast<int>(64 / fs_mult);
+      int increment = 64 / fs_mult;
       for (size_t i = 0; i < length_per_channel; i++) {
         // Scale with mute factor.
         assert(channel_ix < output->Channels());
@@ -131,7 +131,7 @@
       assert(fs_shift < 3);  // Will always be 0, 1, or 2.
       increment = 4 >> fs_shift;
       int fraction = increment;
-      for (size_t i = 0; i < 8 * fs_mult; i++) {
+      for (size_t i = 0; i < static_cast<size_t>(8 * fs_mult); i++) {
         // TODO(hlundin): Add 16 instead of 8 for correct rounding. Keeping 8
         // now for legacy bit-exactness.
         assert(channel_ix < output->Channels());
@@ -144,7 +144,7 @@
     }
   } else if (last_mode == kModeRfc3389Cng) {
     assert(output->Channels() == 1);  // Not adapted for multi-channel yet.
-    static const int kCngLength = 32;
+    static const size_t kCngLength = 32;
     int16_t cng_output[kCngLength];
     // Reset mute factor and start up fresh.
     external_mute_factor_array[0] = 16384;
@@ -167,7 +167,7 @@
     assert(fs_shift < 3);  // Will always be 0, 1, or 2.
     int16_t increment = 4 >> fs_shift;
     int16_t fraction = increment;
-    for (size_t i = 0; i < 8 * fs_mult; i++) {
+    for (size_t i = 0; i < static_cast<size_t>(8 * fs_mult); i++) {
       // TODO(hlundin): Add 16 instead of 8 for correct rounding. Keeping 8 now
       // for legacy bit-exactness.
       signal[i] =
@@ -178,7 +178,7 @@
     // Previous was neither of Expand, FadeToBGN or RFC3389_CNG, but we are
     // still ramping up from previous muting.
     // If muted increase by 0.64 for every 20 ms (NB/WB 0.0040/0.0020 in Q14).
-    int increment = static_cast<int>(64 / fs_mult);
+    int increment = 64 / fs_mult;
     size_t length_per_channel = length / output->Channels();
     for (size_t i = 0; i < length_per_channel; i++) {
       for (size_t channel_ix = 0; channel_ix < output->Channels();
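
On the fs_shift computation above: assuming WebRtcSpl_NormW32(x) returns the
number of left shifts that bring a positive value's highest bit up to bit 30,
30 - WebRtcSpl_NormW32(fs_mult) equals floor(log2(fs_mult)): exact for
fs_mult 1, 2 and 4, rounded down for 6, which is the 48 kHz inexactness the
TODO mentions. A self-contained sketch with a stand-in for the SPL routine:

    #include <cassert>
    #include <cstdint>

    // Stand-in for WebRtcSpl_NormW32, valid for small positive inputs only.
    int NormW32(int32_t x) {
      int n = 0;
      while ((static_cast<int64_t>(x) << n) < 0x40000000) ++n;
      return n;
    }

    int main() {
      assert(30 - NormW32(1) == 0);  // 8 kHz:  log2(1), exact.
      assert(30 - NormW32(2) == 1);  // 16 kHz: log2(2), exact.
      assert(30 - NormW32(4) == 2);  // 32 kHz: log2(4), exact.
      assert(30 - NormW32(6) == 2);  // 48 kHz: floor(log2(6)), rounded down.
      return 0;
    }
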
diff --git a/webrtc/modules/audio_coding/neteq/packet_buffer.cc b/webrtc/modules/audio_coding/neteq/packet_buffer.cc
index 431e0f1..c89de12 100644
--- a/webrtc/modules/audio_coding/neteq/packet_buffer.cc
+++ b/webrtc/modules/audio_coding/neteq/packet_buffer.cc
@@ -181,7 +181,7 @@
   return const_cast<const RTPHeader*>(&(buffer_.front()->header));
 }
 
-Packet* PacketBuffer::GetNextPacket(int* discard_count) {
+Packet* PacketBuffer::GetNextPacket(size_t* discard_count) {
   if (Empty()) {
     // Buffer is empty.
     return NULL;
@@ -194,7 +194,7 @@
 
   // Discard other packets with the same timestamp. These are duplicates or
   // redundant payloads that should not be used.
-  int discards = 0;
+  size_t discards = 0;
 
   while (!Empty() &&
       buffer_.front()->header.timestamp == packet->header.timestamp) {
@@ -240,15 +240,15 @@
   return DiscardOldPackets(timestamp_limit, 0);
 }
 
-int PacketBuffer::NumPacketsInBuffer() const {
-  return static_cast<int>(buffer_.size());
+size_t PacketBuffer::NumPacketsInBuffer() const {
+  return buffer_.size();
 }
 
-int PacketBuffer::NumSamplesInBuffer(DecoderDatabase* decoder_database,
-                                     int last_decoded_length) const {
+size_t PacketBuffer::NumSamplesInBuffer(DecoderDatabase* decoder_database,
+                                        size_t last_decoded_length) const {
   PacketList::const_iterator it;
-  int num_samples = 0;
-  int last_duration = last_decoded_length;
+  size_t num_samples = 0;
+  size_t last_duration = last_decoded_length;
   for (it = buffer_.begin(); it != buffer_.end(); ++it) {
     Packet* packet = (*it);
     AudioDecoder* decoder =
diff --git a/webrtc/modules/audio_coding/neteq/packet_buffer.h b/webrtc/modules/audio_coding/neteq/packet_buffer.h
index d2d429b..737845e 100644
--- a/webrtc/modules/audio_coding/neteq/packet_buffer.h
+++ b/webrtc/modules/audio_coding/neteq/packet_buffer.h
@@ -88,7 +88,7 @@
   // Subsequent packets with the same timestamp as the one extracted will be
   // discarded and properly deleted. The number of discarded packets will be
   // written to the output variable |discard_count|.
-  virtual Packet* GetNextPacket(int* discard_count);
+  virtual Packet* GetNextPacket(size_t* discard_count);
 
   // Discards the first packet in the buffer. The packet is deleted.
   // Returns PacketBuffer::kBufferEmpty if the buffer is empty,
@@ -109,12 +109,12 @@
 
   // Returns the number of packets in the buffer, including duplicates and
   // redundant packets.
-  virtual int NumPacketsInBuffer() const;
+  virtual size_t NumPacketsInBuffer() const;
 
   // Returns the number of samples in the buffer, including samples carried in
   // duplicate and redundant packets.
-  virtual int NumSamplesInBuffer(DecoderDatabase* decoder_database,
-                                 int last_decoded_length) const;
+  virtual size_t NumSamplesInBuffer(DecoderDatabase* decoder_database,
+                                    size_t last_decoded_length) const;
 
   // Increase the waiting time counter for every packet in the buffer by |inc|.
   // The default value for |inc| is 1.
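
NumSamplesInBuffer() above carries the most recent known duration forward for
packets whose duration cannot be determined, seeding with the last decoded
length. A minimal sketch of that accumulation (assumption: duration 0 means
"unknown"; the real code queries the decoder database per packet):

    #include <cstddef>
    #include <list>

    struct Packet {
      size_t duration_samples;  // 0 means the duration is unknown.
    };

    size_t NumSamplesInBuffer(const std::list<Packet*>& buffer,
                              size_t last_decoded_length) {
      size_t num_samples = 0;
      size_t last_duration = last_decoded_length;  // Seed with last decode.
      for (Packet* packet : buffer) {
        if (packet->duration_samples > 0)
          last_duration = packet->duration_samples;
        num_samples += last_duration;  // Unknown durations reuse the last one.
      }
      return num_samples;
    }

    int main() {
      Packet a = {160};
      Packet b = {0};  // Unknown; counts as 160, like the packet before it.
      std::list<Packet*> buffer = {&a, &b};
      return NumSamplesInBuffer(buffer, 80) == 320 ? 0 : 1;
    }
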
diff --git a/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc b/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc
index 61a8ee1..435b6c8 100644
--- a/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc
@@ -97,7 +97,7 @@
   EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
   EXPECT_EQ(4711u, next_ts);
   EXPECT_FALSE(buffer.Empty());
-  EXPECT_EQ(1, buffer.NumPacketsInBuffer());
+  EXPECT_EQ(1u, buffer.NumPacketsInBuffer());
   const RTPHeader* hdr = buffer.NextRtpHeader();
   EXPECT_EQ(&(packet->header), hdr);  // Compare pointer addresses.
 
@@ -116,12 +116,12 @@
     Packet* packet = gen.NextPacket(payload_len);
     EXPECT_EQ(PacketBuffer::kOK, buffer.InsertPacket(packet));
   }
-  EXPECT_EQ(10, buffer.NumPacketsInBuffer());
+  EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
   EXPECT_FALSE(buffer.Empty());
 
   buffer.Flush();
   // Buffer should delete the payloads itself.
-  EXPECT_EQ(0, buffer.NumPacketsInBuffer());
+  EXPECT_EQ(0u, buffer.NumPacketsInBuffer());
   EXPECT_TRUE(buffer.Empty());
 }
 
@@ -137,7 +137,7 @@
     Packet* packet = gen.NextPacket(payload_len);
     EXPECT_EQ(PacketBuffer::kOK, buffer.InsertPacket(packet));
   }
-  EXPECT_EQ(10, buffer.NumPacketsInBuffer());
+  EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
   uint32_t next_ts;
   EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
   EXPECT_EQ(0u, next_ts);  // Expect first inserted packet to be first in line.
@@ -145,7 +145,7 @@
   // Insert 11th packet; should flush the buffer and insert it after flushing.
   Packet* packet = gen.NextPacket(payload_len);
   EXPECT_EQ(PacketBuffer::kFlushed, buffer.InsertPacket(packet));
-  EXPECT_EQ(1, buffer.NumPacketsInBuffer());
+  EXPECT_EQ(1u, buffer.NumPacketsInBuffer());
   EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
   // Expect last inserted packet to be first in line.
   EXPECT_EQ(packet->header.timestamp, next_ts);
@@ -179,7 +179,7 @@
                                                        &current_pt,
                                                        &current_cng_pt));
   EXPECT_TRUE(list.empty());  // The PacketBuffer should have depleted the list.
-  EXPECT_EQ(10, buffer.NumPacketsInBuffer());
+  EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
   EXPECT_EQ(0, current_pt);  // Current payload type changed to 0.
   EXPECT_EQ(0xFF, current_cng_pt);  // CNG payload type not changed.
 
@@ -220,7 +220,7 @@
                                                             &current_pt,
                                                             &current_cng_pt));
   EXPECT_TRUE(list.empty());  // The PacketBuffer should have depleted the list.
-  EXPECT_EQ(1, buffer.NumPacketsInBuffer());  // Only the last packet.
+  EXPECT_EQ(1u, buffer.NumPacketsInBuffer());  // Only the last packet.
   EXPECT_EQ(1, current_pt);  // Current payload type changed to 1.
   EXPECT_EQ(0xFF, current_cng_pt);  // CNG payload type not changed.
 
@@ -256,7 +256,7 @@
     {0x0006, 0x0000001E, 1, false, -1},
   };
 
-  const int kExpectPacketsInBuffer = 9;
+  const size_t kExpectPacketsInBuffer = 9;
 
   std::vector<Packet*> expect_order(kExpectPacketsInBuffer);
 
@@ -277,10 +277,10 @@
 
   EXPECT_EQ(kExpectPacketsInBuffer, buffer.NumPacketsInBuffer());
 
-  int drop_count;
-  for (int i = 0; i < kExpectPacketsInBuffer; ++i) {
+  size_t drop_count;
+  for (size_t i = 0; i < kExpectPacketsInBuffer; ++i) {
     Packet* packet = buffer.GetNextPacket(&drop_count);
-    EXPECT_EQ(0, drop_count);
+    EXPECT_EQ(0u, drop_count);
     EXPECT_EQ(packet, expect_order[i]);  // Compare pointer addresses.
     delete[] packet->payload;
     delete packet;
@@ -302,7 +302,7 @@
     Packet* packet = gen.NextPacket(payload_len);
     buffer.InsertPacket(packet);
   }
-  EXPECT_EQ(10, buffer.NumPacketsInBuffer());
+  EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
 
   // Discard them one by one and make sure that the right packets are at the
   // front of the buffer.
@@ -350,7 +350,7 @@
                                                        decoder_database,
                                                        &current_pt,
                                                        &current_cng_pt));
-  EXPECT_EQ(10, buffer.NumPacketsInBuffer());
+  EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
 
   // Extract them and make sure that they come out in the right order.
   uint32_t current_ts = start_ts;
@@ -425,7 +425,7 @@
                                      &current_pt,
                                      &current_cng_pt));
   EXPECT_TRUE(list.empty());  // The PacketBuffer should have depleted the list.
-  EXPECT_EQ(1, buffer->NumPacketsInBuffer());
+  EXPECT_EQ(1u, buffer->NumPacketsInBuffer());
   delete buffer;
   EXPECT_CALL(decoder_database, Die());  // Called when object is deleted.
 }
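
The new 'u' suffixes in the expectations above are not cosmetic: EXPECT_EQ
deduces both argument types, and once NumPacketsInBuffer() returns size_t, a
plain int literal makes a signed/unsigned comparison that can trip
-Wsign-compare. A trivial illustration (the helper is a hypothetical
stand-in, not the real class):

    #include <cstddef>

    size_t NumPacketsInBuffer() { return 10; }  // Hypothetical stand-in.

    int main() {
      // '10 == NumPacketsInBuffer()' compares signed to unsigned; the
      // unsigned literal keeps both operands the same signedness.
      return NumPacketsInBuffer() == 10u ? 0 : 1;
    }
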
diff --git a/webrtc/modules/audio_coding/neteq/post_decode_vad.cc b/webrtc/modules/audio_coding/neteq/post_decode_vad.cc
index 0749673..714073a 100644
--- a/webrtc/modules/audio_coding/neteq/post_decode_vad.cc
+++ b/webrtc/modules/audio_coding/neteq/post_decode_vad.cc
@@ -45,7 +45,7 @@
   }
 }
 
-void PostDecodeVad::Update(int16_t* signal, int length,
+void PostDecodeVad::Update(int16_t* signal, size_t length,
                            AudioDecoder::SpeechType speech_type,
                            bool sid_frame,
                            int fs_hz) {
@@ -68,12 +68,13 @@
   }
 
   if (length > 0 && running_) {
-    int vad_sample_index = 0;
+    size_t vad_sample_index = 0;
     active_speech_ = false;
     // Loop through frame sizes 30, 20, and 10 ms.
     for (int vad_frame_size_ms = 30; vad_frame_size_ms >= 10;
         vad_frame_size_ms -= 10) {
-      int vad_frame_size_samples = vad_frame_size_ms * fs_hz / 1000;
+      size_t vad_frame_size_samples =
+          static_cast<size_t>(vad_frame_size_ms * fs_hz / 1000);
       while (length - vad_sample_index >= vad_frame_size_samples) {
         int vad_return = WebRtcVad_Process(
             vad_instance_, fs_hz, &signal[vad_sample_index],
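
The Update() loop above tries the largest VAD frame sizes first (30, then 20,
then 10 ms) so that as much of the decoded frame as possible gets a decision.
With size_t lengths, 'length - vad_sample_index' is safe only because the
index never exceeds the length. A compilable sketch of the same partitioning
shape, with the VAD call stubbed out:

    #include <cstddef>

    void PartitionForVad(size_t length, int fs_hz) {
      size_t index = 0;
      for (int frame_ms = 30; frame_ms >= 10; frame_ms -= 10) {
        const size_t frame_samples =
            static_cast<size_t>(frame_ms * fs_hz / 1000);
        // index <= length is an invariant, so this subtraction cannot wrap.
        while (length - index >= frame_samples) {
          // ... WebRtcVad_Process(vad, fs_hz, &signal[index], frame_samples)
          index += frame_samples;
        }
      }
    }

    int main() {
      PartitionForVad(480, 16000);  // One 30 ms frame covers all 480 samples.
      return 0;
    }
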
diff --git a/webrtc/modules/audio_coding/neteq/post_decode_vad.h b/webrtc/modules/audio_coding/neteq/post_decode_vad.h
index fa276aa..2886cf9 100644
--- a/webrtc/modules/audio_coding/neteq/post_decode_vad.h
+++ b/webrtc/modules/audio_coding/neteq/post_decode_vad.h
@@ -46,7 +46,7 @@
 
   // Updates post-decode VAD with the audio data in |signal| having |length|
   // samples. The data is of type |speech_type|, at the sample rate |fs_hz|.
-  void Update(int16_t* signal, int length,
+  void Update(int16_t* signal, size_t length,
               AudioDecoder::SpeechType speech_type, bool sid_frame, int fs_hz);
 
   // Accessors.
diff --git a/webrtc/modules/audio_coding/neteq/preemptive_expand.cc b/webrtc/modules/audio_coding/neteq/preemptive_expand.cc
index 6a3f8ec..f51a5bd 100644
--- a/webrtc/modules/audio_coding/neteq/preemptive_expand.cc
+++ b/webrtc/modules/audio_coding/neteq/preemptive_expand.cc
@@ -18,14 +18,14 @@
 
 PreemptiveExpand::ReturnCodes PreemptiveExpand::Process(
     const int16_t* input,
-    int input_length,
-    int old_data_length,
+    size_t input_length,
+    size_t old_data_length,
     AudioMultiVector* output,
-    int16_t* length_change_samples) {
+    size_t* length_change_samples) {
   old_data_length_per_channel_ = old_data_length;
   // Input length must be (almost) 30 ms.
   // Also, the new part must be at least |overlap_samples_| elements.
-  static const int k15ms = 120;  // 15 ms = 120 samples at 8 kHz sample rate.
+  static const size_t k15ms = 120;  // 15 ms = 120 samples at 8 kHz sample rate.
   if (num_channels_ == 0 ||
       input_length / num_channels_ < (2 * k15ms - 1) * fs_mult_ ||
       old_data_length >= input_length / num_channels_ - overlap_samples_) {
@@ -41,7 +41,7 @@
 
 void PreemptiveExpand::SetParametersForPassiveSpeech(size_t len,
                                                      int16_t* best_correlation,
-                                                     int* peak_index) const {
+                                                     size_t* peak_index) const {
   // When the signal does not contain any active speech, the correlation does
   // not matter. Simply set it to zero.
   *best_correlation = 0;
@@ -51,7 +51,7 @@
   // the new data.
   // but we must ensure that best_correlation is not larger than the new data.
   *peak_index = std::min(*peak_index,
-                         static_cast<int>(len - old_data_length_per_channel_));
+                         len - old_data_length_per_channel_);
 }
 
 PreemptiveExpand::ReturnCodes PreemptiveExpand::CheckCriteriaAndStretch(
@@ -64,8 +64,7 @@
     AudioMultiVector* output) const {
   // Pre-calculate common multiplication with |fs_mult_|.
   // 120 corresponds to 15 ms.
-  int fs_mult_120 = fs_mult_ * 120;
-  assert(old_data_length_per_channel_ >= 0);  // Make sure it's been set.
+  size_t fs_mult_120 = static_cast<size_t>(fs_mult_ * 120);
   // Check for strong correlation (>0.9 in Q14) and at least 15 ms new data,
   // or passive speech.
   if (((best_correlation > kCorrelationThreshold) &&
@@ -107,7 +106,7 @@
     int sample_rate_hz,
     size_t num_channels,
     const BackgroundNoise& background_noise,
-    int overlap_samples) const {
+    size_t overlap_samples) const {
   return new PreemptiveExpand(
       sample_rate_hz, num_channels, background_noise, overlap_samples);
 }
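
Two migration hazards surface in this file: a size_t member cannot carry the
old -1 "not yet set" sentinel (hence the dropped assert here and the 0
initializer in the header below), and unsigned subtraction wraps, so range
guards must run before a difference is computed. A sketch of the guard
pattern, with illustrative names rather than the real signatures:

    #include <cstddef>

    bool LengthsAreUsable(size_t input_length, size_t num_channels,
                          size_t old_data_length, size_t overlap_samples) {
      if (num_channels == 0) return false;
      const size_t per_channel = input_length / num_channels;
      if (per_channel < overlap_samples) return false;  // Guard first.
      return old_data_length < per_channel - overlap_samples;  // No wrap now.
    }

    int main() {
      return LengthsAreUsable(480, 1, 100, 80) ? 0 : 1;
    }
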
diff --git a/webrtc/modules/audio_coding/neteq/preemptive_expand.h b/webrtc/modules/audio_coding/neteq/preemptive_expand.h
index 65da703..ca48e19 100644
--- a/webrtc/modules/audio_coding/neteq/preemptive_expand.h
+++ b/webrtc/modules/audio_coding/neteq/preemptive_expand.h
@@ -32,9 +32,9 @@
   PreemptiveExpand(int sample_rate_hz,
                    size_t num_channels,
                    const BackgroundNoise& background_noise,
-                   int overlap_samples)
+                   size_t overlap_samples)
       : TimeStretch(sample_rate_hz, num_channels, background_noise),
-        old_data_length_per_channel_(-1),
+        old_data_length_per_channel_(0),
         overlap_samples_(overlap_samples) {
   }
 
@@ -44,17 +44,17 @@
   // is provided in the output |length_change_samples|. The method returns
   // the outcome of the operation as an enumerator value.
   ReturnCodes Process(const int16_t *pw16_decoded,
-                      int len,
-                      int old_data_len,
+                      size_t len,
+                      size_t old_data_len,
                       AudioMultiVector* output,
-                      int16_t* length_change_samples);
+                      size_t* length_change_samples);
 
  protected:
   // Sets the parameters |best_correlation| and |peak_index| to suitable
   // values when the signal contains no active speech.
   void SetParametersForPassiveSpeech(size_t input_length,
                                      int16_t* best_correlation,
-                                     int* peak_index) const override;
+                                     size_t* peak_index) const override;
 
   // Checks the criteria for performing the time-stretching operation and,
   // if possible, performs the time-stretching.
@@ -67,8 +67,8 @@
                                       AudioMultiVector* output) const override;
 
  private:
-  int old_data_length_per_channel_;
-  int overlap_samples_;
+  size_t old_data_length_per_channel_;
+  size_t overlap_samples_;
 
   DISALLOW_COPY_AND_ASSIGN(PreemptiveExpand);
 };
@@ -81,7 +81,7 @@
       int sample_rate_hz,
       size_t num_channels,
       const BackgroundNoise& background_noise,
-      int overlap_samples) const;
+      size_t overlap_samples) const;
 };
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/neteq/random_vector.h b/webrtc/modules/audio_coding/neteq/random_vector.h
index 767dc48..8c75eae 100644
--- a/webrtc/modules/audio_coding/neteq/random_vector.h
+++ b/webrtc/modules/audio_coding/neteq/random_vector.h
@@ -21,7 +21,7 @@
 // This class generates pseudo-random samples.
 class RandomVector {
  public:
-  static const int kRandomTableSize = 256;
+  static const size_t kRandomTableSize = 256;
   static const int16_t kRandomTable[kRandomTableSize];
 
   RandomVector()
diff --git a/webrtc/modules/audio_coding/neteq/statistics_calculator.cc b/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
index df139f7..c716fe4 100644
--- a/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
+++ b/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
@@ -14,6 +14,7 @@
 #include <string.h>  // memset
 
 #include "webrtc/base/checks.h"
+#include "webrtc/base/safe_conversions.h"
 #include "webrtc/modules/audio_coding/neteq/decision_logic.h"
 #include "webrtc/modules/audio_coding/neteq/delay_manager.h"
 #include "webrtc/system_wrappers/interface/metrics.h"
@@ -140,36 +141,37 @@
   next_waiting_time_index_ = 0;
 }
 
-void StatisticsCalculator::ExpandedVoiceSamples(int num_samples) {
+void StatisticsCalculator::ExpandedVoiceSamples(size_t num_samples) {
   expanded_speech_samples_ += num_samples;
 }
 
-void StatisticsCalculator::ExpandedNoiseSamples(int num_samples) {
+void StatisticsCalculator::ExpandedNoiseSamples(size_t num_samples) {
   expanded_noise_samples_ += num_samples;
 }
 
-void StatisticsCalculator::PreemptiveExpandedSamples(int num_samples) {
+void StatisticsCalculator::PreemptiveExpandedSamples(size_t num_samples) {
   preemptive_samples_ += num_samples;
 }
 
-void StatisticsCalculator::AcceleratedSamples(int num_samples) {
+void StatisticsCalculator::AcceleratedSamples(size_t num_samples) {
   accelerate_samples_ += num_samples;
 }
 
-void StatisticsCalculator::AddZeros(int num_samples) {
+void StatisticsCalculator::AddZeros(size_t num_samples) {
   added_zero_samples_ += num_samples;
 }
 
-void StatisticsCalculator::PacketsDiscarded(int num_packets) {
+void StatisticsCalculator::PacketsDiscarded(size_t num_packets) {
   discarded_packets_ += num_packets;
 }
 
-void StatisticsCalculator::LostSamples(int num_samples) {
+void StatisticsCalculator::LostSamples(size_t num_samples) {
   lost_timestamps_ += num_samples;
 }
 
-void StatisticsCalculator::IncreaseCounter(int num_samples, int fs_hz) {
-  const int time_step_ms = rtc::CheckedDivExact(1000 * num_samples, fs_hz);
+void StatisticsCalculator::IncreaseCounter(size_t num_samples, int fs_hz) {
+  const int time_step_ms =
+      rtc::CheckedDivExact(static_cast<int>(1000 * num_samples), fs_hz);
   delayed_packet_outage_counter_.AdvanceClock(time_step_ms);
   excess_buffer_delay_.AdvanceClock(time_step_ms);
   timestamps_since_last_report_ += static_cast<uint32_t>(num_samples);
@@ -207,8 +209,8 @@
 
 void StatisticsCalculator::GetNetworkStatistics(
     int fs_hz,
-    int num_samples_in_buffers,
-    int samples_per_packet,
+    size_t num_samples_in_buffers,
+    size_t samples_per_packet,
     const DelayManager& delay_manager,
     const DecisionLogic& decision_logic,
     NetEqNetworkStatistics *stats) {
@@ -220,8 +222,8 @@
   stats->added_zero_samples = added_zero_samples_;
   stats->current_buffer_size_ms =
       static_cast<uint16_t>(num_samples_in_buffers * 1000 / fs_hz);
-  const int ms_per_packet = decision_logic.packet_length_samples() /
-      (fs_hz / 1000);
+  const int ms_per_packet = rtc::checked_cast<int>(
+      decision_logic.packet_length_samples() / (fs_hz / 1000));
   stats->preferred_buffer_size_ms = (delay_manager.TargetLevel() >> 8) *
       ms_per_packet;
   stats->jitter_peaks_found = delay_manager.PeakFound();
@@ -230,7 +232,7 @@
   stats->packet_loss_rate =
       CalculateQ14Ratio(lost_timestamps_, timestamps_since_last_report_);
 
-  const unsigned discarded_samples = discarded_packets_ * samples_per_packet;
+  const size_t discarded_samples = discarded_packets_ * samples_per_packet;
   stats->packet_discard_rate =
       CalculateQ14Ratio(discarded_samples, timestamps_since_last_report_);
 
@@ -265,7 +267,7 @@
   ResetWaitingTimeStatistics();
 }
 
-uint16_t StatisticsCalculator::CalculateQ14Ratio(uint32_t numerator,
+uint16_t StatisticsCalculator::CalculateQ14Ratio(size_t numerator,
                                                  uint32_t denominator) {
   if (numerator == 0) {
     return 0;
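
CalculateQ14Ratio() reports rates in Q14 fixed point, i.e. a ratio x is
stored as x * 2^14, so 0.5 becomes 8192 and 1.0 becomes 16384. A minimal
sketch of such a ratio (the cap at 1.0 is an assumption for illustration,
not necessarily the real saturation behavior):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    uint16_t Q14Ratio(size_t numerator, uint32_t denominator) {
      if (numerator == 0) return 0;
      if (numerator >= denominator) return 1 << 14;  // Cap at 1.0 in Q14.
      return static_cast<uint16_t>((numerator << 14) / denominator);
    }

    int main() {
      assert(Q14Ratio(0, 16000) == 0);
      assert(Q14Ratio(8000, 16000) == 8192);    // 0.5 in Q14.
      assert(Q14Ratio(16000, 16000) == 16384);  // 1.0 in Q14.
      return 0;
    }
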
diff --git a/webrtc/modules/audio_coding/neteq/statistics_calculator.h b/webrtc/modules/audio_coding/neteq/statistics_calculator.h
index d743e5f..3bd3e55 100644
--- a/webrtc/modules/audio_coding/neteq/statistics_calculator.h
+++ b/webrtc/modules/audio_coding/neteq/statistics_calculator.h
@@ -42,32 +42,32 @@
 
   // Reports that |num_samples| samples were produced through expansion, and
   // that the expansion produced other than just noise samples.
-  void ExpandedVoiceSamples(int num_samples);
+  void ExpandedVoiceSamples(size_t num_samples);
 
   // Reports that |num_samples| samples were produced through expansion, and
   // that the expansion produced only noise samples.
-  void ExpandedNoiseSamples(int num_samples);
+  void ExpandedNoiseSamples(size_t num_samples);
 
   // Reports that |num_samples| samples were produced through preemptive
   // expansion.
-  void PreemptiveExpandedSamples(int num_samples);
+  void PreemptiveExpandedSamples(size_t num_samples);
 
   // Reports that |num_samples| samples were removed through accelerate.
-  void AcceleratedSamples(int num_samples);
+  void AcceleratedSamples(size_t num_samples);
 
   // Reports that |num_samples| zeros were inserted into the output.
-  void AddZeros(int num_samples);
+  void AddZeros(size_t num_samples);
 
   // Reports that |num_packets| packets were discarded.
-  void PacketsDiscarded(int num_packets);
+  void PacketsDiscarded(size_t num_packets);
 
   // Reports that |num_samples| were lost.
-  void LostSamples(int num_samples);
+  void LostSamples(size_t num_samples);
 
   // Increases the report interval counter with |num_samples| at a sample rate
   // of |fs_hz|. This is how the StatisticsCalculator gets notified that current
   // time is increasing.
-  void IncreaseCounter(int num_samples, int fs_hz);
+  void IncreaseCounter(size_t num_samples, int fs_hz);
 
   // Stores new packet waiting time in waiting time statistics.
   void StoreWaitingTime(int waiting_time_ms);
@@ -85,8 +85,8 @@
   // yet to play out is |num_samples_in_buffers|, and the number of samples per
   // packet is |samples_per_packet|.
   void GetNetworkStatistics(int fs_hz,
-                            int num_samples_in_buffers,
-                            int samples_per_packet,
+                            size_t num_samples_in_buffers,
+                            size_t samples_per_packet,
                             const DelayManager& delay_manager,
                             const DecisionLogic& decision_logic,
                             NetEqNetworkStatistics *stats);
@@ -150,15 +150,15 @@
   };
 
   // Calculates numerator / denominator, and returns the value in Q14.
-  static uint16_t CalculateQ14Ratio(uint32_t numerator, uint32_t denominator);
+  static uint16_t CalculateQ14Ratio(size_t numerator, uint32_t denominator);
 
-  uint32_t preemptive_samples_;
-  uint32_t accelerate_samples_;
-  int added_zero_samples_;
-  uint32_t expanded_speech_samples_;
-  uint32_t expanded_noise_samples_;
-  int discarded_packets_;
-  uint32_t lost_timestamps_;
+  size_t preemptive_samples_;
+  size_t accelerate_samples_;
+  size_t added_zero_samples_;
+  size_t expanded_speech_samples_;
+  size_t expanded_noise_samples_;
+  size_t discarded_packets_;
+  size_t lost_timestamps_;
   uint32_t timestamps_since_last_report_;
   int waiting_times_[kLenWaitingTimes];  // Used as a circular buffer.
   int len_waiting_times_;
diff --git a/webrtc/modules/audio_coding/neteq/test/RTPencode.cc b/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
index 7e778b8..b2df07a 100644
--- a/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
+++ b/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
@@ -23,6 +23,8 @@
 
 #include <assert.h>
 
+#include <algorithm>
+
 #include "webrtc/typedefs.h"
 // needed for NetEqDecoder
 #include "webrtc/modules/audio_coding/neteq/audio_decoder_impl.h"
@@ -76,27 +78,27 @@
 void NetEQTest_GetCodec_and_PT(char* name,
                                webrtc::NetEqDecoder* codec,
                                int* PT,
-                               int frameLen,
+                               size_t frameLen,
                                int* fs,
                                int* bitrate,
                                int* useRed);
 int NetEQTest_init_coders(webrtc::NetEqDecoder coder,
-                          int enc_frameSize,
+                          size_t enc_frameSize,
                           int bitrate,
                           int sampfreq,
                           int vad,
-                          int numChannels);
+                          size_t numChannels);
 void defineCodecs(webrtc::NetEqDecoder* usedCodec, int* noOfCodecs);
-int NetEQTest_free_coders(webrtc::NetEqDecoder coder, int numChannels);
-int NetEQTest_encode(int coder,
-                     int16_t* indata,
-                     int frameLen,
-                     unsigned char* encoded,
-                     int sampleRate,
-                     int* vad,
-                     int useVAD,
-                     int bitrate,
-                     int numChannels);
+int NetEQTest_free_coders(webrtc::NetEqDecoder coder, size_t numChannels);
+size_t NetEQTest_encode(int coder,
+                        int16_t* indata,
+                        size_t frameLen,
+                        unsigned char* encoded,
+                        int sampleRate,
+                        int* vad,
+                        int useVAD,
+                        int bitrate,
+                        size_t numChannels);
 void makeRTPheader(unsigned char* rtp_data,
                    int payloadType,
                    int seqNo,
@@ -109,13 +111,13 @@
                         uint16_t* blockLen,
                         int seqNo,
                         uint32_t ssrc);
-int makeDTMFpayload(unsigned char* payload_data,
-                    int Event,
-                    int End,
-                    int Volume,
-                    int Duration);
-void stereoDeInterleave(int16_t* audioSamples, int numSamples);
-void stereoInterleave(unsigned char* data, int dataLen, int stride);
+size_t makeDTMFpayload(unsigned char* payload_data,
+                       int Event,
+                       int End,
+                       int Volume,
+                       int Duration);
+void stereoDeInterleave(int16_t* audioSamples, size_t numSamples);
+void stereoInterleave(unsigned char* data, size_t dataLen, size_t stride);
 
 /*********************/
 /* Codec definitions */
@@ -264,13 +266,14 @@
 #endif
 
 int main(int argc, char* argv[]) {
-  int packet_size, fs;
+  size_t packet_size;
+  int fs;
   webrtc::NetEqDecoder usedCodec;
   int payloadType;
   int bitrate = 0;
   int useVAD, vad;
   int useRed = 0;
-  int len, enc_len;
+  size_t len, enc_len;
   int16_t org_data[4000];
   unsigned char rtp_data[8000];
   int16_t seqNo = 0xFFF;
@@ -282,14 +285,14 @@
   int red_PT[2] = {0};
   uint32_t red_TS[2] = {0};
   uint16_t red_len[2] = {0};
-  int RTPheaderLen = 12;
+  size_t RTPheaderLen = 12;
   uint8_t red_data[8000];
 #ifdef INSERT_OLD_PACKETS
   uint16_t old_length, old_plen;
-  int old_enc_len;
+  size_t old_enc_len;
   int first_old_packet = 1;
   unsigned char old_rtp_data[8000];
-  int packet_age = 0;
+  size_t packet_age = 0;
 #endif
 #ifdef INSERT_DTMF_PACKETS
   int NTone = 1;
@@ -298,8 +301,8 @@
   bool dtmfSent = false;
 #endif
   bool usingStereo = false;
-  int stereoMode = 0;
-  int numChannels = 1;
+  size_t stereoMode = 0;
+  size_t numChannels = 1;
 
   /* check number of parameters */
   if ((argc != 6) && (argc != 7)) {
@@ -449,12 +452,13 @@
   FILE* out_file = fopen(argv[2], "wb");
   CHECK_NOT_NULL(out_file);
   printf("Output file: %s\n\n", argv[2]);
-  packet_size = atoi(argv[3]);
-  if (packet_size <= 0) {
-     printf("Packet size %d must be positive", packet_size);
+  int packet_size_int = atoi(argv[3]);
+  if (packet_size_int <= 0) {
+     printf("Packet size %d must be positive", packet_size_int);
      return -1;
   }
-  printf("Packet size: %d\n", packet_size);
+  printf("Packet size: %d\n", packet_size_int);
+  packet_size = static_cast<size_t>(packet_size_int);
 
   // check for stereo
   if (argv[4][strlen(argv[4]) - 1] == '*') {
@@ -653,10 +657,6 @@
       enc_len =
           NetEQTest_encode(usedCodec, org_data, packet_size, &rtp_data[12], fs,
                            &vad, useVAD, bitrate, numChannels);
-      if (enc_len == -1) {
-        printf("Error encoding frame\n");
-        exit(0);
-      }
 
       if (usingStereo && stereoMode != STEREO_MODE_FRAME && vad == 1) {
         // interleave the encoded payload for sample-based codecs (not for CNG)
@@ -729,12 +729,12 @@
           return -1;
         }
 #ifdef RANDOM_DATA
-        for (int k = 0; k < 12 + enc_len; k++) {
+        for (size_t k = 0; k < 12 + enc_len; k++) {
           rtp_data[k] = rand() + rand();
         }
 #endif
 #ifdef RANDOM_PAYLOAD_DATA
-        for (int k = 12; k < 12 + enc_len; k++) {
+        for (size_t k = 12; k < 12 + enc_len; k++) {
           rtp_data[k] = rand() + rand();
         }
 #endif
@@ -822,7 +822,7 @@
 void NetEQTest_GetCodec_and_PT(char* name,
                                webrtc::NetEqDecoder* codec,
                                int* PT,
-                               int frameLen,
+                               size_t frameLen,
                                int* fs,
                                int* bitrate,
                                int* useRed) {
@@ -887,14 +887,14 @@
 }
 
 int NetEQTest_init_coders(webrtc::NetEqDecoder coder,
-                          int enc_frameSize,
+                          size_t enc_frameSize,
                           int bitrate,
                           int sampfreq,
                           int vad,
-                          int numChannels) {
+                          size_t numChannels) {
   int ok = 0;
 
-  for (int k = 0; k < numChannels; k++) {
+  for (size_t k = 0; k < numChannels; k++) {
     VAD_inst[k] = WebRtcVad_Create();
     if (!VAD_inst[k]) {
       printf("Error: Couldn't allocate memory for VAD instance\n");
@@ -962,7 +962,7 @@
           WebRtcG729_EncoderInit(G729enc_inst[k], vad);
           if ((vad == 1) && (enc_frameSize != 80)) {
             printf("\nError - This simulation only supports VAD for G729 at "
-                   "10ms packets (not %dms)\n", (enc_frameSize >> 3));
+                   "10ms packets (not %" PRIuS "ms)\n", (enc_frameSize >> 3));
           }
         } else {
           printf("\nError - g729 is only developed for 8kHz \n");
@@ -1018,7 +1018,7 @@
           }
           if ((vad == 1) && (enc_frameSize != 160)) {
             printf("\nError - This simulation only supports VAD for Speex at "
-                   "20ms packets (not %dms)\n",
+                   "20ms packets (not %" PRIuS "ms)\n",
                 (enc_frameSize >> 3));
             vad = 0;
           }
@@ -1049,7 +1049,7 @@
           }
           if ((vad == 1) && (enc_frameSize != 320)) {
             printf("\nError - This simulation only supports VAD for Speex at "
-                   "20ms packets (not %dms)\n",
+                   "20ms packets (not %" PRIuS "ms)\n",
                 (enc_frameSize >> 4));
             vad = 0;
           }
@@ -1238,8 +1238,7 @@
                    "instance\n");
             exit(0);
           }
-          if (((enc_frameSize / 320) < 0) || ((enc_frameSize / 320) > 3) ||
-              ((enc_frameSize % 320) != 0)) {
+          if (((enc_frameSize / 320) > 3) || ((enc_frameSize % 320) != 0)) {
             printf("\nError - AMRwb must have frameSize of 20, 40 or 60ms\n");
             exit(0);
           }
@@ -1320,7 +1319,8 @@
                 bitrate);
             exit(0);
           }
-          WebRtcIsac_Control(ISAC_inst[k], bitrate, enc_frameSize >> 4);
+          WebRtcIsac_Control(ISAC_inst[k], bitrate,
+                             static_cast<int>(enc_frameSize >> 4));
         } else {
           printf("\nError - iSAC only supports 480 or 960 enc_frameSize (30 or "
                  "60 ms)\n");
@@ -1379,7 +1379,8 @@
                    "56000 bps (not %i)\n", bitrate);
             exit(0);
           }
-          WebRtcIsac_Control(ISACSWB_inst[k], bitrate, enc_frameSize >> 5);
+          WebRtcIsac_Control(ISACSWB_inst[k], bitrate,
+                             static_cast<int>(enc_frameSize >> 5));
         } else {
           printf("\nError - iSAC SWB only supports 960 enc_frameSize (30 "
                  "ms)\n");
@@ -1424,8 +1425,8 @@
   return (0);
 }
 
-int NetEQTest_free_coders(webrtc::NetEqDecoder coder, int numChannels) {
-  for (int k = 0; k < numChannels; k++) {
+int NetEQTest_free_coders(webrtc::NetEqDecoder coder, size_t numChannels) {
+  for (size_t k = 0; k < numChannels; k++) {
     WebRtcVad_Free(VAD_inst[k]);
 #if (defined(CODEC_CNGCODEC8) || defined(CODEC_CNGCODEC16) || \
      defined(CODEC_CNGCODEC32) || defined(CODEC_CNGCODEC48))
@@ -1552,35 +1553,34 @@
   return (0);
 }
 
-int NetEQTest_encode(int coder,
-                     int16_t* indata,
-                     int frameLen,
-                     unsigned char* encoded,
-                     int sampleRate,
-                     int* vad,
-                     int useVAD,
-                     int bitrate,
-                     int numChannels) {
-  int cdlen = 0;
+size_t NetEQTest_encode(int coder,
+                        int16_t* indata,
+                        size_t frameLen,
+                        unsigned char* encoded,
+                        int sampleRate,
+                        int* vad,
+                        int useVAD,
+                        int bitrate,
+                        size_t numChannels) {
+  size_t cdlen = 0;
   int16_t* tempdata;
   static int first_cng = 1;
-  int16_t tempLen;
-
+  size_t tempLen;
   *vad = 1;
 
   // check VAD first
   if (useVAD) {
     *vad = 0;
 
-    int sampleRate_10 = 10 * sampleRate / 1000;
-    int sampleRate_20 = 20 * sampleRate / 1000;
-    int sampleRate_30 = 30 * sampleRate / 1000;
-    for (int k = 0; k < numChannels; k++) {
+    size_t sampleRate_10 = static_cast<size_t>(10 * sampleRate / 1000);
+    size_t sampleRate_20 = static_cast<size_t>(20 * sampleRate / 1000);
+    size_t sampleRate_30 = static_cast<size_t>(30 * sampleRate / 1000);
+    for (size_t k = 0; k < numChannels; k++) {
       tempLen = frameLen;
       tempdata = &indata[k * frameLen];
       int localVad = 0;
       /* Partition the signal and test each chunk for VAD.
-      All chunks must be VAD=0 to produce a total VAD=0. */
+         All chunks must be VAD=0 to produce a total VAD=0. */
       while (tempLen >= sampleRate_10) {
         if ((tempLen % sampleRate_30) == 0) {  // tempLen is multiple of 30ms
           localVad |= WebRtcVad_Process(VAD_inst[k], sampleRate, tempdata,
@@ -1607,7 +1607,7 @@
     if (!*vad) {
       // all channels are silent
       cdlen = 0;
-      for (int k = 0; k < numChannels; k++) {
+      for (size_t k = 0; k < numChannels; k++) {
         WebRtcCng_Encode(CNGenc_inst[k], &indata[k * frameLen],
                          (frameLen <= 640 ? frameLen : 640) /* max 640 */,
                          encoded, &tempLen, first_cng);
@@ -1621,9 +1621,9 @@
   }
 
   // loop over all channels
-  int totalLen = 0;
+  size_t totalLen = 0;
 
-  for (int k = 0; k < numChannels; k++) {
+  for (size_t k = 0; k < numChannels; k++) {
     /* Encode with the selected coder type */
     if (coder == webrtc::kDecoderPCMu) { /*g711 u-law */
 #ifdef CODEC_G711
@@ -1652,7 +1652,8 @@
 #endif
 #ifdef CODEC_ILBC
     else if (coder == webrtc::kDecoderILBC) { /*iLBC */
-      cdlen = WebRtcIlbcfix_Encode(iLBCenc_inst[k], indata, frameLen, encoded);
+      cdlen = static_cast<size_t>(std::max(
+          WebRtcIlbcfix_Encode(iLBCenc_inst[k], indata, frameLen, encoded), 0));
     }
 #endif
 #if (defined(CODEC_ISAC) || \
@@ -1660,28 +1661,30 @@
                                               // NETEQ_ISACFIX_CODEC
     else if (coder == webrtc::kDecoderISAC) { /*iSAC */
       int noOfCalls = 0;
-      cdlen = 0;
-      while (cdlen <= 0) {
+      int res = 0;
+      while (res <= 0) {
 #ifdef CODEC_ISAC /* floating point */
-        cdlen =
+        res =
             WebRtcIsac_Encode(ISAC_inst[k], &indata[noOfCalls * 160], encoded);
 #else /* fixed point */
-        cdlen = WebRtcIsacfix_Encode(ISAC_inst[k], &indata[noOfCalls * 160],
-                                     encoded);
+        res = WebRtcIsacfix_Encode(ISAC_inst[k], &indata[noOfCalls * 160],
+                                   encoded);
 #endif
         noOfCalls++;
       }
+      cdlen = static_cast<size_t>(res);
     }
 #endif
 #ifdef CODEC_ISAC_SWB
     else if (coder == webrtc::kDecoderISACswb) { /* iSAC SWB */
       int noOfCalls = 0;
-      cdlen = 0;
-      while (cdlen <= 0) {
-        cdlen = WebRtcIsac_Encode(ISACSWB_inst[k], &indata[noOfCalls * 320],
-                                  encoded);
+      int res = 0;
+      while (res <= 0) {
+        res = WebRtcIsac_Encode(ISACSWB_inst[k], &indata[noOfCalls * 320],
+                                encoded);
         noOfCalls++;
       }
+      cdlen = static_cast<size_t>(res);
     }
 #endif
     indata += frameLen;
@@ -1757,11 +1760,11 @@
   return rtpPointer - rtp_data;  // length of header in bytes
 }
 
-int makeDTMFpayload(unsigned char* payload_data,
-                    int Event,
-                    int End,
-                    int Volume,
-                    int Duration) {
+size_t makeDTMFpayload(unsigned char* payload_data,
+                       int Event,
+                       int End,
+                       int Volume,
+                       int Duration) {
   unsigned char E, R, V;
   R = 0;
   V = (unsigned char)Volume;
@@ -1778,11 +1781,11 @@
   return (4);
 }
 
-void stereoDeInterleave(int16_t* audioSamples, int numSamples) {
+void stereoDeInterleave(int16_t* audioSamples, size_t numSamples) {
   int16_t* tempVec;
   int16_t* readPtr, *writeL, *writeR;
 
-  if (numSamples <= 0)
+  if (numSamples == 0)
     return;
 
   tempVec = (int16_t*)malloc(sizeof(int16_t) * numSamples);
@@ -1797,7 +1800,7 @@
   writeR = &audioSamples[numSamples / 2];
   readPtr = tempVec;
 
-  for (int k = 0; k < numSamples; k += 2) {
+  for (size_t k = 0; k < numSamples; k += 2) {
     *writeL = *readPtr;
     readPtr++;
     *writeR = *readPtr;
@@ -1809,7 +1812,7 @@
   free(tempVec);
 }
 
-void stereoInterleave(unsigned char* data, int dataLen, int stride) {
+void stereoInterleave(unsigned char* data, size_t dataLen, size_t stride) {
   unsigned char* ptrL, *ptrR;
   unsigned char temp[10];
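
The format-string changes in this file (%d replaced by %" PRIuS ") matter on
LP64 targets, where passing a size_t to %d is undefined behavior. A minimal
sketch (assumption: PRIuS is WebRTC's printf length macro for size_t; a plain
"zu" fallback is defined here so the sketch stands alone):

    #include <cstddef>
    #include <cstdio>

    #ifndef PRIuS
    #define PRIuS "zu"  // Fallback for this sketch only.
    #endif

    int main() {
      size_t enc_frameSize = 160;  // Samples at 8 kHz, i.e. 20 ms.
      printf("10ms packets (not %" PRIuS "ms)\n", enc_frameSize >> 3);
      return 0;
    }
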
 
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
index 134539f..cb0780c 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
@@ -59,11 +59,11 @@
   }
 
   int EncodeBlock(int16_t* in_data,
-                  int block_size_samples,
+                  size_t block_size_samples,
                   uint8_t* payload,
-                  int max_bytes) override {
-    const int kFrameSizeSamples = 80;  // Samples per 10 ms.
-    int encoded_samples = 0;
+                  size_t max_bytes) override {
+    const size_t kFrameSizeSamples = 80;  // Samples per 10 ms.
+    size_t encoded_samples = 0;
     uint32_t dummy_timestamp = 0;
     AudioEncoder::EncodedInfo info;
     do {
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
index 85dd54d..47fae36 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
@@ -43,8 +43,8 @@
   NetEqIsacQualityTest();
   void SetUp() override;
   void TearDown() override;
-  virtual int EncodeBlock(int16_t* in_data, int block_size_samples,
-                          uint8_t* payload, int max_bytes);
+  virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
+                          uint8_t* payload, size_t max_bytes);
  private:
   ISACFIX_MainStruct* isac_encoder_;
   int bit_rate_kbps_;
@@ -78,8 +78,8 @@
 }
 
 int NetEqIsacQualityTest::EncodeBlock(int16_t* in_data,
-                                      int block_size_samples,
-                                      uint8_t* payload, int max_bytes) {
+                                      size_t block_size_samples,
+                                      uint8_t* payload, size_t max_bytes) {
   // ISAC takes 10 ms for every call.
   const int subblocks = kIsacBlockDurationMs / 10;
   const int subblock_length = 10 * kIsacInputSamplingKhz;
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
index 3a3b326..0406da2 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
@@ -103,12 +103,12 @@
   NetEqOpusQualityTest();
   void SetUp() override;
   void TearDown() override;
-  virtual int EncodeBlock(int16_t* in_data, int block_size_samples,
-                          uint8_t* payload, int max_bytes);
+  virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
+                          uint8_t* payload, size_t max_bytes);
  private:
   WebRtcOpusEncInst* opus_encoder_;
   OpusRepacketizer* repacketizer_;
-  int sub_block_size_samples_;
+  size_t sub_block_size_samples_;
   int bit_rate_kbps_;
   bool fec_;
   bool dtx_;
@@ -126,7 +126,8 @@
                        kDecoderOpus),
       opus_encoder_(NULL),
       repacketizer_(NULL),
-      sub_block_size_samples_(kOpusBlockDurationMs * kOpusSamplingKhz),
+      sub_block_size_samples_(
+          static_cast<size_t>(kOpusBlockDurationMs * kOpusSamplingKhz)),
       bit_rate_kbps_(FLAGS_bit_rate_kbps),
       fec_(FLAGS_fec),
       dtx_(FLAGS_dtx),
@@ -173,8 +174,8 @@
 }
 
 int NetEqOpusQualityTest::EncodeBlock(int16_t* in_data,
-                                      int block_size_samples,
-                                      uint8_t* payload, int max_bytes) {
+                                      size_t block_size_samples,
+                                      uint8_t* payload, size_t max_bytes) {
   EXPECT_EQ(block_size_samples, sub_block_size_samples_ * sub_packets_);
   int16_t* pointer = in_data;
   int value;
@@ -192,7 +193,8 @@
     }
     pointer += sub_block_size_samples_ * channels_;
   }
-  value = opus_repacketizer_out(repacketizer_, payload, max_bytes);
+  value = opus_repacketizer_out(repacketizer_, payload,
+                                static_cast<opus_int32>(max_bytes));
   EXPECT_GE(value, 0);
   return value;
 }
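
The static_cast<opus_int32>(max_bytes) above is a deliberate narrowing at an
external API boundary; other files in this change use rtc::checked_cast,
which also asserts that the value survives the round trip. A sketch of that
assertion (illustrative, not the rtc:: implementation):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int32_t CheckedNarrow(size_t value) {
      const int32_t narrowed = static_cast<int32_t>(value);
      // Fires if the size_t did not fit in a signed 32-bit integer.
      assert(static_cast<size_t>(narrowed) == value);
      return narrowed;
    }

    int main() {
      return CheckedNarrow(1500) == 1500 ? 0 : 1;  // Typical payload bound.
    }
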
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
index d94ceb6..0b89352 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
@@ -59,11 +59,11 @@
   }
 
   int EncodeBlock(int16_t* in_data,
-                  int block_size_samples,
+                  size_t block_size_samples,
                   uint8_t* payload,
-                  int max_bytes) override {
-    const int kFrameSizeSamples = 80;  // Samples per 10 ms.
-    int encoded_samples = 0;
+                  size_t max_bytes) override {
+    const size_t kFrameSizeSamples = 80;  // Samples per 10 ms.
+    size_t encoded_samples = 0;
     uint32_t dummy_timestamp = 0;
     AudioEncoder::EncodedInfo info;
     do {
diff --git a/webrtc/modules/audio_coding/neteq/time_stretch.cc b/webrtc/modules/audio_coding/neteq/time_stretch.cc
index 5577cd2..6ae81e6 100644
--- a/webrtc/modules/audio_coding/neteq/time_stretch.cc
+++ b/webrtc/modules/audio_coding/neteq/time_stretch.cc
@@ -12,6 +12,7 @@
 
 #include <algorithm>  // min, max
 
+#include "webrtc/base/safe_conversions.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/audio_coding/neteq/background_noise.h"
@@ -23,9 +24,10 @@
                                               size_t input_len,
                                               bool fast_mode,
                                               AudioMultiVector* output,
-                                              int16_t* length_change_samples) {
+                                              size_t* length_change_samples) {
   // Pre-calculate common multiplication with |fs_mult_|.
-  int fs_mult_120 = fs_mult_ * 120;  // Corresponds to 15 ms.
+  size_t fs_mult_120 =
+      static_cast<size_t>(fs_mult_ * 120);  // Corresponds to 15 ms.
 
   const int16_t* signal;
   rtc::scoped_ptr<int16_t[]> signal_array;
@@ -48,8 +50,7 @@
   }
 
   // Find maximum absolute value of input signal.
-  max_input_value_ = WebRtcSpl_MaxAbsValueW16(signal,
-                                              static_cast<int>(signal_len));
+  max_input_value_ = WebRtcSpl_MaxAbsValueW16(signal, signal_len);
 
   // Downsample to 4 kHz sample rate and calculate auto-correlation.
   DspHelper::DownsampleTo4kHz(signal, signal_len, kDownsampledLen,
@@ -58,13 +59,12 @@
   AutoCorrelation();
 
   // Find the strongest correlation peak.
-  static const int kNumPeaks = 1;
-  int peak_index;
+  static const size_t kNumPeaks = 1;
+  size_t peak_index;
   int16_t peak_value;
   DspHelper::PeakDetection(auto_correlation_, kCorrelationLen, kNumPeaks,
                            fs_mult_, &peak_index, &peak_value);
   // Assert that |peak_index| stays within boundaries.
-  assert(peak_index >= 0);
   assert(peak_index <= (2 * kCorrelationLen - 1) * fs_mult_);
 
   // Compensate peak_index for displaced starting position. The displacement
@@ -73,13 +73,13 @@
   // multiplication by fs_mult_ * 2.
   peak_index += kMinLag * fs_mult_ * 2;
   // Assert that |peak_index| stays within boundaries.
-  assert(peak_index >= 20 * fs_mult_);
+  assert(peak_index >= static_cast<size_t>(20 * fs_mult_));
   assert(peak_index <= 20 * fs_mult_ + (2 * kCorrelationLen - 1) * fs_mult_);
 
   // Calculate scaling to ensure that |peak_index| samples can be square-summed
   // without overflowing.
   int scaling = 31 - WebRtcSpl_NormW32(max_input_value_ * max_input_value_) -
-      WebRtcSpl_NormW32(peak_index);
+      WebRtcSpl_NormW32(static_cast<int32_t>(peak_index));
   scaling = std::max(0, scaling);
 
   // |vec1| starts at 15 ms minus one pitch period.
@@ -177,7 +177,7 @@
 }
 
 bool TimeStretch::SpeechDetection(int32_t vec1_energy, int32_t vec2_energy,
-                                  int peak_index, int scaling) const {
+                                  size_t peak_index, int scaling) const {
   // Check if the signal seems to be active speech or not (simple VAD).
   // If (vec1_energy + vec2_energy) / (2 * peak_index) <=
   // 8 * background_noise_energy, then we say that the signal contains no
@@ -197,7 +197,8 @@
   int right_scale = 16 - WebRtcSpl_NormW32(right_side);
   right_scale = std::max(0, right_scale);
   left_side = left_side >> right_scale;
-  right_side = peak_index * (right_side >> right_scale);
+  right_side =
+      rtc::checked_cast<int32_t>(peak_index) * (right_side >> right_scale);
 
   // Scale |left_side| properly before comparing with |right_side|.
   // (|scaling| is the scale factor before energy calculation, thus the scale
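
The 'scaling' computed above bounds a sum of peak_index squared samples so it
stays inside 32 bits: a product of two B-bit magnitudes needs about 2B bits,
and summing n of them adds roughly log2(n) more. A back-of-the-envelope
sketch of that bound (illustrative arithmetic, not the SPL formula):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    int Bits(uint64_t v) {  // Number of bits needed to represent v.
      int b = 0;
      while (v) { ++b; v >>= 1; }
      return b;
    }

    // Right shift so that n terms, each at most max_val^2, sum inside int32.
    int ScalingFor(int32_t max_val, uint32_t n) {
      const uint64_t max_term = static_cast<uint64_t>(max_val) * max_val;
      return std::max(0, Bits(max_term) + Bits(n) - 31);
    }

    int main() {
      assert(ScalingFor(100, 240) == 0);   // Quiet signal: no shift needed.
      assert(ScalingFor(32767, 240) > 0);  // Full-scale audio must shift.
      return 0;
    }
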
diff --git a/webrtc/modules/audio_coding/neteq/time_stretch.h b/webrtc/modules/audio_coding/neteq/time_stretch.h
index 7c84e1a..14383d8 100644
--- a/webrtc/modules/audio_coding/neteq/time_stretch.h
+++ b/webrtc/modules/audio_coding/neteq/time_stretch.h
@@ -39,7 +39,7 @@
               const BackgroundNoise& background_noise)
       : sample_rate_hz_(sample_rate_hz),
         fs_mult_(sample_rate_hz / 8000),
-        num_channels_(static_cast<int>(num_channels)),
+        num_channels_(num_channels),
         master_channel_(0),  // First channel is master.
         background_noise_(background_noise),
         max_input_value_(0) {
@@ -48,7 +48,7 @@
            sample_rate_hz_ == 32000 ||
            sample_rate_hz_ == 48000);
     assert(num_channels_ > 0);
-    assert(static_cast<int>(master_channel_) < num_channels_);
+    assert(master_channel_ < num_channels_);
     memset(auto_correlation_, 0, sizeof(auto_correlation_));
   }
 
@@ -60,7 +60,7 @@
                       size_t input_len,
                       bool fast_mode,
                       AudioMultiVector* output,
-                      int16_t* length_change_samples);
+                      size_t* length_change_samples);
 
  protected:
   // Sets the parameters |best_correlation| and |peak_index| to suitable
@@ -68,7 +68,7 @@
   // implemented by the sub-classes.
   virtual void SetParametersForPassiveSpeech(size_t input_length,
                                              int16_t* best_correlation,
-                                             int* peak_index) const = 0;
+                                             size_t* peak_index) const = 0;
 
   // Checks the criteria for performing the time-stretching operation and,
   // if possible, performs the time-stretching. This method must be implemented
@@ -82,16 +82,16 @@
       bool fast_mode,
       AudioMultiVector* output) const = 0;
 
-  static const int kCorrelationLen = 50;
-  static const int kLogCorrelationLen = 6;  // >= log2(kCorrelationLen).
-  static const int kMinLag = 10;
-  static const int kMaxLag = 60;
-  static const int kDownsampledLen = kCorrelationLen + kMaxLag;
+  static const size_t kCorrelationLen = 50;
+  static const size_t kLogCorrelationLen = 6;  // >= log2(kCorrelationLen).
+  static const size_t kMinLag = 10;
+  static const size_t kMaxLag = 60;
+  static const size_t kDownsampledLen = kCorrelationLen + kMaxLag;
   static const int kCorrelationThreshold = 14746;  // 0.9 in Q14.
 
   const int sample_rate_hz_;
   const int fs_mult_;  // Sample rate multiplier = sample_rate_hz_ / 8000.
-  const int num_channels_;
+  const size_t num_channels_;
   const size_t master_channel_;
   const BackgroundNoise& background_noise_;
   int16_t max_input_value_;
@@ -107,7 +107,7 @@
 
   // Performs a simple voice-activity detection based on the input parameters.
   bool SpeechDetection(int32_t vec1_energy, int32_t vec2_energy,
-                       int peak_index, int scaling) const;
+                       size_t peak_index, int scaling) const;
 
   DISALLOW_COPY_AND_ASSIGN(TimeStretch);
 };
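
Note for reviewers: moving the length constants to size_t keeps loop counters, array extents, and buffer lengths in one unsigned domain. A minimal sketch (names suffixed "Sketch" are illustrative, not the real members):

    #include <stddef.h>
    #include <stdint.h>

    static const size_t kCorrelationLenSketch = 50;
    static const size_t kMaxLagSketch = 60;
    static const size_t kDownsampledLenSketch =
        kCorrelationLenSketch + kMaxLagSketch;  // 110

    void ClearBuffer(int32_t (&buf)[kDownsampledLenSketch]) {
      for (size_t i = 0; i < kDownsampledLenSketch; ++i)  // size_t vs. size_t:
        buf[i] = 0;                                       // no -Wsign-compare
    }
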
diff --git a/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc b/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc
index 05385a1..cbe4b04 100644
--- a/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc
@@ -75,12 +75,12 @@
 
   // Returns the total length change (in samples) that the accelerate operation
   // resulted in during the run.
-  int TestAccelerate(int loops, bool fast_mode) {
+  size_t TestAccelerate(size_t loops, bool fast_mode) {
     Accelerate accelerate(sample_rate_hz_, kNumChannels, background_noise_);
-    int total_length_change = 0;
-    for (int i = 0; i < loops; ++i) {
+    size_t total_length_change = 0;
+    for (size_t i = 0; i < loops; ++i) {
       AudioMultiVector output(kNumChannels);
-      int16_t length_change;
+      size_t length_change;
       UpdateReturnStats(accelerate.Process(Next30Ms(), block_size_, fast_mode,
                                            &output, &length_change));
       total_length_change += length_change;
@@ -110,7 +110,7 @@
 
 TEST_F(TimeStretchTest, Accelerate) {
   // TestAccelerate returns the total length change in samples.
-  EXPECT_EQ(15268, TestAccelerate(100, false));
+  EXPECT_EQ(15268U, TestAccelerate(100, false));
   EXPECT_EQ(9, return_stats_[TimeStretch::kSuccess]);
   EXPECT_EQ(58, return_stats_[TimeStretch::kSuccessLowEnergy]);
   EXPECT_EQ(33, return_stats_[TimeStretch::kNoStretch]);
@@ -118,7 +118,7 @@
 
 TEST_F(TimeStretchTest, AccelerateFastMode) {
   // TestAccelerate returns the total length change in samples.
-  EXPECT_EQ(21400, TestAccelerate(100, true));
+  EXPECT_EQ(21400U, TestAccelerate(100, true));
   EXPECT_EQ(31, return_stats_[TimeStretch::kSuccess]);
   EXPECT_EQ(58, return_stats_[TimeStretch::kSuccessLowEnergy]);
   EXPECT_EQ(11, return_stats_[TimeStretch::kNoStretch]);
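
Note for reviewers: the 15268U / 21400U suffixes are needed because TestAccelerate now returns size_t; EXPECT_EQ compares its arguments with operator==, and a plain int literal against a size_t triggers signed/unsigned comparison warnings on some compilers. A sketch of the idiom:

    #include <stddef.h>
    #include "testing/gtest/include/gtest/gtest.h"

    TEST(SizeTypeSketch, UnsignedLiterals) {
      size_t total = 15268;      // stand-in for TestAccelerate's return value
      EXPECT_EQ(15268U, total);  // unsigned literal: both sides unsigned
      int expected = 15268;
      EXPECT_EQ(static_cast<size_t>(expected), total);  // when only int exists
    }
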
diff --git a/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc b/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
index af4b8e1..016acde 100644
--- a/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
@@ -31,8 +31,8 @@
       seq_number_(0),
       timestamp_(0),
       payload_ssrc_(0xABCD1234) {
-  int encoded_len = WebRtcPcm16b_Encode(&sample_value, 1, encoded_sample_);
-  CHECK_EQ(2, encoded_len);
+  size_t encoded_len = WebRtcPcm16b_Encode(&sample_value, 1, encoded_sample_);
+  CHECK_EQ(2U, encoded_len);
 }
 
 Packet* ConstantPcmPacketSource::NextPacket() {
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
index 52c34bb..49750c2 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
@@ -43,11 +43,11 @@
           rtp_header, payload, payload_size_bytes, receive_timestamp));
 }
 
-int NetEqExternalDecoderTest::GetOutputAudio(size_t max_length,
-                                             int16_t* output,
-                                             NetEqOutputType* output_type) {
+size_t NetEqExternalDecoderTest::GetOutputAudio(size_t max_length,
+                                                int16_t* output,
+                                                NetEqOutputType* output_type) {
   // Get audio from regular instance.
-  int samples_per_channel;
+  size_t samples_per_channel;
   int num_channels;
   EXPECT_EQ(NetEq::kOK,
             neteq_->GetAudio(max_length,
@@ -56,7 +56,8 @@
                              &num_channels,
                              output_type));
   EXPECT_EQ(channels_, num_channels);
-  EXPECT_EQ(kOutputLengthMs * sample_rate_hz_ / 1000, samples_per_channel);
+  EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
+            samples_per_channel);
   return samples_per_channel;
 }
 
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
index 0d4d2f9..202d1f3 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
@@ -42,8 +42,8 @@
 
   // Get 10 ms of audio data. The data is written to |output|, which can hold
   // (at least) |max_length| elements. Returns number of samples.
-  int GetOutputAudio(size_t max_length, int16_t* output,
-                     NetEqOutputType* output_type);
+  size_t GetOutputAudio(size_t max_length, int16_t* output,
+                        NetEqOutputType* output_type);
 
   NetEq* neteq() { return neteq_.get(); }
 
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
index 1c76d76..57397e1 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
@@ -101,19 +101,19 @@
 
     // Get output audio, but don't do anything with it.
     static const int kMaxChannels = 1;
-    static const int kMaxSamplesPerMs = 48000 / 1000;
+    static const size_t kMaxSamplesPerMs = 48000 / 1000;
     static const int kOutputBlockSizeMs = 10;
-    static const int kOutDataLen =
+    static const size_t kOutDataLen =
         kOutputBlockSizeMs * kMaxSamplesPerMs * kMaxChannels;
     int16_t out_data[kOutDataLen];
     int num_channels;
-    int samples_per_channel;
+    size_t samples_per_channel;
     int error = neteq->GetAudio(kOutDataLen, out_data, &samples_per_channel,
                                 &num_channels, NULL);
     if (error != NetEq::kOK)
       return -1;
 
-    assert(samples_per_channel == kSampRateHz * 10 / 1000);
+    assert(samples_per_channel == static_cast<size_t>(kSampRateHz * 10 / 1000));
 
     time_now_ms += kOutputBlockSizeMs;
     if (time_now_ms >= runtime_ms / 2 && !drift_flipped) {
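
Note for reviewers: kOutDataLen moves to size_t because it sizes a stack array and is passed as NetEq's max_length (already size_t); array extents are size_t by definition. Sketch:

    #include <stddef.h>
    #include <stdint.h>

    void OutDataSketch() {
      static const int kMaxChannels = 1;
      static const size_t kMaxSamplesPerMs = 48000 / 1000;
      static const int kOutputBlockSizeMs = 10;
      static const size_t kOutDataLen =
          kOutputBlockSizeMs * kMaxSamplesPerMs * kMaxChannels;  // 480
      int16_t out_data[kOutDataLen];  // extent is a size_t constant expression
      (void)out_data;
    }
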
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
index c60b993..1c028c9 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
@@ -218,8 +218,9 @@
       block_duration_ms_(block_duration_ms),
       in_sampling_khz_(in_sampling_khz),
       out_sampling_khz_(out_sampling_khz),
-      in_size_samples_(in_sampling_khz_ * block_duration_ms_),
-      out_size_samples_(out_sampling_khz_ * kOutputSizeMs),
+      in_size_samples_(
+          static_cast<size_t>(in_sampling_khz_ * block_duration_ms_)),
+      out_size_samples_(static_cast<size_t>(out_sampling_khz_ * kOutputSizeMs)),
       payload_size_bytes_(0),
       max_payload_bytes_(0),
       in_file_(new ResampleInputAudioFile(FLAGS_in_filename,
@@ -392,7 +393,7 @@
 
 int NetEqQualityTest::DecodeBlock() {
   int channels;
-  int samples;
+  size_t samples;
   int ret = neteq_->GetAudio(out_size_samples_ * channels_, &out_data_[0],
                              &samples, &channels, NULL);
 
@@ -400,9 +401,9 @@
     return -1;
   } else {
     assert(channels == channels_);
-    assert(samples == kOutputSizeMs * out_sampling_khz_);
+    assert(samples == static_cast<size_t>(kOutputSizeMs * out_sampling_khz_));
     CHECK(output_->WriteArray(out_data_.get(), samples * channels));
-    return samples;
+    return static_cast<int>(samples);
   }
 }
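Note for reviewers: DecodeBlock keeps its int return type because negative values signal errors, so the internal size_t count is narrowed back explicitly at the boundary. A sketch of the convention, with ReturnSampleCount as a hypothetical stand-in:

    #include <stddef.h>

    int ReturnSampleCount(size_t samples) {
      if (samples == 0)
        return -1;                       // error path needs a signed result
      return static_cast<int>(samples);  // deliberate, visible narrowing
    }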
 
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
index 4a0d808..ba87dbf 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
@@ -76,8 +76,8 @@
   // |block_size_samples| (samples per channel),
   // 2. save the bit stream to |payload| of |max_bytes| bytes in size,
   // 3. returns the length of the payload (in bytes),
-  virtual int EncodeBlock(int16_t* in_data, int block_size_samples,
-                          uint8_t* payload, int max_bytes) = 0;
+  virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
+                          uint8_t* payload, size_t max_bytes) = 0;
 
   // PacketLost(...) determines whether a packet sent at an indicated time gets
   // lost or not.
@@ -111,13 +111,13 @@
   const int out_sampling_khz_;
 
   // Number of samples per channel in a frame.
-  const int in_size_samples_;
+  const size_t in_size_samples_;
 
   // Expected output number of samples per channel in a frame.
-  const int out_size_samples_;
+  const size_t out_size_samples_;
 
   size_t payload_size_bytes_;
-  int max_payload_bytes_;
+  size_t max_payload_bytes_;
 
   rtc::scoped_ptr<InputAudioFile> in_file_;
   rtc::scoped_ptr<AudioSink> output_;
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
index 6bcd717..1c08078 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
@@ -23,6 +23,7 @@
 
 #include "google/gflags.h"
 #include "webrtc/base/checks.h"
+#include "webrtc/base/safe_conversions.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
 #include "webrtc/modules/audio_coding/neteq/interface/neteq.h"
@@ -324,7 +325,7 @@
     // Encode it as PCM16.
     assert((*payload).get());
     payload_len = WebRtcPcm16b_Encode((*replacement_audio).get(),
-                                      static_cast<int16_t>(*frame_size_samples),
+                                      *frame_size_samples,
                                       (*payload).get());
     assert(payload_len == 2 * *frame_size_samples);
     // Change payload type to PCM16.
@@ -358,7 +359,7 @@
 
 int main(int argc, char* argv[]) {
   static const int kMaxChannels = 5;
-  static const int kMaxSamplesPerMs = 48000 / 1000;
+  static const size_t kMaxSamplesPerMs = 48000 / 1000;
   static const int kOutputBlockSizeMs = 10;
 
   std::string program_name = argv[0];
@@ -552,11 +553,11 @@
 
     // Check if it is time to get output audio.
     if (time_now_ms >= next_output_time_ms) {
-      static const int kOutDataLen =
+      static const size_t kOutDataLen =
           kOutputBlockSizeMs * kMaxSamplesPerMs * kMaxChannels;
       int16_t out_data[kOutDataLen];
       int num_channels;
-      int samples_per_channel;
+      size_t samples_per_channel;
       int error = neteq->GetAudio(kOutDataLen, out_data, &samples_per_channel,
                                    &num_channels, NULL);
       if (error != NetEq::kOK) {
@@ -564,7 +565,8 @@
             neteq->LastError() << std::endl;
       } else {
         // Calculate sample rate from output size.
-        sample_rate_hz = 1000 * samples_per_channel / kOutputBlockSizeMs;
+        sample_rate_hz = rtc::checked_cast<int>(
+            1000 * samples_per_channel / kOutputBlockSizeMs);
       }
 
       // Write to file.
diff --git a/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc b/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
index 47450bc..d69918b 100644
--- a/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
@@ -26,14 +26,11 @@
   if (!InputAudioFile::Read(samples_to_read, temp_destination.get()))
     return false;
   resampler_.ResetIfNeeded(file_rate_hz_, output_rate_hz, 1);
-  int output_length = 0;
-  CHECK_EQ(resampler_.Push(temp_destination.get(),
-                           static_cast<int>(samples_to_read),
-                           destination,
-                           static_cast<int>(samples),
-                           output_length),
+  size_t output_length = 0;
+  CHECK_EQ(resampler_.Push(temp_destination.get(), samples_to_read, destination,
+                           samples, output_length),
            0);
-  CHECK_EQ(static_cast<int>(samples), output_length);
+  CHECK_EQ(samples, output_length);
   return true;
 }
 
diff --git a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
index ad491fb..490fe58 100644
--- a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
+++ b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
@@ -287,7 +287,7 @@
                                 AudioFrame::kNormalSpeech,
                                 AudioFrame::kVadPassive, num_mixed_channels);
 
-        _timeStamp += _sampleSize;
+        _timeStamp += static_cast<uint32_t>(_sampleSize);
 
         // We only use the limiter if it supports the output sample rate and
         // we're actually mixing multiple streams.
@@ -357,7 +357,8 @@
     CriticalSectionScoped cs(_crit.get());
 
     _outputFrequency = frequency;
-    _sampleSize = (_outputFrequency*kProcessPeriodicityInMs) / 1000;
+    _sampleSize =
+        static_cast<size_t>((_outputFrequency*kProcessPeriodicityInMs) / 1000);
 
     return 0;
 }
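
Note for reviewers: the _timeStamp += static_cast<uint32_t>(_sampleSize) cast marks the wire boundary: RTP timestamps are 32-bit and wrap by design, while _sampleSize is now a size_t sample count. Sketch (values illustrative):

    #include <stddef.h>
    #include <stdint.h>

    void AdvanceRtpTimestampSketch() {
      size_t sample_size = 48000 / 100;      // samples per 10 ms @ 48 kHz
      uint32_t rtp_timestamp = 0xFFFFFF00u;  // near the 32-bit limit
      rtp_timestamp += static_cast<uint32_t>(sample_size);
      // rtp_timestamp wrapped modulo 2^32, which is exactly what RTP expects;
      // the explicit cast documents that the narrowing is intentional.
      (void)rtp_timestamp;
    }
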
diff --git a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
index 2e02448..14b15da 100644
--- a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
+++ b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
@@ -160,7 +160,7 @@
 
     // The current sample frequency and sample size when mixing.
     Frequency _outputFrequency;
-    uint16_t _sampleSize;
+    size_t _sampleSize;
 
     // Memory pool to avoid allocating/deallocating AudioFrames
     MemoryPool<AudioFrame>* _audioFramePool;
diff --git a/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc b/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc
index 3dce5c8..636698e 100644
--- a/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc
+++ b/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc
@@ -35,14 +35,14 @@
                            0.8608f, 0.8734f, 0.8861f, 0.8987f,
                            0.9114f, 0.9241f, 0.9367f, 0.9494f,
                            0.9620f, 0.9747f, 0.9873f, 1.0000f};
-const int rampSize = sizeof(rampArray)/sizeof(rampArray[0]);
+const size_t rampSize = sizeof(rampArray)/sizeof(rampArray[0]);
 }  // namespace
 
 namespace webrtc {
 void CalculateEnergy(AudioFrame& audioFrame)
 {
     audioFrame.energy_ = 0;
-    for(int position = 0; position < audioFrame.samples_per_channel_;
+    for(size_t position = 0; position < audioFrame.samples_per_channel_;
         position++)
     {
         // TODO(andrew): this can easily overflow.
@@ -54,7 +54,7 @@
 void RampIn(AudioFrame& audioFrame)
 {
     assert(rampSize <= audioFrame.samples_per_channel_);
-    for(int i = 0; i < rampSize; i++)
+    for(size_t i = 0; i < rampSize; i++)
     {
         audioFrame.data_[i] = static_cast<int16_t>(rampArray[i] *
                                                    audioFrame.data_[i]);
@@ -64,9 +64,9 @@
 void RampOut(AudioFrame& audioFrame)
 {
     assert(rampSize <= audioFrame.samples_per_channel_);
-    for(int i = 0; i < rampSize; i++)
+    for(size_t i = 0; i < rampSize; i++)
     {
-        const int rampPos = rampSize - 1 - i;
+        const size_t rampPos = rampSize - 1 - i;
         audioFrame.data_[i] = static_cast<int16_t>(rampArray[rampPos] *
                                                    audioFrame.data_[i]);
     }
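
Note for reviewers: rampSize comes from sizeof(rampArray)/sizeof(rampArray[0]), and sizeof yields size_t, so the loop counters follow suit; the old int declaration forced an implicit narrowing. Sketch with illustrative values:

    #include <stddef.h>
    #include <stdint.h>

    const float kRampSketch[] = {0.25f, 0.5f, 0.75f, 1.0f};
    const size_t kRampSizeSketch =
        sizeof(kRampSketch) / sizeof(kRampSketch[0]);  // sizeof is size_t

    void RampInSketch(int16_t* data) {
      for (size_t i = 0; i < kRampSizeSketch; ++i)
        data[i] = static_cast<int16_t>(kRampSketch[i] * data[i]);
    }
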
diff --git a/webrtc/modules/audio_device/android/audio_common.h b/webrtc/modules/audio_device/android/audio_common.h
index cb25983..4eecae4 100644
--- a/webrtc/modules/audio_device/android/audio_common.h
+++ b/webrtc/modules/audio_device/android/audio_common.h
@@ -13,22 +13,19 @@
 
 namespace webrtc {
 
-enum {
-  kDefaultSampleRate = 44100,
-  kNumChannels = 1,
-  // Number of bytes per audio frame.
-  // Example: 16-bit PCM in mono => 1*(16/8)=2 [bytes/frame]
-  kBytesPerFrame = kNumChannels * (16 / 8),
-  // Delay estimates for the two different supported modes. These values
-  // are based on real-time round-trip delay estimates on a large set of
-  // devices and they are lower bounds since the filter length is 128 ms,
-  // so the AEC works for delays in the range [50, ~170] ms and [150, ~270] ms.
-  // Note that, in most cases, the lowest delay estimate will not be utilized
-  // since devices that support low-latency output audio often supports
-  // HW AEC as well.
-  kLowLatencyModeDelayEstimateInMilliseconds = 50,
-  kHighLatencyModeDelayEstimateInMilliseconds = 150,
-};
+const int kDefaultSampleRate = 44100;
+const int kNumChannels = 1;
+// Number of bytes per audio frame.
+// Example: 16-bit PCM in mono => 1*(16/8)=2 [bytes/frame]
+const size_t kBytesPerFrame = kNumChannels * (16 / 8);
+// Delay estimates for the two different supported modes. These values are based
+// on real-time round-trip delay estimates on a large set of devices and they
+// are lower bounds since the filter length is 128 ms, so the AEC works for
+// delays in the range [50, ~170] ms and [150, ~270] ms. Note that, in most
+// cases, the lowest delay estimate will not be utilized since devices that
+// support low-latency output audio often support HW AEC as well.
+const int kLowLatencyModeDelayEstimateInMilliseconds = 50;
+const int kHighLatencyModeDelayEstimateInMilliseconds = 150;
 
 }  // namespace webrtc
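
Note for reviewers: the anonymous enum is replaced by typed constants because enumerators behave as ints in arithmetic; kBytesPerFrame feeds byte-count arithmetic, so it becomes const size_t while the rates and delays stay int. A sketch of the distinction (names suffixed "Sketch" are illustrative):

    #include <stddef.h>

    const int kDefaultSampleRateSketch = 44100;         // a rate: stays int
    const size_t kBytesPerFrameSketch = 1 * (16 / 8);   // a byte count: size_t

    // Byte-count arithmetic now never leaves the unsigned domain:
    size_t BytesPerBuffer(size_t frames) {
      return frames * kBytesPerFrameSketch;
    }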
 
diff --git a/webrtc/modules/audio_device/android/audio_device_unittest.cc b/webrtc/modules/audio_device/android/audio_device_unittest.cc
index 0aef6f9..9440d50 100644
--- a/webrtc/modules/audio_device/android/audio_device_unittest.cc
+++ b/webrtc/modules/audio_device/android/audio_device_unittest.cc
@@ -19,6 +19,7 @@
 #include "testing/gtest/include/gtest/gtest.h"
 #include "webrtc/base/arraysize.h"
 #include "webrtc/base/criticalsection.h"
+#include "webrtc/base/format_macros.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/base/scoped_ref_ptr.h"
 #include "webrtc/modules/audio_device/android/audio_common.h"
@@ -55,22 +56,22 @@
 
 // Number of callbacks (input or output) the tests waits for before we set
 // an event indicating that the test was OK.
-static const int kNumCallbacks = 10;
+static const size_t kNumCallbacks = 10;
 // Max amount of time we wait for an event to be set while counting callbacks.
 static const int kTestTimeOutInMilliseconds = 10 * 1000;
 // Average number of audio callbacks per second assuming 10ms packet size.
-static const int kNumCallbacksPerSecond = 100;
+static const size_t kNumCallbacksPerSecond = 100;
 // Play out a test file during this time (unit is in seconds).
 static const int kFilePlayTimeInSec = 5;
-static const int kBitsPerSample = 16;
-static const int kBytesPerSample = kBitsPerSample / 8;
+static const size_t kBitsPerSample = 16;
+static const size_t kBytesPerSample = kBitsPerSample / 8;
 // Run the full-duplex test during this time (unit is in seconds).
 // Note that the first |kNumIgnoreFirstCallbacks| are ignored.
 static const int kFullDuplexTimeInSec = 5;
 // Wait for the callback sequence to stabilize by ignoring this number of
 // initial callbacks (avoids initial FIFO access).
 // Only used in the RunPlayoutAndRecordingInFullDuplex test.
-static const int kNumIgnoreFirstCallbacks = 50;
+static const size_t kNumIgnoreFirstCallbacks = 50;
 // Sets the number of impulses per second in the latency test.
 static const int kImpulseFrequencyInHz = 1;
 // Length of round-trip latency measurements. Number of transmitted impulses
@@ -90,8 +91,8 @@
 // measurements.
 class AudioStreamInterface {
  public:
-  virtual void Write(const void* source, int num_frames) = 0;
-  virtual void Read(void* destination, int num_frames) = 0;
+  virtual void Write(const void* source, size_t num_frames) = 0;
+  virtual void Read(void* destination, size_t num_frames) = 0;
  protected:
   virtual ~AudioStreamInterface() {}
 };
@@ -101,7 +102,7 @@
 class FileAudioStream : public AudioStreamInterface {
  public:
   FileAudioStream(
-      int num_callbacks, const std::string& file_name, int sample_rate)
+      size_t num_callbacks, const std::string& file_name, int sample_rate)
       : file_size_in_bytes_(0),
         sample_rate_(sample_rate),
         file_pos_(0) {
@@ -109,23 +110,23 @@
     sample_rate_ = sample_rate;
     EXPECT_GE(file_size_in_callbacks(), num_callbacks)
         << "Size of test file is not large enough to last during the test.";
-    const int num_16bit_samples =
+    const size_t num_16bit_samples =
         test::GetFileSize(file_name) / kBytesPerSample;
     file_.reset(new int16_t[num_16bit_samples]);
     FILE* audio_file = fopen(file_name.c_str(), "rb");
     EXPECT_NE(audio_file, nullptr);
-    int num_samples_read = fread(
+    size_t num_samples_read = fread(
         file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
     EXPECT_EQ(num_samples_read, num_16bit_samples);
     fclose(audio_file);
   }
 
   // AudioStreamInterface::Write() is not implemented.
-  void Write(const void* source, int num_frames) override {}
+  void Write(const void* source, size_t num_frames) override {}
 
   // Read samples from file stored in memory (at construction) and copy
   // |num_frames| (<=> 10ms) to the |destination| byte buffer.
-  void Read(void* destination, int num_frames) override {
+  void Read(void* destination, size_t num_frames) override {
     memcpy(destination,
            static_cast<int16_t*> (&file_[file_pos_]),
            num_frames * sizeof(int16_t));
@@ -133,17 +134,18 @@
   }
 
   int file_size_in_seconds() const {
-    return (file_size_in_bytes_ / (kBytesPerSample * sample_rate_));
+    return static_cast<int>(
+        file_size_in_bytes_ / (kBytesPerSample * sample_rate_));
   }
-  int file_size_in_callbacks() const {
+  size_t file_size_in_callbacks() const {
     return file_size_in_seconds() * kNumCallbacksPerSecond;
   }
 
  private:
-  int file_size_in_bytes_;
+  size_t file_size_in_bytes_;
   int sample_rate_;
   rtc::scoped_ptr<int16_t[]> file_;
-  int file_pos_;
+  size_t file_pos_;
 };
 
 // Simple first in first out (FIFO) class that wraps a list of 16-bit audio
@@ -156,7 +158,7 @@
 // since both sides (playout and recording) are driven by its own thread.
 class FifoAudioStream : public AudioStreamInterface {
  public:
-  explicit FifoAudioStream(int frames_per_buffer)
+  explicit FifoAudioStream(size_t frames_per_buffer)
       : frames_per_buffer_(frames_per_buffer),
         bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
         fifo_(new AudioBufferList),
@@ -173,7 +175,7 @@
   // Allocate new memory, copy |num_frames| samples from |source| into memory
   // and add pointer to the memory location to end of the list.
   // Increases the size of the FIFO by one element.
-  void Write(const void* source, int num_frames) override {
+  void Write(const void* source, size_t num_frames) override {
     ASSERT_EQ(num_frames, frames_per_buffer_);
     PRINTD("+");
     if (write_count_++ < kNumIgnoreFirstCallbacks) {
@@ -185,10 +187,10 @@
            bytes_per_buffer_);
     rtc::CritScope lock(&lock_);
     fifo_->push_back(memory);
-    const int size = fifo_->size();
+    const size_t size = fifo_->size();
     if (size > largest_size_) {
       largest_size_ = size;
-      PRINTD("(%d)", largest_size_);
+      PRINTD("(%" PRIuS ")", largest_size_);
     }
     total_written_elements_ += size;
   }
@@ -196,7 +198,7 @@
   // Read pointer to data buffer from front of list, copy |num_frames| of stored
   // data into |destination| and delete the utilized memory allocation.
   // Decreases the size of the FIFO by one element.
-  void Read(void* destination, int num_frames) override {
+  void Read(void* destination, size_t num_frames) override {
     ASSERT_EQ(num_frames, frames_per_buffer_);
     PRINTD("-");
     rtc::CritScope lock(&lock_);
@@ -212,15 +214,15 @@
     }
   }
 
-  int size() const {
+  size_t size() const {
     return fifo_->size();
   }
 
-  int largest_size() const {
+  size_t largest_size() const {
     return largest_size_;
   }
 
-  int average_size() const {
+  size_t average_size() const {
     return (total_written_elements_ == 0) ? 0.0 : 0.5 + static_cast<float> (
       total_written_elements_) / (write_count_ - kNumIgnoreFirstCallbacks);
   }
@@ -235,12 +237,12 @@
 
   using AudioBufferList = std::list<int16_t*>;
   rtc::CriticalSection lock_;
-  const int frames_per_buffer_;
-  const int bytes_per_buffer_;
+  const size_t frames_per_buffer_;
+  const size_t bytes_per_buffer_;
   rtc::scoped_ptr<AudioBufferList> fifo_;
-  int largest_size_;
-  int total_written_elements_;
-  int write_count_;
+  size_t largest_size_;
+  size_t total_written_elements_;
+  size_t write_count_;
 };
 
 // Inserts periodic impulses and measures the latency between the time of
@@ -249,7 +251,7 @@
 // See http://source.android.com/devices/audio/loopback.html for details.
 class LatencyMeasuringAudioStream : public AudioStreamInterface {
  public:
-  explicit LatencyMeasuringAudioStream(int frames_per_buffer)
+  explicit LatencyMeasuringAudioStream(size_t frames_per_buffer)
       : clock_(Clock::GetRealTimeClock()),
         frames_per_buffer_(frames_per_buffer),
         bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
@@ -259,7 +261,7 @@
   }
 
   // Insert periodic impulses in first two samples of |destination|.
-  void Read(void* destination, int num_frames) override {
+  void Read(void* destination, size_t num_frames) override {
     ASSERT_EQ(num_frames, frames_per_buffer_);
     if (play_count_ == 0) {
       PRINT("[");
@@ -273,15 +275,15 @@
       PRINT(".");
       const int16_t impulse = std::numeric_limits<int16_t>::max();
       int16_t* ptr16 = static_cast<int16_t*> (destination);
-      for (int i = 0; i < 2; ++i) {
-        *ptr16++ = impulse;
+      for (size_t i = 0; i < 2; ++i) {
+        ptr16[i] = impulse;
       }
     }
   }
 
   // Detect received impulses in |source|, derive time between transmission and
   // detection and add the calculated delay to list of latencies.
-  void Write(const void* source, int num_frames) override {
+  void Write(const void* source, size_t num_frames) override {
     ASSERT_EQ(num_frames, frames_per_buffer_);
     rec_count_++;
     if (pulse_time_ == 0) {
@@ -315,7 +317,7 @@
     }
   }
 
-  int num_latency_values() const {
+  size_t num_latency_values() const {
     return latencies_.size();
   }
 
@@ -355,10 +357,10 @@
 
  private:
   Clock* clock_;
-  const int frames_per_buffer_;
-  const int bytes_per_buffer_;
-  int play_count_;
-  int rec_count_;
+  const size_t frames_per_buffer_;
+  const size_t bytes_per_buffer_;
+  size_t play_count_;
+  size_t rec_count_;
   int64_t pulse_time_;
   std::vector<int> latencies_;
 };
@@ -379,8 +381,8 @@
 
   MOCK_METHOD10(RecordedDataIsAvailable,
                 int32_t(const void* audioSamples,
-                        const uint32_t nSamples,
-                        const uint8_t nBytesPerSample,
+                        const size_t nSamples,
+                        const size_t nBytesPerSample,
                         const uint8_t nChannels,
                         const uint32_t samplesPerSec,
                         const uint32_t totalDelayMS,
@@ -389,12 +391,12 @@
                         const bool keyPressed,
                         uint32_t& newMicLevel));
   MOCK_METHOD8(NeedMorePlayData,
-               int32_t(const uint32_t nSamples,
-                       const uint8_t nBytesPerSample,
+               int32_t(const size_t nSamples,
+                       const size_t nBytesPerSample,
                        const uint8_t nChannels,
                        const uint32_t samplesPerSec,
                        void* audioSamples,
-                       uint32_t& nSamplesOut,
+                       size_t& nSamplesOut,
                        int64_t* elapsed_time_ms,
                        int64_t* ntp_time_ms));
 
@@ -419,8 +421,8 @@
   }
 
   int32_t RealRecordedDataIsAvailable(const void* audioSamples,
-                                      const uint32_t nSamples,
-                                      const uint8_t nBytesPerSample,
+                                      const size_t nSamples,
+                                      const size_t nBytesPerSample,
                                       const uint8_t nChannels,
                                       const uint32_t samplesPerSec,
                                       const uint32_t totalDelayMS,
@@ -441,12 +443,12 @@
     return 0;
   }
 
-  int32_t RealNeedMorePlayData(const uint32_t nSamples,
-                               const uint8_t nBytesPerSample,
+  int32_t RealNeedMorePlayData(const size_t nSamples,
+                               const size_t nBytesPerSample,
                                const uint8_t nChannels,
                                const uint32_t samplesPerSec,
                                void* audioSamples,
-                               uint32_t& nSamplesOut,
+                               size_t& nSamplesOut,
                                int64_t* elapsed_time_ms,
                                int64_t* ntp_time_ms) {
     EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
@@ -484,10 +486,10 @@
 
  private:
   EventWrapper* test_is_done_;
-  int num_callbacks_;
+  size_t num_callbacks_;
   int type_;
-  int play_count_;
-  int rec_count_;
+  size_t play_count_;
+  size_t rec_count_;
   AudioStreamInterface* audio_stream_;
   rtc::scoped_ptr<LatencyMeasuringAudioStream> latency_audio_stream_;
 };
@@ -525,10 +527,10 @@
   int record_channels() const {
     return record_parameters_.channels();
   }
-  int playout_frames_per_10ms_buffer() const {
+  size_t playout_frames_per_10ms_buffer() const {
     return playout_parameters_.frames_per_10ms_buffer();
   }
-  int record_frames_per_10ms_buffer() const {
+  size_t record_frames_per_10ms_buffer() const {
     return record_parameters_.frames_per_10ms_buffer();
   }
 
@@ -576,12 +578,14 @@
     EXPECT_TRUE(test::FileExists(file_name));
 #ifdef ENABLE_PRINTF
     PRINT("file name: %s\n", file_name.c_str());
-    const int bytes = test::GetFileSize(file_name);
-    PRINT("file size: %d [bytes]\n", bytes);
-    PRINT("file size: %d [samples]\n", bytes / kBytesPerSample);
-    const int seconds = bytes / (sample_rate * kBytesPerSample);
+    const size_t bytes = test::GetFileSize(file_name);
+    PRINT("file size: %" PRIuS " [bytes]\n", bytes);
+    PRINT("file size: %" PRIuS " [samples]\n", bytes / kBytesPerSample);
+    const int seconds =
+        static_cast<int>(bytes / (sample_rate * kBytesPerSample));
     PRINT("file size: %d [secs]\n", seconds);
-    PRINT("file size: %d [callbacks]\n", seconds * kNumCallbacksPerSecond);
+    PRINT("file size: %" PRIuS " [callbacks]\n",
+          seconds * kNumCallbacksPerSecond);
 #endif
     return file_name;
   }
@@ -961,8 +965,8 @@
                                1000 * kFullDuplexTimeInSec));
   StopPlayout();
   StopRecording();
-  EXPECT_LE(fifo_audio_stream->average_size(), 10);
-  EXPECT_LE(fifo_audio_stream->largest_size(), 20);
+  EXPECT_LE(fifo_audio_stream->average_size(), 10u);
+  EXPECT_LE(fifo_audio_stream->largest_size(), 20u);
 }
 
 // Measures loopback latency and reports the min, max and average values for
@@ -994,7 +998,8 @@
   StopRecording();
   // Verify that the correct number of transmitted impulses are detected.
   EXPECT_EQ(latency_audio_stream->num_latency_values(),
-            kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1);
+            static_cast<size_t>(
+                kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1));
   latency_audio_stream->PrintResults();
 }
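Note for reviewers: the PRINTD("(%" PRIuS ")") changes go with the new size_t fields: %d is wrong for size_t, %zu is not portable to older MSVC, and webrtc/base/format_macros.h (included above) provides PRIuS, which expands to the right conversion specifier per platform. Sketch:

    #include <stdio.h>
    #include "webrtc/base/format_macros.h"

    void LogFifoSize(size_t largest_size) {
      printf("largest fifo size: %" PRIuS "\n", largest_size);
    }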
 
diff --git a/webrtc/modules/audio_device/android/audio_manager_unittest.cc b/webrtc/modules/audio_device/android/audio_manager_unittest.cc
index f790e6a..a5af8b0 100644
--- a/webrtc/modules/audio_device/android/audio_manager_unittest.cc
+++ b/webrtc/modules/audio_device/android/audio_manager_unittest.cc
@@ -110,10 +110,10 @@
   EXPECT_EQ(0, params.sample_rate());
   EXPECT_EQ(0, params.channels());
   EXPECT_EQ(0, params.frames_per_buffer());
-  EXPECT_EQ(0, params.frames_per_10ms_buffer());
+  EXPECT_EQ(0U, params.frames_per_10ms_buffer());
   EXPECT_EQ(0, params.GetBytesPerFrame());
   EXPECT_EQ(0, params.GetBytesPerBuffer());
-  EXPECT_EQ(0, params.GetBytesPer10msBuffer());
+  EXPECT_EQ(0U, params.GetBytesPer10msBuffer());
   EXPECT_EQ(0.0f, params.GetBufferSizeInMilliseconds());
 }
 
@@ -122,7 +122,7 @@
   const int kSampleRate = 48000;
   const int kChannels = 1;
   const int kFramesPerBuffer = 480;
-  const int kFramesPer10msBuffer = 480;
+  const size_t kFramesPer10msBuffer = 480;
   const int kBytesPerFrame = 2;
   const float kBufferSizeInMs = 10.0f;
   AudioParameters params(kSampleRate, kChannels, kFramesPerBuffer);
@@ -130,7 +130,8 @@
   EXPECT_EQ(kSampleRate, params.sample_rate());
   EXPECT_EQ(kChannels, params.channels());
   EXPECT_EQ(kFramesPerBuffer, params.frames_per_buffer());
-  EXPECT_EQ(kSampleRate / 100, params.frames_per_10ms_buffer());
+  EXPECT_EQ(static_cast<size_t>(kSampleRate / 100),
+            params.frames_per_10ms_buffer());
   EXPECT_EQ(kBytesPerFrame, params.GetBytesPerFrame());
   EXPECT_EQ(kBytesPerFrame * kFramesPerBuffer, params.GetBytesPerBuffer());
   EXPECT_EQ(kBytesPerFrame * kFramesPer10msBuffer,
diff --git a/webrtc/modules/audio_device/android/audio_record_jni.cc b/webrtc/modules/audio_device/android/audio_record_jni.cc
index 637f5f8..c9d0f99 100644
--- a/webrtc/modules/audio_device/android/audio_record_jni.cc
+++ b/webrtc/modules/audio_device/android/audio_record_jni.cc
@@ -14,6 +14,7 @@
 
 #include "webrtc/base/arraysize.h"
 #include "webrtc/base/checks.h"
+#include "webrtc/base/format_macros.h"
 #include "webrtc/modules/audio_device/android/audio_common.h"
 
 #define TAG "AudioRecordJni"
@@ -122,8 +123,8 @@
     ALOGE("InitRecording failed!");
     return -1;
   }
-  frames_per_buffer_ = frames_per_buffer;
-  ALOGD("frames_per_buffer: %d", frames_per_buffer_);
+  frames_per_buffer_ = static_cast<size_t>(frames_per_buffer);
+  ALOGD("frames_per_buffer: %" PRIuS, frames_per_buffer_);
   CHECK_EQ(direct_buffer_capacity_in_bytes_,
            frames_per_buffer_ * kBytesPerFrame);
   CHECK_EQ(frames_per_buffer_, audio_parameters_.frames_per_10ms_buffer());
@@ -200,7 +201,7 @@
       env->GetDirectBufferAddress(byte_buffer);
   jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
   ALOGD("direct buffer capacity: %lld", capacity);
-  direct_buffer_capacity_in_bytes_ = static_cast<int> (capacity);
+  direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
 }
 
 void JNICALL AudioRecordJni::DataIsRecorded(
diff --git a/webrtc/modules/audio_device/android/audio_record_jni.h b/webrtc/modules/audio_device/android/audio_record_jni.h
index 1a2bd9d..6a17eb3 100644
--- a/webrtc/modules/audio_device/android/audio_record_jni.h
+++ b/webrtc/modules/audio_device/android/audio_record_jni.h
@@ -135,13 +135,13 @@
   void* direct_buffer_address_;
 
   // Number of bytes in the direct audio buffer owned by |j_audio_record_|.
-  int direct_buffer_capacity_in_bytes_;
+  size_t direct_buffer_capacity_in_bytes_;
 
   // Number of audio frames per audio buffer. Each audio frame corresponds to
   // one sample of PCM mono data at 16 bits per sample. Hence, each audio
   // frame contains 2 bytes (given that the Java layer only supports mono).
   // Example: 480 for 48000 Hz or 441 for 44100 Hz.
-  int frames_per_buffer_;
+  size_t frames_per_buffer_;
 
   bool initialized_;
 
diff --git a/webrtc/modules/audio_device/android/audio_track_jni.cc b/webrtc/modules/audio_device/android/audio_track_jni.cc
index f9a5d4d..f92f93e 100644
--- a/webrtc/modules/audio_device/android/audio_track_jni.cc
+++ b/webrtc/modules/audio_device/android/audio_track_jni.cc
@@ -15,6 +15,7 @@
 
 #include "webrtc/base/arraysize.h"
 #include "webrtc/base/checks.h"
+#include "webrtc/base/format_macros.h"
 
 #define TAG "AudioTrackJni"
 #define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
@@ -217,21 +218,21 @@
       env->GetDirectBufferAddress(byte_buffer);
   jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
   ALOGD("direct buffer capacity: %lld", capacity);
-  direct_buffer_capacity_in_bytes_ = static_cast<int> (capacity);
+  direct_buffer_capacity_in_bytes_ = static_cast<size_t>(capacity);
   frames_per_buffer_ = direct_buffer_capacity_in_bytes_ / kBytesPerFrame;
-  ALOGD("frames_per_buffer: %d", frames_per_buffer_);
+  ALOGD("frames_per_buffer: %" PRIuS, frames_per_buffer_);
 }
 
 void JNICALL AudioTrackJni::GetPlayoutData(
   JNIEnv* env, jobject obj, jint length, jlong nativeAudioTrack) {
   webrtc::AudioTrackJni* this_object =
       reinterpret_cast<webrtc::AudioTrackJni*> (nativeAudioTrack);
-  this_object->OnGetPlayoutData(length);
+  this_object->OnGetPlayoutData(static_cast<size_t>(length));
 }
 
 // This method is called on a high-priority thread from Java. The name of
 // the thread is 'AudioRecordTrack'.
-void AudioTrackJni::OnGetPlayoutData(int length) {
+void AudioTrackJni::OnGetPlayoutData(size_t length) {
   DCHECK(thread_checker_java_.CalledOnValidThread());
   DCHECK_EQ(frames_per_buffer_, length / kBytesPerFrame);
   if (!audio_device_buffer_) {
@@ -244,7 +245,7 @@
     ALOGE("AudioDeviceBuffer::RequestPlayoutData failed!");
     return;
   }
-  DCHECK_EQ(samples, frames_per_buffer_);
+  DCHECK_EQ(static_cast<size_t>(samples), frames_per_buffer_);
   // Copy decoded data into common byte buffer to ensure that it can be
   // written to the Java based audio track.
   samples = audio_device_buffer_->GetPlayoutData(direct_buffer_address_);
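
Note for reviewers: JNI hands lengths over as fixed-width signed types (jint/jlong), so the native side converts once at the boundary and keeps size_t from there on, as in GetPlayoutData above. Sketch, with OnGetPlayoutDataSketch as a hypothetical receiver and jint shown as int32_t to stay standalone:

    #include <stddef.h>
    #include <stdint.h>

    void OnGetPlayoutDataSketch(int32_t jni_length) {
      if (jni_length < 0)
        return;  // guard before changing signedness
      size_t length = static_cast<size_t>(jni_length);
      (void)length;  // all further buffer arithmetic stays in size_t
    }
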
diff --git a/webrtc/modules/audio_device/android/audio_track_jni.h b/webrtc/modules/audio_device/android/audio_track_jni.h
index 57f7b51..058bd8d 100644
--- a/webrtc/modules/audio_device/android/audio_track_jni.h
+++ b/webrtc/modules/audio_device/android/audio_track_jni.h
@@ -99,7 +99,7 @@
   // the thread is 'AudioTrackThread'.
   static void JNICALL GetPlayoutData(
     JNIEnv* env, jobject obj, jint length, jlong nativeAudioTrack);
-  void OnGetPlayoutData(int length);
+  void OnGetPlayoutData(size_t length);
 
   // Stores thread ID in constructor.
   rtc::ThreadChecker thread_checker_;
@@ -129,13 +129,13 @@
   void* direct_buffer_address_;
 
   // Number of bytes in the direct audio buffer owned by |j_audio_track_|.
-  int direct_buffer_capacity_in_bytes_;
+  size_t direct_buffer_capacity_in_bytes_;
 
   // Number of audio frames per audio buffer. Each audio frame corresponds to
   // one sample of PCM mono data at 16 bits per sample. Hence, each audio
   // frame contains 2 bytes (given that the Java layer only supports mono).
   // Example: 480 for 48000 Hz or 441 for 44100 Hz.
-  int frames_per_buffer_;
+  size_t frames_per_buffer_;
 
   bool initialized_;
 
diff --git a/webrtc/modules/audio_device/audio_device_buffer.cc b/webrtc/modules/audio_device/audio_device_buffer.cc
index 3cfbc7d..cc6d6bb 100644
--- a/webrtc/modules/audio_device/audio_device_buffer.cc
+++ b/webrtc/modules/audio_device/audio_device_buffer.cc
@@ -13,6 +13,7 @@
 #include <assert.h>
 #include <string.h>
 
+#include "webrtc/base/format_macros.h"
 #include "webrtc/modules/audio_device/audio_device_config.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/logging.h"
@@ -380,7 +381,7 @@
 // ----------------------------------------------------------------------------
 
 int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audioBuffer,
-                                             uint32_t nSamples)
+                                             size_t nSamples)
 {
     CriticalSectionScoped lock(&_critSect);
 
@@ -414,7 +415,7 @@
         }
 
         // exctract left or right channel from input buffer to the local buffer
-        for (uint32_t i = 0; i < _recSamples; i++)
+        for (size_t i = 0; i < _recSamples; i++)
         {
             *ptr16Out = *ptr16In;
             ptr16Out++;
@@ -482,10 +483,10 @@
 //  RequestPlayoutData
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceBuffer::RequestPlayoutData(uint32_t nSamples)
+int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples)
 {
     uint32_t playSampleRate = 0;
-    uint8_t playBytesPerSample = 0;
+    size_t playBytesPerSample = 0;
     uint8_t playChannels = 0;
     {
         CriticalSectionScoped lock(&_critSect);
@@ -520,7 +521,7 @@
         }
     }
 
-    uint32_t nSamplesOut(0);
+    size_t nSamplesOut(0);
 
     CriticalSectionScoped lock(&_critSectCb);
 
@@ -563,7 +564,7 @@
     if (_playSize > kMaxBufferSizeBytes)
     {
        WEBRTC_TRACE(kTraceError, kTraceUtility, _id,
-                    "_playSize %i exceeds kMaxBufferSizeBytes in "
+                    "_playSize %" PRIuS " exceeds kMaxBufferSizeBytes in "
                     "AudioDeviceBuffer::GetPlayoutData", _playSize);
        assert(false);
        return -1;
diff --git a/webrtc/modules/audio_device/audio_device_buffer.h b/webrtc/modules/audio_device/audio_device_buffer.h
index a89927f..63a05ef 100644
--- a/webrtc/modules/audio_device/audio_device_buffer.h
+++ b/webrtc/modules/audio_device/audio_device_buffer.h
@@ -19,7 +19,7 @@
 class CriticalSectionWrapper;
 
 const uint32_t kPulsePeriodMs = 1000;
-const uint32_t kMaxBufferSizeBytes = 3840; // 10ms in stereo @ 96kHz
+const size_t kMaxBufferSizeBytes = 3840; // 10ms in stereo @ 96kHz
 
 class AudioDeviceObserver;
 
@@ -50,7 +50,7 @@
         AudioDeviceModule::ChannelType& channel) const;
 
     virtual int32_t SetRecordedBuffer(const void* audioBuffer,
-                                      uint32_t nSamples);
+                                      size_t nSamples);
     int32_t SetCurrentMicLevel(uint32_t level);
     virtual void SetVQEData(int playDelayMS,
                             int recDelayMS,
@@ -58,7 +58,7 @@
     virtual int32_t DeliverRecordedData();
     uint32_t NewMicLevel() const;
 
-    virtual int32_t RequestPlayoutData(uint32_t nSamples);
+    virtual int32_t RequestPlayoutData(size_t nSamples);
     virtual int32_t GetPlayoutData(void* audioBuffer);
 
     int32_t StartInputFileRecording(
@@ -87,22 +87,22 @@
     AudioDeviceModule::ChannelType _recChannel;
 
     // 2 or 4 depending on mono or stereo
-    uint8_t                   _recBytesPerSample;
-    uint8_t                   _playBytesPerSample;
+    size_t                   _recBytesPerSample;
+    size_t                   _playBytesPerSample;
 
     // 10ms in stereo @ 96kHz
     int8_t                          _recBuffer[kMaxBufferSizeBytes];
 
     // one sample <=> 2 or 4 bytes
-    uint32_t                  _recSamples;
-    uint32_t                  _recSize;           // in bytes
+    size_t                    _recSamples;
+    size_t                    _recSize;           // in bytes
 
     // 10ms in stereo @ 96kHz
     int8_t                          _playBuffer[kMaxBufferSizeBytes];
 
     // one sample <=> 2 or 4 bytes
-    uint32_t                  _playSamples;
-    uint32_t                  _playSize;          // in bytes
+    size_t                    _playSamples;
+    size_t                    _playSize;          // in bytes
 
     FileWrapper&                    _recFile;
     FileWrapper&                    _playFile;
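
Note for reviewers: with _recSamples/_recSize and friends as size_t, the sample-count-to-byte-count arithmetic and the comparison against kMaxBufferSizeBytes all happen in one unsigned type. A hypothetical sketch of the invariant the buffer code maintains:

    #include <assert.h>
    #include <stddef.h>

    const size_t kMaxBufferSizeBytesSketch = 3840;  // 10 ms stereo @ 96 kHz

    void CheckRecordedSize(size_t samples, size_t bytes_per_sample) {
      size_t rec_size = samples * bytes_per_sample;  // stays in size_t
      assert(rec_size <= kMaxBufferSizeBytesSketch);
    }
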
diff --git a/webrtc/modules/audio_device/dummy/file_audio_device.cc b/webrtc/modules/audio_device/dummy/file_audio_device.cc
index 3de5344..a2eac87 100644
--- a/webrtc/modules/audio_device/dummy/file_audio_device.cc
+++ b/webrtc/modules/audio_device/dummy/file_audio_device.cc
@@ -172,7 +172,7 @@
     return -1;
   }
 
-  _recordingFramesIn10MS = static_cast<uint32_t>(kRecordingFixedSampleRate/100);
+  _recordingFramesIn10MS = static_cast<size_t>(kRecordingFixedSampleRate / 100);
 
   if (_ptrAudioBuffer) {
     _ptrAudioBuffer->SetRecordingSampleRate(kRecordingFixedSampleRate);
@@ -190,7 +190,7 @@
       return 0;
   }
 
-  _playoutFramesIn10MS = static_cast<uint32_t>(kPlayoutFixedSampleRate/100);
+  _playoutFramesIn10MS = static_cast<size_t>(kPlayoutFixedSampleRate / 100);
   _playing = true;
   _playoutFramesLeft = 0;
 
diff --git a/webrtc/modules/audio_device/dummy/file_audio_device.h b/webrtc/modules/audio_device/dummy/file_audio_device.h
index ffc8adc..91a7d22 100644
--- a/webrtc/modules/audio_device/dummy/file_audio_device.h
+++ b/webrtc/modules/audio_device/dummy/file_audio_device.h
@@ -174,9 +174,9 @@
   uint32_t _playoutFramesLeft;
   CriticalSectionWrapper& _critSect;
 
-  uint32_t _recordingBufferSizeIn10MS;
-  uint32_t _recordingFramesIn10MS;
-  uint32_t _playoutFramesIn10MS;
+  size_t _recordingBufferSizeIn10MS;
+  size_t _recordingFramesIn10MS;
+  size_t _playoutFramesIn10MS;
 
   rtc::scoped_ptr<ThreadWrapper> _ptrThreadRec;
   rtc::scoped_ptr<ThreadWrapper> _ptrThreadPlay;
diff --git a/webrtc/modules/audio_device/include/audio_device_defines.h b/webrtc/modules/audio_device/include/audio_device_defines.h
index 106edcb..32df9e9 100644
--- a/webrtc/modules/audio_device/include/audio_device_defines.h
+++ b/webrtc/modules/audio_device/include/audio_device_defines.h
@@ -11,6 +11,8 @@
 #ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_DEFINES_H
 #define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_DEFINES_H
 
+#include <stddef.h>
+
 #include "webrtc/typedefs.h"
 
 namespace webrtc {
@@ -45,8 +47,8 @@
 class AudioTransport {
  public:
   virtual int32_t RecordedDataIsAvailable(const void* audioSamples,
-                                          const uint32_t nSamples,
-                                          const uint8_t nBytesPerSample,
+                                          const size_t nSamples,
+                                          const size_t nBytesPerSample,
                                           const uint8_t nChannels,
                                           const uint32_t samplesPerSec,
                                           const uint32_t totalDelayMS,
@@ -55,12 +57,12 @@
                                           const bool keyPressed,
                                           uint32_t& newMicLevel) = 0;
 
-  virtual int32_t NeedMorePlayData(const uint32_t nSamples,
-                                   const uint8_t nBytesPerSample,
+  virtual int32_t NeedMorePlayData(const size_t nSamples,
+                                   const size_t nBytesPerSample,
                                    const uint8_t nChannels,
                                    const uint32_t samplesPerSec,
                                    void* audioSamples,
-                                   uint32_t& nSamplesOut,
+                                   size_t& nSamplesOut,
                                    int64_t* elapsed_time_ms,
                                    int64_t* ntp_time_ms) = 0;
 
@@ -84,7 +86,7 @@
                               const int16_t* audio_data,
                               int sample_rate,
                               int number_of_channels,
-                              int number_of_frames,
+                              size_t number_of_frames,
                               int audio_delay_milliseconds,
                               int current_volume,
                               bool key_pressed,
@@ -102,7 +104,7 @@
                       int bits_per_sample,
                       int sample_rate,
                       int number_of_channels,
-                      int number_of_frames) {}
+                      size_t number_of_frames) {}
 
   // Method to push the captured audio data to the specific VoE channel.
   // The data will not undergo audio processing.
@@ -115,7 +117,7 @@
                                int bits_per_sample,
                                int sample_rate,
                                int number_of_channels,
-                               int number_of_frames) {}
+                               size_t number_of_frames) {}
 
   // Method to pull mixed render audio data from all active VoE channels.
   // The data will not be passed as reference for audio processing internally.
@@ -124,7 +126,7 @@
   virtual void PullRenderData(int bits_per_sample,
                               int sample_rate,
                               int number_of_channels,
-                              int number_of_frames,
+                              size_t number_of_frames,
                               void* audio_data,
                               int64_t* elapsed_time_ms,
                               int64_t* ntp_time_ms) {}
@@ -151,18 +153,18 @@
       : sample_rate_(sample_rate),
         channels_(channels),
         frames_per_buffer_(frames_per_buffer),
-        frames_per_10ms_buffer_(sample_rate / 100) {}
+        frames_per_10ms_buffer_(static_cast<size_t>(sample_rate / 100)) {}
   void reset(int sample_rate, int channels, int frames_per_buffer) {
     sample_rate_ = sample_rate;
     channels_ = channels;
     frames_per_buffer_ = frames_per_buffer;
-    frames_per_10ms_buffer_ = (sample_rate / 100);
+    frames_per_10ms_buffer_ = static_cast<size_t>(sample_rate / 100);
   }
   int bits_per_sample() const { return kBitsPerSample; }
   int sample_rate() const { return sample_rate_; }
   int channels() const { return channels_; }
   int frames_per_buffer() const { return frames_per_buffer_; }
-  int frames_per_10ms_buffer() const { return frames_per_10ms_buffer_; }
+  size_t frames_per_10ms_buffer() const { return frames_per_10ms_buffer_; }
   bool is_valid() const {
     return ((sample_rate_ > 0) && (channels_ > 0) && (frames_per_buffer_ > 0));
   }
@@ -170,7 +172,7 @@
   int GetBytesPerBuffer() const {
     return frames_per_buffer_ * GetBytesPerFrame();
   }
-  int GetBytesPer10msBuffer() const {
+  size_t GetBytesPer10msBuffer() const {
     return frames_per_10ms_buffer_ * GetBytesPerFrame();
   }
   float GetBufferSizeInMilliseconds() const {
@@ -183,7 +185,7 @@
   int sample_rate_;
   int channels_;
   int frames_per_buffer_;
-  int frames_per_10ms_buffer_;
+  size_t frames_per_10ms_buffer_;
 };
 
 }  // namespace webrtc
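
Note for reviewers: changing AudioTransport's pure-virtual signatures is what makes this edit transitive: every implementer must switch to the size_t forms in the same commit, and 'override' turns any stale signature into a compile error rather than a silently unrelated virtual. Sketch of an implementer tracking the new signature (the class stays abstract since the other pure-virtual methods are elided):

    #include "webrtc/modules/audio_device/include/audio_device_defines.h"

    class TransportSketch : public webrtc::AudioTransport {
     public:
      int32_t NeedMorePlayData(const size_t nSamples,
                               const size_t nBytesPerSample,
                               const uint8_t nChannels,
                               const uint32_t samplesPerSec,
                               void* audioSamples,
                               size_t& nSamplesOut,
                               int64_t* elapsed_time_ms,
                               int64_t* ntp_time_ms) override {
        nSamplesOut = 0;  // a uint32_t& parameter here no longer compiles
        return 0;
      }
      // ... other pure-virtual methods elided ...
    };
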
diff --git a/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
index b75f18f..fa22114 100644
--- a/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
+++ b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
@@ -19,6 +19,7 @@
 #include "testing/gtest/include/gtest/gtest.h"
 #include "webrtc/base/arraysize.h"
 #include "webrtc/base/criticalsection.h"
+#include "webrtc/base/format_macros.h"
 #include "webrtc/base/logging.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/base/scoped_ref_ptr.h"
@@ -52,15 +53,15 @@
 
 // Number of callbacks (input or output) the tests waits for before we set
 // an event indicating that the test was OK.
-static const int kNumCallbacks = 10;
+static const size_t kNumCallbacks = 10;
 // Max amount of time we wait for an event to be set while counting callbacks.
 static const int kTestTimeOutInMilliseconds = 10 * 1000;
 // Number of bits per PCM audio sample.
-static const int kBitsPerSample = 16;
+static const size_t kBitsPerSample = 16;
 // Number of bytes per PCM audio sample.
-static const int kBytesPerSample = kBitsPerSample / 8;
+static const size_t kBytesPerSample = kBitsPerSample / 8;
 // Average number of audio callbacks per second assuming 10ms packet size.
-static const int kNumCallbacksPerSecond = 100;
+static const size_t kNumCallbacksPerSecond = 100;
 // Play out a test file during this time (unit is in seconds).
 static const int kFilePlayTimeInSec = 15;
 // Run the full-duplex test during this time (unit is in seconds).
@@ -69,7 +70,7 @@
 // Wait for the callback sequence to stabilize by ignoring this number of
 // initial callbacks (avoids initial FIFO access).
 // Only used in the RunPlayoutAndRecordingInFullDuplex test.
-static const int kNumIgnoreFirstCallbacks = 50;
+static const size_t kNumIgnoreFirstCallbacks = 50;
 // Sets the number of impulses per second in the latency test.
 // TODO(henrika): fine tune this setting for iOS.
 static const int kImpulseFrequencyInHz = 1;
@@ -92,8 +93,8 @@
 // measurements.
 class AudioStreamInterface {
  public:
-  virtual void Write(const void* source, int num_frames) = 0;
-  virtual void Read(void* destination, int num_frames) = 0;
+  virtual void Write(const void* source, size_t num_frames) = 0;
+  virtual void Read(void* destination, size_t num_frames) = 0;
 
  protected:
   virtual ~AudioStreamInterface() {}
@@ -103,7 +104,7 @@
 // construction.
 class FileAudioStream : public AudioStreamInterface {
  public:
-  FileAudioStream(int num_callbacks,
+  FileAudioStream(size_t num_callbacks,
                   const std::string& file_name,
                   int sample_rate)
       : file_size_in_bytes_(0), sample_rate_(sample_rate), file_pos_(0) {
@@ -111,40 +112,41 @@
     sample_rate_ = sample_rate;
     EXPECT_GE(file_size_in_callbacks(), num_callbacks)
         << "Size of test file is not large enough to last during the test.";
-    const int num_16bit_samples =
+    const size_t num_16bit_samples =
         test::GetFileSize(file_name) / kBytesPerSample;
     file_.reset(new int16_t[num_16bit_samples]);
     FILE* audio_file = fopen(file_name.c_str(), "rb");
     EXPECT_NE(audio_file, nullptr);
-    int num_samples_read =
+    size_t num_samples_read =
         fread(file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
     EXPECT_EQ(num_samples_read, num_16bit_samples);
     fclose(audio_file);
   }
 
   // AudioStreamInterface::Write() is not implemented.
-  void Write(const void* source, int num_frames) override {}
+  void Write(const void* source, size_t num_frames) override {}
 
   // Read samples from file stored in memory (at construction) and copy
   // |num_frames| (<=> 10ms) to the |destination| byte buffer.
-  void Read(void* destination, int num_frames) override {
+  void Read(void* destination, size_t num_frames) override {
     memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
            num_frames * sizeof(int16_t));
     file_pos_ += num_frames;
   }
 
   int file_size_in_seconds() const {
-    return (file_size_in_bytes_ / (kBytesPerSample * sample_rate_));
+    return static_cast<int>(
+        file_size_in_bytes_ / (kBytesPerSample * sample_rate_));
   }
-  int file_size_in_callbacks() const {
+  size_t file_size_in_callbacks() const {
     return file_size_in_seconds() * kNumCallbacksPerSecond;
   }
 
  private:
-  int file_size_in_bytes_;
+  size_t file_size_in_bytes_;
   int sample_rate_;
   rtc::scoped_ptr<int16_t[]> file_;
-  int file_pos_;
+  size_t file_pos_;
 };
 
 // Simple first in first out (FIFO) class that wraps a list of 16-bit audio
@@ -157,7 +159,7 @@
 // since each side (playout and recording) is driven by its own thread.
 class FifoAudioStream : public AudioStreamInterface {
  public:
-  explicit FifoAudioStream(int frames_per_buffer)
+  explicit FifoAudioStream(size_t frames_per_buffer)
       : frames_per_buffer_(frames_per_buffer),
         bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
         fifo_(new AudioBufferList),
@@ -172,7 +174,7 @@
   // Allocate new memory, copy |num_frames| samples from |source| into memory
 // and add a pointer to the memory location to the end of the list.
   // Increases the size of the FIFO by one element.
-  void Write(const void* source, int num_frames) override {
+  void Write(const void* source, size_t num_frames) override {
     ASSERT_EQ(num_frames, frames_per_buffer_);
     PRINTD("+");
     if (write_count_++ < kNumIgnoreFirstCallbacks) {
@@ -182,10 +184,10 @@
     memcpy(static_cast<int16_t*>(&memory[0]), source, bytes_per_buffer_);
     rtc::CritScope lock(&lock_);
     fifo_->push_back(memory);
-    const int size = fifo_->size();
+    const size_t size = fifo_->size();
     if (size > largest_size_) {
       largest_size_ = size;
-      PRINTD("(%d)", largest_size_);
+      PRINTD("(%" PRIuS ")", largest_size_);
     }
     total_written_elements_ += size;
   }
@@ -193,7 +195,7 @@
   // Read pointer to data buffer from front of list, copy |num_frames| of stored
   // data into |destination| and delete the utilized memory allocation.
   // Decreases the size of the FIFO by one element.
-  void Read(void* destination, int num_frames) override {
+  void Read(void* destination, size_t num_frames) override {
     ASSERT_EQ(num_frames, frames_per_buffer_);
     PRINTD("-");
     rtc::CritScope lock(&lock_);
@@ -207,11 +209,11 @@
     }
   }
 
-  int size() const { return fifo_->size(); }
+  size_t size() const { return fifo_->size(); }
 
-  int largest_size() const { return largest_size_; }
+  size_t largest_size() const { return largest_size_; }
 
-  int average_size() const {
+  size_t average_size() const {
     return (total_written_elements_ == 0)
                ? 0.0
                : 0.5 +
@@ -229,12 +231,12 @@
 
   using AudioBufferList = std::list<int16_t*>;
   rtc::CriticalSection lock_;
-  const int frames_per_buffer_;
-  const int bytes_per_buffer_;
+  const size_t frames_per_buffer_;
+  const size_t bytes_per_buffer_;
   rtc::scoped_ptr<AudioBufferList> fifo_;
-  int largest_size_;
-  int total_written_elements_;
-  int write_count_;
+  size_t largest_size_;
+  size_t total_written_elements_;
+  size_t write_count_;
 };
 
 // Inserts periodic impulses and measures the latency between the time of
@@ -243,7 +245,7 @@
 // See http://source.android.com/devices/audio/loopback.html for details.
 class LatencyMeasuringAudioStream : public AudioStreamInterface {
  public:
-  explicit LatencyMeasuringAudioStream(int frames_per_buffer)
+  explicit LatencyMeasuringAudioStream(size_t frames_per_buffer)
       : clock_(Clock::GetRealTimeClock()),
         frames_per_buffer_(frames_per_buffer),
         bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
@@ -252,7 +254,7 @@
         pulse_time_(0) {}
 
   // Insert periodic impulses in first two samples of |destination|.
-  void Read(void* destination, int num_frames) override {
+  void Read(void* destination, size_t num_frames) override {
     ASSERT_EQ(num_frames, frames_per_buffer_);
     if (play_count_ == 0) {
       PRINT("[");
@@ -266,15 +268,15 @@
       PRINT(".");
       const int16_t impulse = std::numeric_limits<int16_t>::max();
       int16_t* ptr16 = static_cast<int16_t*>(destination);
-      for (int i = 0; i < 2; ++i) {
-        *ptr16++ = impulse;
+      for (size_t i = 0; i < 2; ++i) {
+        ptr16[i] = impulse;
       }
     }
   }
 
   // Detect received impulses in |source|, derive time between transmission and
   // detection and add the calculated delay to list of latencies.
-  void Write(const void* source, int num_frames) override {
+  void Write(const void* source, size_t num_frames) override {
     ASSERT_EQ(num_frames, frames_per_buffer_);
     rec_count_++;
     if (pulse_time_ == 0) {
@@ -307,7 +309,7 @@
     }
   }
 
-  int num_latency_values() const { return latencies_.size(); }
+  size_t num_latency_values() const { return latencies_.size(); }
 
   int min_latency() const {
     if (latencies_.empty())
@@ -346,10 +348,10 @@
 
  private:
   Clock* clock_;
-  const int frames_per_buffer_;
-  const int bytes_per_buffer_;
-  int play_count_;
-  int rec_count_;
+  const size_t frames_per_buffer_;
+  const size_t bytes_per_buffer_;
+  size_t play_count_;
+  size_t rec_count_;
   int64_t pulse_time_;
   std::vector<int> latencies_;
 };
@@ -369,8 +371,8 @@
 
   MOCK_METHOD10(RecordedDataIsAvailable,
                 int32_t(const void* audioSamples,
-                        const uint32_t nSamples,
-                        const uint8_t nBytesPerSample,
+                        const size_t nSamples,
+                        const size_t nBytesPerSample,
                         const uint8_t nChannels,
                         const uint32_t samplesPerSec,
                         const uint32_t totalDelayMS,
@@ -379,12 +381,12 @@
                         const bool keyPressed,
                         uint32_t& newMicLevel));
   MOCK_METHOD8(NeedMorePlayData,
-               int32_t(const uint32_t nSamples,
-                       const uint8_t nBytesPerSample,
+               int32_t(const size_t nSamples,
+                       const size_t nBytesPerSample,
                        const uint8_t nChannels,
                        const uint32_t samplesPerSec,
                        void* audioSamples,
-                       uint32_t& nSamplesOut,
+                       size_t& nSamplesOut,
                        int64_t* elapsed_time_ms,
                        int64_t* ntp_time_ms));
 
@@ -392,7 +394,7 @@
   // implementations (of AudioStreamInterface) here.
   void HandleCallbacks(EventWrapper* test_is_done,
                        AudioStreamInterface* audio_stream,
-                       int num_callbacks) {
+                       size_t num_callbacks) {
     test_is_done_ = test_is_done;
     audio_stream_ = audio_stream;
     num_callbacks_ = num_callbacks;
@@ -409,8 +411,8 @@
   }
 
   int32_t RealRecordedDataIsAvailable(const void* audioSamples,
-                                      const uint32_t nSamples,
-                                      const uint8_t nBytesPerSample,
+                                      const size_t nSamples,
+                                      const size_t nBytesPerSample,
                                       const uint8_t nChannels,
                                       const uint32_t samplesPerSec,
                                       const uint32_t totalDelayMS,
@@ -431,12 +433,12 @@
     return 0;
   }
 
-  int32_t RealNeedMorePlayData(const uint32_t nSamples,
-                               const uint8_t nBytesPerSample,
+  int32_t RealNeedMorePlayData(const size_t nSamples,
+                               const size_t nBytesPerSample,
                                const uint8_t nChannels,
                                const uint32_t samplesPerSec,
                                void* audioSamples,
-                               uint32_t& nSamplesOut,
+                               size_t& nSamplesOut,
                                int64_t* elapsed_time_ms,
                                int64_t* ntp_time_ms) {
     EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
@@ -474,10 +476,10 @@
 
  private:
   EventWrapper* test_is_done_;
-  int num_callbacks_;
+  size_t num_callbacks_;
   int type_;
-  int play_count_;
-  int rec_count_;
+  size_t play_count_;
+  size_t rec_count_;
   AudioStreamInterface* audio_stream_;
 };
 
@@ -510,10 +512,10 @@
   int record_sample_rate() const { return record_parameters_.sample_rate(); }
   int playout_channels() const { return playout_parameters_.channels(); }
   int record_channels() const { return record_parameters_.channels(); }
-  int playout_frames_per_10ms_buffer() const {
+  size_t playout_frames_per_10ms_buffer() const {
     return playout_parameters_.frames_per_10ms_buffer();
   }
-  int record_frames_per_10ms_buffer() const {
+  size_t record_frames_per_10ms_buffer() const {
     return record_parameters_.frames_per_10ms_buffer();
   }
 
@@ -552,12 +554,14 @@
     EXPECT_TRUE(test::FileExists(file_name));
 #ifdef ENABLE_DEBUG_PRINTF
     PRINTD("file name: %s\n", file_name.c_str());
-    const int bytes = test::GetFileSize(file_name);
-    PRINTD("file size: %d [bytes]\n", bytes);
-    PRINTD("file size: %d [samples]\n", bytes / kBytesPerSample);
-    const int seconds = bytes / (sample_rate * kBytesPerSample);
+    const size_t bytes = test::GetFileSize(file_name);
+    PRINTD("file size: %" PRIuS " [bytes]\n", bytes);
+    PRINTD("file size: %" PRIuS " [samples]\n", bytes / kBytesPerSample);
+    const int seconds =
+        static_cast<int>(bytes / (sample_rate * kBytesPerSample));
     PRINTD("file size: %d [secs]\n", seconds);
-    PRINTD("file size: %d [callbacks]\n", seconds * kNumCallbacksPerSecond);
+    PRINTD("file size: %" PRIuS " [callbacks]\n",
+           seconds * kNumCallbacksPerSecond);
 #endif
     return file_name;
   }
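
Note on the PRINTD changes above: "%d" is replaced with "%" PRIuS "" wherever the argument became size_t, since passing a size_t to "%d" is undefined behavior once size_t is wider than int (e.g. on LP64 targets). A minimal sketch, assuming PRIuS expands to "zu" as in webrtc/typedefs.h ("Iu" under MSVC):

  #include <cstddef>
  #include <cstdio>

  #define PRIuS "zu"  // assumption: POSIX expansion; MSVC builds use "Iu"

  int main() {
    size_t largest_size = 12345;
    // Portable: the format length modifier matches the argument type.
    std::printf("(%" PRIuS ")\n", largest_size);
    return 0;
  }
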
diff --git a/webrtc/modules/audio_device/mock_audio_device_buffer.h b/webrtc/modules/audio_device/mock_audio_device_buffer.h
index b9e66f7..d18c0ec 100644
--- a/webrtc/modules/audio_device/mock_audio_device_buffer.h
+++ b/webrtc/modules/audio_device/mock_audio_device_buffer.h
@@ -21,7 +21,7 @@
   MockAudioDeviceBuffer() {}
   virtual ~MockAudioDeviceBuffer() {}
 
-  MOCK_METHOD1(RequestPlayoutData, int32_t(uint32_t nSamples));
+  MOCK_METHOD1(RequestPlayoutData, int32_t(size_t nSamples));
   MOCK_METHOD1(GetPlayoutData, int32_t(void* audioBuffer));
 };
 
diff --git a/webrtc/modules/audio_device/test/audio_device_test_api.cc b/webrtc/modules/audio_device/test/audio_device_test_api.cc
index 923d39a..c09b88d 100644
--- a/webrtc/modules/audio_device/test/audio_device_test_api.cc
+++ b/webrtc/modules/audio_device/test/audio_device_test_api.cc
@@ -83,8 +83,8 @@
   ~AudioTransportAPI() {}
 
   int32_t RecordedDataIsAvailable(const void* audioSamples,
-                                  const uint32_t nSamples,
-                                  const uint8_t nBytesPerSample,
+                                  const size_t nSamples,
+                                  const size_t nBytesPerSample,
                                   const uint8_t nChannels,
                                   const uint32_t sampleRate,
                                   const uint32_t totalDelay,
@@ -108,12 +108,12 @@
     return 0;
   }
 
-  int32_t NeedMorePlayData(const uint32_t nSamples,
-                           const uint8_t nBytesPerSample,
+  int32_t NeedMorePlayData(const size_t nSamples,
+                           const size_t nBytesPerSample,
                            const uint8_t nChannels,
                            const uint32_t sampleRate,
                            void* audioSamples,
-                           uint32_t& nSamplesOut,
+                           size_t& nSamplesOut,
                            int64_t* elapsed_time_ms,
                            int64_t* ntp_time_ms) override {
     play_count_++;
@@ -133,7 +133,7 @@
                       const int16_t* audio_data,
                       int sample_rate,
                       int number_of_channels,
-                      int number_of_frames,
+                      size_t number_of_frames,
                       int audio_delay_milliseconds,
                       int current_volume,
                       bool key_pressed,
@@ -144,10 +144,10 @@
   void PushCaptureData(int voe_channel, const void* audio_data,
                        int bits_per_sample, int sample_rate,
                        int number_of_channels,
-                       int number_of_frames) override {}
+                       size_t number_of_frames) override {}
 
   void PullRenderData(int bits_per_sample, int sample_rate,
-                      int number_of_channels, int number_of_frames,
+                      int number_of_channels, size_t number_of_frames,
                       void* audio_data,
                       int64_t* elapsed_time_ms,
                       int64_t* ntp_time_ms) override {}
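
Note: these test transports must change in lockstep with the AudioTransport interface, because 'override' only compiles against an exact signature match; this is why a size_t migration fans out transitively. A toy illustration (types and names hypothetical):

  #include <cstddef>

  struct Transport {  // hypothetical stand-in for webrtc::AudioTransport
    virtual ~Transport() {}
    virtual void NeedMorePlayData(size_t nSamples, size_t& nSamplesOut) = 0;
  };

  struct FakeTransport : Transport {
    // Declaring 'uint32_t nSamples' here would no longer compile:
    // 'override' demands exact parameter types, so every fake and mock
    // migrates together with the interface.
    void NeedMorePlayData(size_t nSamples, size_t& nSamplesOut) override {
      nSamplesOut = nSamples;
    }
  };
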
diff --git a/webrtc/modules/audio_device/test/func_test_manager.cc b/webrtc/modules/audio_device/test/func_test_manager.cc
index ae3cd2c..005e0e5 100644
--- a/webrtc/modules/audio_device/test/func_test_manager.cc
+++ b/webrtc/modules/audio_device/test/func_test_manager.cc
@@ -192,8 +192,8 @@
 
 int32_t AudioTransportImpl::RecordedDataIsAvailable(
     const void* audioSamples,
-    const uint32_t nSamples,
-    const uint8_t nBytesPerSample,
+    const size_t nSamples,
+    const size_t nBytesPerSample,
     const uint8_t nChannels,
     const uint32_t samplesPerSec,
     const uint32_t totalDelayMS,
@@ -206,7 +206,7 @@
     {
         AudioPacket* packet = new AudioPacket();
         memcpy(packet->dataBuffer, audioSamples, nSamples * nBytesPerSample);
-        packet->nSamples = (uint16_t) nSamples;
+        packet->nSamples = nSamples;
         packet->nBytesPerSample = nBytesPerSample;
         packet->nChannels = nChannels;
         packet->samplesPerSec = samplesPerSec;
@@ -337,12 +337,12 @@
 
 
 int32_t AudioTransportImpl::NeedMorePlayData(
-    const uint32_t nSamples,
-    const uint8_t nBytesPerSample,
+    const size_t nSamples,
+    const size_t nBytesPerSample,
     const uint8_t nChannels,
     const uint32_t samplesPerSec,
     void* audioSamples,
-    uint32_t& nSamplesOut,
+    size_t& nSamplesOut,
     int64_t* elapsed_time_ms,
     int64_t* ntp_time_ms)
 {
@@ -359,15 +359,15 @@
             if (packet)
             {
                 int ret(0);
-                int lenOut(0);
+                size_t lenOut(0);
                 int16_t tmpBuf_96kHz[80 * 12];
                 int16_t* ptr16In = NULL;
                 int16_t* ptr16Out = NULL;
 
-                const uint16_t nSamplesIn = packet->nSamples;
+                const size_t nSamplesIn = packet->nSamples;
                 const uint8_t nChannelsIn = packet->nChannels;
                 const uint32_t samplesPerSecIn = packet->samplesPerSec;
-                const uint16_t nBytesPerSampleIn = packet->nBytesPerSample;
+                const size_t nBytesPerSampleIn = packet->nBytesPerSample;
 
                 int32_t fsInHz(samplesPerSecIn);
                 int32_t fsOutHz(samplesPerSec);
@@ -401,7 +401,7 @@
                             ptr16Out = (int16_t*) audioSamples;
 
                             // do stereo -> mono
-                            for (unsigned int i = 0; i < nSamples; i++)
+                            for (size_t i = 0; i < nSamples; i++)
                             {
                                 *ptr16Out = *ptr16In; // use left channel
                                 ptr16Out++;
@@ -409,7 +409,7 @@
                                 ptr16In++;
                             }
                         }
-                        assert(2*nSamples == (uint32_t)lenOut);
+                        assert(2*nSamples == lenOut);
                     } else
                     {
                         if (_playCount % 100 == 0)
@@ -439,7 +439,7 @@
                             ptr16Out = (int16_t*) audioSamples;
 
                             // do mono -> stereo
-                            for (unsigned int i = 0; i < nSamples; i++)
+                            for (size_t i = 0; i < nSamples; i++)
                             {
                                 *ptr16Out = *ptr16In; // left
                                 ptr16Out++;
@@ -448,7 +448,7 @@
                                 ptr16In++;
                             }
                         }
-                        assert(nSamples == (uint32_t)lenOut);
+                        assert(nSamples == lenOut);
                     } else
                     {
                         if (_playCount % 100 == 0)
@@ -483,7 +483,7 @@
             // mono sample from file is duplicated and sent to left and right
             // channels
             int16_t* audio16 = (int16_t*) audioSamples;
-            for (unsigned int i = 0; i < nSamples; i++)
+            for (size_t i = 0; i < nSamples; i++)
             {
                 (*audio16) = fileBuf[i]; // left
                 audio16++;
@@ -578,7 +578,7 @@
                                         const int16_t* audio_data,
                                         int sample_rate,
                                         int number_of_channels,
-                                        int number_of_frames,
+                                        size_t number_of_frames,
                                         int audio_delay_milliseconds,
                                         int current_volume,
                                         bool key_pressed,
@@ -590,11 +590,11 @@
                                          const void* audio_data,
                                          int bits_per_sample, int sample_rate,
                                          int number_of_channels,
-                                         int number_of_frames) {}
+                                         size_t number_of_frames) {}
 
 void AudioTransportImpl::PullRenderData(int bits_per_sample, int sample_rate,
                                         int number_of_channels,
-                                        int number_of_frames,
+                                        size_t number_of_frames,
                                         void* audio_data,
                                         int64_t* elapsed_time_ms,
                                         int64_t* ntp_time_ms) {}
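
Note on the loop-index changes in NeedMorePlayData above: the index type now matches nSamples, avoiding unsigned int/size_t comparison warnings on 64-bit targets. A reduced sketch of the mono-to-stereo copy:

  #include <cstddef>
  #include <cstdint>

  // Reduced from NeedMorePlayData: duplicate each mono input sample into
  // the left and right slots of an interleaved stereo buffer.
  void MonoToStereo(const int16_t* in, int16_t* out, size_t nSamples) {
    for (size_t i = 0; i < nSamples; i++) {
      out[2 * i] = in[i];      // left
      out[2 * i + 1] = in[i];  // right
    }
  }
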
diff --git a/webrtc/modules/audio_device/test/func_test_manager.h b/webrtc/modules/audio_device/test/func_test_manager.h
index f5ddd3a..a91ae81 100644
--- a/webrtc/modules/audio_device/test/func_test_manager.h
+++ b/webrtc/modules/audio_device/test/func_test_manager.h
@@ -47,8 +47,8 @@
 struct AudioPacket
 {
     uint8_t dataBuffer[4 * 960];
-    uint16_t nSamples;
-    uint16_t nBytesPerSample;
+    size_t nSamples;
+    size_t nBytesPerSample;
     uint8_t nChannels;
     uint32_t samplesPerSec;
 };
@@ -86,8 +86,8 @@
 {
 public:
     int32_t RecordedDataIsAvailable(const void* audioSamples,
-                                    const uint32_t nSamples,
-                                    const uint8_t nBytesPerSample,
+                                    const size_t nSamples,
+                                    const size_t nBytesPerSample,
                                     const uint8_t nChannels,
                                     const uint32_t samplesPerSec,
                                     const uint32_t totalDelayMS,
@@ -96,12 +96,12 @@
                                     const bool keyPressed,
                                     uint32_t& newMicLevel) override;
 
-    int32_t NeedMorePlayData(const uint32_t nSamples,
-                             const uint8_t nBytesPerSample,
+    int32_t NeedMorePlayData(const size_t nSamples,
+                             const size_t nBytesPerSample,
                              const uint8_t nChannels,
                              const uint32_t samplesPerSec,
                              void* audioSamples,
-                             uint32_t& nSamplesOut,
+                             size_t& nSamplesOut,
                              int64_t* elapsed_time_ms,
                              int64_t* ntp_time_ms) override;
 
@@ -110,7 +110,7 @@
                         const int16_t* audio_data,
                         int sample_rate,
                         int number_of_channels,
-                        int number_of_frames,
+                        size_t number_of_frames,
                         int audio_delay_milliseconds,
                         int current_volume,
                         bool key_pressed,
@@ -119,10 +119,10 @@
     void PushCaptureData(int voe_channel, const void* audio_data,
                          int bits_per_sample, int sample_rate,
                          int number_of_channels,
-                         int number_of_frames) override;
+                         size_t number_of_frames) override;
 
     void PullRenderData(int bits_per_sample, int sample_rate,
-                        int number_of_channels, int number_of_frames,
+                        int number_of_channels, size_t number_of_frames,
                         void* audio_data,
                         int64_t* elapsed_time_ms,
                         int64_t* ntp_time_ms) override;
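
Note: with AudioPacket's nSamples and nBytesPerSample now size_t, the byte math in RecordedDataIsAvailable stays in size_t end to end, and the old (uint16_t) narrowing on store disappears. A reduced sketch:

  #include <cstddef>
  #include <cstdint>
  #include <cstring>

  struct AudioPacket {
    uint8_t dataBuffer[4 * 960];
    size_t nSamples;
    size_t nBytesPerSample;
  };

  void StorePacket(AudioPacket* packet, const void* audioSamples,
                   size_t nSamples, size_t nBytesPerSample) {
    // size_t * size_t stays size_t; memcpy takes size_t natively.
    std::memcpy(packet->dataBuffer, audioSamples,
                nSamples * nBytesPerSample);
    packet->nSamples = nSamples;  // previously (uint16_t) nSamples
    packet->nBytesPerSample = nBytesPerSample;
  }
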
diff --git a/webrtc/modules/audio_processing/aec/aec_core.c b/webrtc/modules/audio_processing/aec/aec_core.c
index 7092707..4ddfa4a 100644
--- a/webrtc/modules/audio_processing/aec/aec_core.c
+++ b/webrtc/modules/audio_processing/aec/aec_core.c
@@ -945,7 +945,8 @@
   float fft[PART_LEN2];
   float scale, dtmp;
   float nlpGainHband;
-  int i, j;
+  int i;
+  size_t j;
 
   // Coherence and non-linear filter
   float cohde[PART_LEN1], cohxd[PART_LEN1];
@@ -1160,8 +1161,8 @@
   memcpy(aec->eBuf, aec->eBuf + PART_LEN, sizeof(float) * PART_LEN);
 
   // Copy the current block to the old position for H band
-  for (i = 0; i < aec->num_bands - 1; ++i) {
-    memcpy(aec->dBufH[i], aec->dBufH[i] + PART_LEN, sizeof(float) * PART_LEN);
+  for (j = 0; j < aec->num_bands - 1; ++j) {
+    memcpy(aec->dBufH[j], aec->dBufH[j] + PART_LEN, sizeof(float) * PART_LEN);
   }
 
   memmove(aec->xfwBuf + PART_LEN1,
@@ -1170,7 +1171,7 @@
 }
 
 static void ProcessBlock(AecCore* aec) {
-  int i;
+  size_t i;
   float y[PART_LEN], e[PART_LEN];
   float scale;
 
@@ -1557,7 +1558,7 @@
   } else {
     aec->normal_mu = 0.5f;
     aec->normal_error_threshold = 1.5e-6f;
-    aec->num_bands = sampFreq / 16000;
+    aec->num_bands = (size_t)(sampFreq / 16000);
   }
 
   WebRtc_InitBuffer(aec->nearFrBuf);
@@ -1731,11 +1732,11 @@
 
 void WebRtcAec_ProcessFrames(AecCore* aec,
                              const float* const* nearend,
-                             int num_bands,
-                             int num_samples,
+                             size_t num_bands,
+                             size_t num_samples,
                              int knownDelay,
                              float* const* out) {
-  int i, j;
+  size_t i, j;
   int out_elements = 0;
 
   aec->frame_count++;
diff --git a/webrtc/modules/audio_processing/aec/aec_core.h b/webrtc/modules/audio_processing/aec/aec_core.h
index 2530527..241f077 100644
--- a/webrtc/modules/audio_processing/aec/aec_core.h
+++ b/webrtc/modules/audio_processing/aec/aec_core.h
@@ -15,6 +15,8 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_H_
 
+#include <stddef.h>
+
 #include "webrtc/typedefs.h"
 
 #define FRAME_LEN 80
@@ -65,8 +67,8 @@
 void WebRtcAec_BufferFarendPartition(AecCore* aec, const float* farend);
 void WebRtcAec_ProcessFrames(AecCore* aec,
                              const float* const* nearend,
-                             int num_bands,
-                             int num_samples,
+                             size_t num_bands,
+                             size_t num_samples,
                              int knownDelay,
                              float* const* out);
 
diff --git a/webrtc/modules/audio_processing/aec/aec_core_internal.h b/webrtc/modules/audio_processing/aec/aec_core_internal.h
index 796ea2c..a831feb 100644
--- a/webrtc/modules/audio_processing/aec/aec_core_internal.h
+++ b/webrtc/modules/audio_processing/aec/aec_core_internal.h
@@ -101,7 +101,7 @@
 
   int mult;  // sampling frequency multiple
   int sampFreq;
-  int num_bands;
+  size_t num_bands;
   uint32_t seed;
 
   float normal_mu;               // stepsize
diff --git a/webrtc/modules/audio_processing/aec/aec_resampler.c b/webrtc/modules/audio_processing/aec/aec_resampler.c
index 62a830b..99c39ef 100644
--- a/webrtc/modules/audio_processing/aec/aec_resampler.c
+++ b/webrtc/modules/audio_processing/aec/aec_resampler.c
@@ -64,17 +64,16 @@
 
 void WebRtcAec_ResampleLinear(void* resampInst,
                               const float* inspeech,
-                              int size,
+                              size_t size,
                               float skew,
                               float* outspeech,
-                              int* size_out) {
+                              size_t* size_out) {
   AecResampler* obj = (AecResampler*)resampInst;
 
   float* y;
   float be, tnew;
-  int tn, mm;
+  size_t tn, mm;
 
-  assert(size >= 0);
   assert(size <= 2 * FRAME_LEN);
   assert(resampInst != NULL);
   assert(inspeech != NULL);
@@ -94,7 +93,7 @@
   y = &obj->buffer[FRAME_LEN];  // Point at current frame
 
   tnew = be * mm + obj->position;
-  tn = (int)tnew;
+  tn = (size_t)tnew;
 
   while (tn < size) {
 
diff --git a/webrtc/modules/audio_processing/aec/aec_resampler.h b/webrtc/modules/audio_processing/aec/aec_resampler.h
index a374992..a5002c1 100644
--- a/webrtc/modules/audio_processing/aec/aec_resampler.h
+++ b/webrtc/modules/audio_processing/aec/aec_resampler.h
@@ -31,9 +31,9 @@
 // Resamples input using linear interpolation.
 void WebRtcAec_ResampleLinear(void* resampInst,
                               const float* inspeech,
-                              int size,
+                              size_t size,
                               float skew,
                               float* outspeech,
-                              int* size_out);
+                              size_t* size_out);
 
 #endif  // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_RESAMPLER_H_
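
Note: assert(size >= 0) is dropped above because the comparison is always true once size is size_t (and compilers warn on tautological unsigned comparisons); only the upper bound remains meaningful. A minimal sketch, with FRAME_LEN = 80 as in aec_core.h:

  #include <cassert>
  #include <cstddef>

  #define FRAME_LEN 80

  void CheckResampleSize(size_t size) {
    // assert(size >= 0);          // tautological for unsigned; removed
    assert(size <= 2 * FRAME_LEN);  // the upper bound still matters
  }
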
diff --git a/webrtc/modules/audio_processing/aec/echo_cancellation.c b/webrtc/modules/audio_processing/aec/echo_cancellation.c
index b31a84a..0f5cd31 100644
--- a/webrtc/modules/audio_processing/aec/echo_cancellation.c
+++ b/webrtc/modules/audio_processing/aec/echo_cancellation.c
@@ -105,16 +105,16 @@
 static void EstBufDelayExtended(Aec* aecInst);
 static int ProcessNormal(Aec* self,
                          const float* const* near,
-                         int num_bands,
+                         size_t num_bands,
                          float* const* out,
-                         int16_t num_samples,
+                         size_t num_samples,
                          int16_t reported_delay_ms,
                          int32_t skew);
 static void ProcessExtended(Aec* self,
                             const float* const* near,
-                            int num_bands,
+                            size_t num_bands,
                             float* const* out,
-                            int16_t num_samples,
+                            size_t num_samples,
                             int16_t reported_delay_ms,
                             int32_t skew);
 
@@ -271,9 +271,9 @@
 // only buffer L band for farend
 int32_t WebRtcAec_BufferFarend(void* aecInst,
                                const float* farend,
-                               int16_t nrOfSamples) {
+                               size_t nrOfSamples) {
   Aec* aecpc = aecInst;
-  int newNrOfSamples = nrOfSamples;
+  size_t newNrOfSamples = nrOfSamples;
   float new_farend[MAX_RESAMP_LEN];
   const float* farend_ptr = farend;
 
@@ -305,11 +305,11 @@
   }
 
   aecpc->farend_started = 1;
-  WebRtcAec_SetSystemDelay(aecpc->aec,
-                           WebRtcAec_system_delay(aecpc->aec) + newNrOfSamples);
+  WebRtcAec_SetSystemDelay(
+      aecpc->aec, WebRtcAec_system_delay(aecpc->aec) + (int)newNrOfSamples);
 
   // Write the time-domain data to |far_pre_buf|.
-  WebRtc_WriteBuffer(aecpc->far_pre_buf, farend_ptr, (size_t)newNrOfSamples);
+  WebRtc_WriteBuffer(aecpc->far_pre_buf, farend_ptr, newNrOfSamples);
 
   // Transform to frequency domain if we have enough data.
   while (WebRtc_available_read(aecpc->far_pre_buf) >= PART_LEN2) {
@@ -334,9 +334,9 @@
 
 int32_t WebRtcAec_Process(void* aecInst,
                           const float* const* nearend,
-                          int num_bands,
+                          size_t num_bands,
                           float* const* out,
-                          int16_t nrOfSamples,
+                          size_t nrOfSamples,
                           int16_t msInSndCardBuf,
                           int32_t skew) {
   Aec* aecpc = aecInst;
@@ -592,14 +592,14 @@
 
 static int ProcessNormal(Aec* aecpc,
                          const float* const* nearend,
-                         int num_bands,
+                         size_t num_bands,
                          float* const* out,
-                         int16_t nrOfSamples,
+                         size_t nrOfSamples,
                          int16_t msInSndCardBuf,
                          int32_t skew) {
   int retVal = 0;
-  short i;
-  short nBlocks10ms;
+  size_t i;
+  size_t nBlocks10ms;
   // Limit resampling to doubling/halving of signal
   const float minSkewEst = -0.5f;
   const float maxSkewEst = 1.0f;
@@ -740,12 +740,12 @@
 
 static void ProcessExtended(Aec* self,
                             const float* const* near,
-                            int num_bands,
+                            size_t num_bands,
                             float* const* out,
-                            int16_t num_samples,
+                            size_t num_samples,
                             int16_t reported_delay_ms,
                             int32_t skew) {
-  int i;
+  size_t i;
   const int delay_diff_offset = kDelayDiffOffsetSamples;
 #if defined(WEBRTC_UNTRUSTED_DELAY)
   reported_delay_ms = kFixedDelayMs;
diff --git a/webrtc/modules/audio_processing/aec/include/echo_cancellation.h b/webrtc/modules/audio_processing/aec/include/echo_cancellation.h
index e49a084..a340cf8 100644
--- a/webrtc/modules/audio_processing/aec/include/echo_cancellation.h
+++ b/webrtc/modules/audio_processing/aec/include/echo_cancellation.h
@@ -11,6 +11,8 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_INCLUDE_ECHO_CANCELLATION_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_INCLUDE_ECHO_CANCELLATION_H_
 
+#include <stddef.h>
+
 #include "webrtc/typedefs.h"
 
 // Errors
@@ -111,7 +113,7 @@
  */
 int32_t WebRtcAec_BufferFarend(void* aecInst,
                                const float* farend,
-                               int16_t nrOfSamples);
+                               size_t nrOfSamples);
 
 /*
  * Runs the echo canceller on 80 or 160 sample blocks of data.
@@ -138,9 +140,9 @@
  */
 int32_t WebRtcAec_Process(void* aecInst,
                           const float* const* nearend,
-                          int num_bands,
+                          size_t num_bands,
                           float* const* out,
-                          int16_t nrOfSamples,
+                          size_t nrOfSamples,
                           int16_t msInSndCardBuf,
                           int32_t skew);
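
Note: after this header change callers pass sample counts as size_t with no casts. A minimal calling sketch, assuming the usual 10 ms framing at 16 kHz (160 samples, one band); it is illustrative only and needs the WebRTC tree to build:

  #include <cstddef>
  #include "webrtc/modules/audio_processing/aec/include/echo_cancellation.h"

  // Sketch only: shows the size_t call shape of the APIs above.
  void RunAecOnce(void* aec, const float* far_frame,
                  const float* const* near_bands, float* const* out_bands) {
    const size_t kNumBands = 1;        // 16 kHz: single band
    const size_t kFrameSamples = 160;  // 10 ms at 16 kHz
    WebRtcAec_BufferFarend(aec, far_frame, kFrameSamples);
    WebRtcAec_Process(aec, near_bands, kNumBands, out_bands, kFrameSamples,
                      /*msInSndCardBuf=*/50, /*skew=*/0);
  }
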
 
diff --git a/webrtc/modules/audio_processing/aec/system_delay_unittest.cc b/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
index 5e26a31..07e3cf8 100644
--- a/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
+++ b/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
@@ -33,7 +33,7 @@
   void RenderAndCapture(int device_buffer_ms);
 
   // Fills up the far-end buffer with respect to the default device buffer size.
-  int BufferFillUp();
+  size_t BufferFillUp();
 
   // Runs and verifies the behavior in a stable startup procedure.
   void RunStableStartup();
@@ -44,7 +44,7 @@
 
   void* handle_;
   Aec* self_;
-  int samples_per_frame_;
+  size_t samples_per_frame_;
   // Dummy input/output speech data.
   static const int kSamplesPerChunk = 160;
   float far_[kSamplesPerChunk];
@@ -102,7 +102,7 @@
   EXPECT_EQ(0, WebRtcAec_system_delay(self_->aec));
 
   // One frame equals 10 ms of data.
-  samples_per_frame_ = sample_rate_hz / 100;
+  samples_per_frame_ = static_cast<size_t>(sample_rate_hz / 100);
 }
 
 void SystemDelayTest::RenderAndCapture(int device_buffer_ms) {
@@ -117,15 +117,16 @@
                               0));
 }
 
-int SystemDelayTest::BufferFillUp() {
+size_t SystemDelayTest::BufferFillUp() {
   // To make sure we have a full buffer when we verify stability we first fill
   // up the far-end buffer with the same amount as we will report in through
   // Process().
-  int buffer_size = 0;
+  size_t buffer_size = 0;
   for (int i = 0; i < kDeviceBufMs / 10; i++) {
     EXPECT_EQ(0, WebRtcAec_BufferFarend(handle_, far_, samples_per_frame_));
     buffer_size += samples_per_frame_;
-    EXPECT_EQ(buffer_size, WebRtcAec_system_delay(self_->aec));
+    EXPECT_EQ(static_cast<int>(buffer_size),
+              WebRtcAec_system_delay(self_->aec));
   }
   return buffer_size;
 }
@@ -134,7 +135,7 @@
   // To make sure we have a full buffer when we verify stability we first fill
   // up the far-end buffer with the same amount as we will report in through
   // Process().
-  int buffer_size = BufferFillUp();
+  size_t buffer_size = BufferFillUp();
 
   if (WebRtcAec_delay_agnostic_enabled(self_->aec) == 1) {
     // In extended_filter mode we set the buffer size after the first processed
@@ -159,14 +160,16 @@
     EXPECT_GT(kStableConvergenceMs, process_time_ms);
   }
   // Verify that the buffer has been flushed.
-  EXPECT_GE(buffer_size, WebRtcAec_system_delay(self_->aec));
+  EXPECT_GE(static_cast<int>(buffer_size),
+            WebRtcAec_system_delay(self_->aec));
 }
 
   int SystemDelayTest::MapBufferSizeToSamples(int size_in_ms,
                                               bool extended_filter) {
   // If extended_filter is disabled we add an extra 10 ms for the unprocessed
   // frame. That is simply how the algorithm is constructed.
-  return (size_in_ms + (extended_filter ? 0 : 10)) * samples_per_frame_ / 10;
+  return static_cast<int>(
+      (size_in_ms + (extended_filter ? 0 : 10)) * samples_per_frame_ / 10);
 }
 
 // The tests should meet basic requirements and not be adjusted to what is
@@ -207,7 +210,8 @@
         for (int j = 1; j <= 5; j++) {
           EXPECT_EQ(0,
                     WebRtcAec_BufferFarend(handle_, far_, samples_per_frame_));
-          EXPECT_EQ(j * samples_per_frame_, WebRtcAec_system_delay(self_->aec));
+          EXPECT_EQ(static_cast<int>(j * samples_per_frame_),
+                    WebRtcAec_system_delay(self_->aec));
         }
       }
     }
@@ -236,7 +240,8 @@
         // the average.
         // In extended_filter mode we target 50% and measure after one processed
         // 10 ms chunk.
-        int average_reported_delay = kDeviceBufMs * samples_per_frame_ / 10;
+        int average_reported_delay =
+            static_cast<int>(kDeviceBufMs * samples_per_frame_ / 10);
         EXPECT_GE(average_reported_delay, WebRtcAec_system_delay(self_->aec));
         int lower_bound = WebRtcAec_extended_filter_enabled(self_->aec)
                               ? average_reported_delay / 2 - samples_per_frame_
@@ -267,7 +272,7 @@
     // To make sure we have a full buffer when we verify stability we first fill
     // up the far-end buffer with the same amount as we will report in on the
     // average through Process().
-    int buffer_size = BufferFillUp();
+    size_t buffer_size = BufferFillUp();
 
     int buffer_offset_ms = 25;
     int reported_delay_ms = 0;
@@ -285,14 +290,16 @@
     // Verify convergence time.
     EXPECT_GE(kMaxConvergenceMs, process_time_ms);
     // Verify that the buffer has been flushed.
-    EXPECT_GE(buffer_size, WebRtcAec_system_delay(self_->aec));
+    EXPECT_GE(static_cast<int>(buffer_size),
+              WebRtcAec_system_delay(self_->aec));
 
     // Verify system delay with respect to requirements, i.e., the
     // |system_delay| is in the interval [60%, 100%] of what's last reported.
-    EXPECT_GE(reported_delay_ms * samples_per_frame_ / 10,
+    EXPECT_GE(static_cast<int>(reported_delay_ms * samples_per_frame_ / 10),
               WebRtcAec_system_delay(self_->aec));
-    EXPECT_LE(reported_delay_ms * samples_per_frame_ / 10 * 3 / 5,
-              WebRtcAec_system_delay(self_->aec));
+    EXPECT_LE(
+        static_cast<int>(reported_delay_ms * samples_per_frame_ / 10 * 3 / 5),
+        WebRtcAec_system_delay(self_->aec));
   }
 }
 
@@ -331,8 +338,8 @@
 
     // We now have established the required buffer size. Let us verify that we
     // fill up before leaving the startup phase for normal processing.
-    int buffer_size = 0;
-    int target_buffer_size = kDeviceBufMs * samples_per_frame_ / 10 * 3 / 4;
+    size_t buffer_size = 0;
+    size_t target_buffer_size = kDeviceBufMs * samples_per_frame_ / 10 * 3 / 4;
     process_time_ms = 0;
     for (; process_time_ms <= kMaxConvergenceMs; process_time_ms += 10) {
       RenderAndCapture(kDeviceBufMs);
@@ -345,7 +352,8 @@
     // Verify convergence time.
     EXPECT_GT(kMaxConvergenceMs, process_time_ms);
     // Verify that the buffer has reached the desired size.
-    EXPECT_LE(target_buffer_size, WebRtcAec_system_delay(self_->aec));
+    EXPECT_LE(static_cast<int>(target_buffer_size),
+              WebRtcAec_system_delay(self_->aec));
 
     // Verify normal behavior (system delay is kept constant) after startup by
     // running a couple of calls to BufferFarend() and Process().
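
Note on the static_cast<int> additions above: WebRtcAec_system_delay() still returns int while the test accumulators are now size_t, and gtest's EXPECT macros compare both operands under -Wsign-compare; casting the unsigned side keeps the comparison same-signed. A minimal sketch (the delay function is a stand-in):

  #include <cstddef>
  #include "testing/gtest/include/gtest/gtest.h"

  int FakeSystemDelay() { return 160; }  // stand-in for an int-returning API

  TEST(SignednessExample, CastAtTheApiBoundary) {
    size_t buffer_size = 160;
    // Cast the size_t accumulator to match the int-returning API, avoiding
    // -Wsign-compare warnings inside EXPECT_EQ's template expansion.
    EXPECT_EQ(static_cast<int>(buffer_size), FakeSystemDelay());
  }
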
diff --git a/webrtc/modules/audio_processing/aecm/echo_control_mobile.c b/webrtc/modules/audio_processing/aecm/echo_control_mobile.c
index 5f3fa2a..83781e9 100644
--- a/webrtc/modules/audio_processing/aecm/echo_control_mobile.c
+++ b/webrtc/modules/audio_processing/aecm/echo_control_mobile.c
@@ -199,7 +199,7 @@
 }
 
 int32_t WebRtcAecm_BufferFarend(void *aecmInst, const int16_t *farend,
-                                int16_t nrOfSamples)
+                                size_t nrOfSamples)
 {
   AecMobile* aecm = aecmInst;
     int32_t retVal = 0;
@@ -233,21 +233,21 @@
         WebRtcAecm_DelayComp(aecm);
     }
 
-    WebRtc_WriteBuffer(aecm->farendBuf, farend, (size_t) nrOfSamples);
+    WebRtc_WriteBuffer(aecm->farendBuf, farend, nrOfSamples);
 
     return retVal;
 }
 
 int32_t WebRtcAecm_Process(void *aecmInst, const int16_t *nearendNoisy,
                            const int16_t *nearendClean, int16_t *out,
-                           int16_t nrOfSamples, int16_t msInSndCardBuf)
+                           size_t nrOfSamples, int16_t msInSndCardBuf)
 {
   AecMobile* aecm = aecmInst;
     int32_t retVal = 0;
-    short i;
+    size_t i;
     short nmbrOfFilledBuffers;
-    short nBlocks10ms;
-    short nFrames;
+    size_t nBlocks10ms;
+    size_t nFrames;
 #ifdef AEC_DEBUG
     short msInAECBuf;
 #endif
diff --git a/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h b/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h
index 22e0fe6..7ae15c2 100644
--- a/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h
+++ b/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h
@@ -87,7 +87,7 @@
  */
 int32_t WebRtcAecm_BufferFarend(void* aecmInst,
                                 const int16_t* farend,
-                                int16_t nrOfSamples);
+                                size_t nrOfSamples);
 
 /*
  * Runs the AECM on 80 or 160 sample blocks of data.
@@ -118,7 +118,7 @@
                            const int16_t* nearendNoisy,
                            const int16_t* nearendClean,
                            int16_t* out,
-                           int16_t nrOfSamples,
+                           size_t nrOfSamples,
                            int16_t msInSndCardBuf);
 
 /*
diff --git a/webrtc/modules/audio_processing/agc/agc.cc b/webrtc/modules/audio_processing/agc/agc.cc
index 80c3e1f..9786d7b 100644
--- a/webrtc/modules/audio_processing/agc/agc.cc
+++ b/webrtc/modules/audio_processing/agc/agc.cc
@@ -39,17 +39,17 @@
 
 Agc::~Agc() {}
 
-float Agc::AnalyzePreproc(const int16_t* audio, int length) {
+float Agc::AnalyzePreproc(const int16_t* audio, size_t length) {
   assert(length > 0);
-  int num_clipped = 0;
-  for (int i = 0; i < length; ++i) {
+  size_t num_clipped = 0;
+  for (size_t i = 0; i < length; ++i) {
     if (audio[i] == 32767 || audio[i] == -32768)
       ++num_clipped;
   }
   return 1.0f * num_clipped / length;
 }
 
-int Agc::Process(const int16_t* audio, int length, int sample_rate_hz) {
+int Agc::Process(const int16_t* audio, size_t length, int sample_rate_hz) {
   vad_.ProcessChunk(audio, length, sample_rate_hz);
   const std::vector<double>& rms = vad_.chunkwise_rms();
   const std::vector<double>& probabilities =
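
Note on AnalyzePreproc above: the 1.0f multiplier is what keeps the clipped-sample ratio fractional now that both operands are size_t; plain num_clipped / length would truncate in unsigned integer division. A reduced sketch:

  #include <cstddef>
  #include <cstdio>

  int main() {
    size_t num_clipped = 3;
    size_t length = 160;
    float truncated = num_clipped / length;     // integer division: 0
    float ratio = 1.0f * num_clipped / length;  // promotes first: 0.01875
    std::printf("%f vs %f\n", truncated, ratio);
    return 0;
  }
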
diff --git a/webrtc/modules/audio_processing/agc/agc.h b/webrtc/modules/audio_processing/agc/agc.h
index dd4605e..08c287f 100644
--- a/webrtc/modules/audio_processing/agc/agc.h
+++ b/webrtc/modules/audio_processing/agc/agc.h
@@ -27,10 +27,10 @@
 
   // Returns the proportion of samples in the buffer which are at full-scale
   // (and presumably clipped).
-  virtual float AnalyzePreproc(const int16_t* audio, int length);
+  virtual float AnalyzePreproc(const int16_t* audio, size_t length);
   // |audio| must be mono; in a multi-channel stream, provide the first (usually
   // left) channel.
-  virtual int Process(const int16_t* audio, int length, int sample_rate_hz);
+  virtual int Process(const int16_t* audio, size_t length, int sample_rate_hz);
 
   // Retrieves the difference between the target RMS level and the current
   // signal RMS level in dB. Returns true if an update is available and false
diff --git a/webrtc/modules/audio_processing/agc/agc_manager_direct.cc b/webrtc/modules/audio_processing/agc/agc_manager_direct.cc
index 74f5540..48ce2f8 100644
--- a/webrtc/modules/audio_processing/agc/agc_manager_direct.cc
+++ b/webrtc/modules/audio_processing/agc/agc_manager_direct.cc
@@ -95,7 +95,7 @@
   ~DebugFile() {
     fclose(file_);
   }
-  void Write(const int16_t* data, int length_samples) {
+  void Write(const int16_t* data, size_t length_samples) {
     fwrite(data, 1, length_samples * sizeof(int16_t), file_);
   }
  private:
@@ -106,7 +106,7 @@
   }
   ~DebugFile() {
   }
-  void Write(const int16_t* data, int length_samples) {
+  void Write(const int16_t* data, size_t length_samples) {
   }
 #endif  // WEBRTC_AGC_DEBUG_DUMP
 };
@@ -188,8 +188,8 @@
 
 void AgcManagerDirect::AnalyzePreProcess(int16_t* audio,
                                          int num_channels,
-                                         int samples_per_channel) {
-  int length = num_channels * samples_per_channel;
+                                         size_t samples_per_channel) {
+  size_t length = num_channels * samples_per_channel;
   if (capture_muted_) {
     return;
   }
@@ -230,7 +230,7 @@
 }
 
 void AgcManagerDirect::Process(const int16_t* audio,
-                               int length,
+                               size_t length,
                                int sample_rate_hz) {
   if (capture_muted_) {
     return;
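
Note: DebugFile::Write above hands its sample count straight to fwrite, whose size parameters are already size_t; most libc buffer APIs are, which is part of the motivation for this migration. A minimal sketch:

  #include <cstddef>
  #include <cstdint>
  #include <cstdio>

  // fwrite(ptr, size, nmemb, stream) takes size_t for size and nmemb, so a
  // size_t sample count flows through with no conversion or narrowing.
  void WriteSamples(FILE* file, const int16_t* data, size_t length_samples) {
    std::fwrite(data, sizeof(int16_t), length_samples, file);
  }
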
diff --git a/webrtc/modules/audio_processing/agc/agc_manager_direct.h b/webrtc/modules/audio_processing/agc/agc_manager_direct.h
index d12acf3..fae1248 100644
--- a/webrtc/modules/audio_processing/agc/agc_manager_direct.h
+++ b/webrtc/modules/audio_processing/agc/agc_manager_direct.h
@@ -57,8 +57,8 @@
   int Initialize();
   void AnalyzePreProcess(int16_t* audio,
                          int num_channels,
-                         int samples_per_channel);
-  void Process(const int16_t* audio, int length, int sample_rate_hz);
+                         size_t samples_per_channel);
+  void Process(const int16_t* audio, size_t length, int sample_rate_hz);
 
   // Sets a new microphone level, after first checking that it hasn't been
   // updated by the user, in which case no action is taken.
diff --git a/webrtc/modules/audio_processing/agc/legacy/analog_agc.c b/webrtc/modules/audio_processing/agc/legacy/analog_agc.c
index 73adb5d..be644d9 100644
--- a/webrtc/modules/audio_processing/agc/legacy/analog_agc.c
+++ b/webrtc/modules/audio_processing/agc/legacy/analog_agc.c
@@ -41,7 +41,7 @@
 
 static const int16_t kMuteGuardTimeMs = 8000;
 static const int16_t kInitCheck = 42;
-static const int16_t kNumSubframes = 10;
+static const size_t kNumSubframes = 10;
 
 /* Default settings if config is not used */
 #define AGC_DEFAULT_TARGET_LEVEL 3
@@ -112,13 +112,14 @@
         6726, 5343, 4244, 3371, 2678, 2127, 1690, 1342, 1066, 847, 673, 534, 424, 337, 268,
         213, 169, 134, 107, 85, 67};
 
-int WebRtcAgc_AddMic(void *state, int16_t* const* in_mic, int16_t num_bands,
-                     int16_t samples)
+int WebRtcAgc_AddMic(void *state, int16_t* const* in_mic, size_t num_bands,
+                     size_t samples)
 {
     int32_t nrg, max_nrg, sample, tmp32;
     int32_t *ptr;
     uint16_t targetGainIdx, gain;
-    int16_t i, n, L, tmp16, tmp_speech[16];
+    size_t i;
+    int16_t n, L, tmp16, tmp_speech[16];
     LegacyAgc* stt;
     stt = (LegacyAgc*)state;
 
@@ -164,7 +165,7 @@
 
         for (i = 0; i < samples; i++)
         {
-            int j;
+            size_t j;
             for (j = 0; j < num_bands; ++j)
             {
                 sample = (in_mic[j][i] * gain) >> 12;
@@ -249,7 +250,7 @@
     return 0;
 }
 
-int WebRtcAgc_AddFarend(void *state, const int16_t *in_far, int16_t samples)
+int WebRtcAgc_AddFarend(void *state, const int16_t *in_far, size_t samples)
 {
   LegacyAgc* stt;
   stt = (LegacyAgc*)state;
@@ -280,16 +281,16 @@
 }
 
 int WebRtcAgc_VirtualMic(void *agcInst, int16_t* const* in_near,
-                         int16_t num_bands, int16_t samples, int32_t micLevelIn,
+                         size_t num_bands, size_t samples, int32_t micLevelIn,
                          int32_t *micLevelOut)
 {
     int32_t tmpFlt, micLevelTmp, gainIdx;
     uint16_t gain;
-    int16_t ii, j;
+    size_t ii, j;
     LegacyAgc* stt;
 
     uint32_t nrg;
-    int16_t sampleCntr;
+    size_t sampleCntr;
     uint32_t frameNrg = 0;
     uint32_t frameNrgLimit = 5500;
     int16_t numZeroCrossing = 0;
@@ -1132,7 +1133,7 @@
 }
 
 int WebRtcAgc_Process(void *agcInst, const int16_t* const* in_near,
-                      int16_t num_bands, int16_t samples,
+                      size_t num_bands, size_t samples,
                       int16_t* const* out, int32_t inMicLevel,
                       int32_t *outMicLevel, int16_t echo,
                       uint8_t *saturationWarning)
diff --git a/webrtc/modules/audio_processing/agc/legacy/digital_agc.c b/webrtc/modules/audio_processing/agc/legacy/digital_agc.c
index 4619b88..aeafb65 100644
--- a/webrtc/modules/audio_processing/agc/legacy/digital_agc.c
+++ b/webrtc/modules/audio_processing/agc/legacy/digital_agc.c
@@ -283,7 +283,7 @@
 
 int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc* stt,
                                      const int16_t* in_far,
-                                     int16_t nrSamples) {
+                                     size_t nrSamples) {
     assert(stt != NULL);
     // VAD for far end
     WebRtcAgc_ProcessVad(&stt->vadFarend, in_far, nrSamples);
@@ -293,7 +293,7 @@
 
 int32_t WebRtcAgc_ProcessDigital(DigitalAgc* stt,
                                  const int16_t* const* in_near,
-                                 int16_t num_bands,
+                                 size_t num_bands,
                                  int16_t* const* out,
                                  uint32_t FS,
                                  int16_t lowlevelSignal) {
@@ -310,8 +310,9 @@
     int16_t zeros = 0, zeros_fast, frac = 0;
     int16_t decay;
     int16_t gate, gain_adj;
-    int16_t k, n, i;
-    int16_t L, L2; // samples/subframe
+    int16_t k;
+    size_t n, i, L;
+    int16_t L2; // samples/subframe
 
     // determine number of samples per ms
     if (FS == 8000)
@@ -632,7 +633,7 @@
 
 int16_t WebRtcAgc_ProcessVad(AgcVad* state,      // (i) VAD state
                              const int16_t* in,  // (i) Speech signal
-                             int16_t nrSamples)  // (i) number of samples
+                             size_t nrSamples)  // (i) number of samples
 {
     int32_t out, nrg, tmp32, tmp32b;
     uint16_t tmpU16;
diff --git a/webrtc/modules/audio_processing/agc/legacy/digital_agc.h b/webrtc/modules/audio_processing/agc/legacy/digital_agc.h
index b8314d9..819844d 100644
--- a/webrtc/modules/audio_processing/agc/legacy/digital_agc.h
+++ b/webrtc/modules/audio_processing/agc/legacy/digital_agc.h
@@ -56,20 +56,20 @@
 
 int32_t WebRtcAgc_ProcessDigital(DigitalAgc* digitalAgcInst,
                                  const int16_t* const* inNear,
-                                 int16_t num_bands,
+                                 size_t num_bands,
                                  int16_t* const* out,
                                  uint32_t FS,
                                  int16_t lowLevelSignal);
 
 int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc* digitalAgcInst,
                                      const int16_t* inFar,
-                                     int16_t nrSamples);
+                                     size_t nrSamples);
 
 void WebRtcAgc_InitVad(AgcVad* vadInst);
 
 int16_t WebRtcAgc_ProcessVad(AgcVad* vadInst,     // (i) VAD state
                              const int16_t* in,   // (i) Speech signal
-                             int16_t nrSamples);  // (i) number of samples
+                             size_t nrSamples);  // (i) number of samples
 
 int32_t WebRtcAgc_CalculateGainTable(int32_t *gainTable, // Q16
                                      int16_t compressionGaindB, // Q0 (in dB)
diff --git a/webrtc/modules/audio_processing/agc/legacy/gain_control.h b/webrtc/modules/audio_processing/agc/legacy/gain_control.h
index 0ccba76..08c1988 100644
--- a/webrtc/modules/audio_processing/agc/legacy/gain_control.h
+++ b/webrtc/modules/audio_processing/agc/legacy/gain_control.h
@@ -66,7 +66,7 @@
  */
 int WebRtcAgc_AddFarend(void* agcInst,
                         const int16_t* inFar,
-                        int16_t samples);
+                        size_t samples);
 
 /*
  * This function processes a 10 ms frame of microphone speech to determine
@@ -90,8 +90,8 @@
  */
 int WebRtcAgc_AddMic(void* agcInst,
                      int16_t* const* inMic,
-                     int16_t num_bands,
-                     int16_t samples);
+                     size_t num_bands,
+                     size_t samples);
 
 /*
  * This function replaces the analog microphone with a virtual one.
@@ -118,8 +118,8 @@
  */
 int WebRtcAgc_VirtualMic(void* agcInst,
                          int16_t* const* inMic,
-                         int16_t num_bands,
-                         int16_t samples,
+                         size_t num_bands,
+                         size_t samples,
                          int32_t micLevelIn,
                          int32_t* micLevelOut);
 
@@ -159,8 +159,8 @@
  */
 int WebRtcAgc_Process(void* agcInst,
                       const int16_t* const* inNear,
-                      int16_t num_bands,
-                      int16_t samples,
+                      size_t num_bands,
+                      size_t samples,
                       int16_t* const* out,
                       int32_t inMicLevel,
                       int32_t* outMicLevel,
diff --git a/webrtc/modules/audio_processing/agc/mock_agc.h b/webrtc/modules/audio_processing/agc/mock_agc.h
index 1c36a05..13dbd2e 100644
--- a/webrtc/modules/audio_processing/agc/mock_agc.h
+++ b/webrtc/modules/audio_processing/agc/mock_agc.h
@@ -20,8 +20,8 @@
 
 class MockAgc : public Agc {
  public:
-  MOCK_METHOD2(AnalyzePreproc, float(const int16_t* audio, int length));
-  MOCK_METHOD3(Process, int(const int16_t* audio, int length,
+  MOCK_METHOD2(AnalyzePreproc, float(const int16_t* audio, size_t length));
+  MOCK_METHOD3(Process, int(const int16_t* audio, size_t length,
                             int sample_rate_hz));
   MOCK_METHOD1(GetRmsErrorDb, bool(int* error));
   MOCK_METHOD0(Reset, void());
diff --git a/webrtc/modules/audio_processing/audio_buffer.cc b/webrtc/modules/audio_processing/audio_buffer.cc
index 6f73262..81790a1 100644
--- a/webrtc/modules/audio_processing/audio_buffer.cc
+++ b/webrtc/modules/audio_processing/audio_buffer.cc
@@ -19,9 +19,9 @@
 namespace webrtc {
 namespace {
 
-const int kSamplesPer16kHzChannel = 160;
-const int kSamplesPer32kHzChannel = 320;
-const int kSamplesPer48kHzChannel = 480;
+const size_t kSamplesPer16kHzChannel = 160;
+const size_t kSamplesPer32kHzChannel = 320;
+const size_t kSamplesPer48kHzChannel = 480;
 
 int KeyboardChannelIndex(const StreamConfig& stream_config) {
   if (!stream_config.has_keyboard()) {
@@ -32,23 +32,22 @@
   return stream_config.num_channels();
 }
 
-int NumBandsFromSamplesPerChannel(int num_frames) {
-  int num_bands = 1;
+size_t NumBandsFromSamplesPerChannel(size_t num_frames) {
+  size_t num_bands = 1;
   if (num_frames == kSamplesPer32kHzChannel ||
       num_frames == kSamplesPer48kHzChannel) {
-    num_bands = rtc::CheckedDivExact(num_frames,
-                                     static_cast<int>(kSamplesPer16kHzChannel));
+    num_bands = rtc::CheckedDivExact(num_frames, kSamplesPer16kHzChannel);
   }
   return num_bands;
 }
 
 }  // namespace
 
-AudioBuffer::AudioBuffer(int input_num_frames,
+AudioBuffer::AudioBuffer(size_t input_num_frames,
                          int num_input_channels,
-                         int process_num_frames,
+                         size_t process_num_frames,
                          int num_process_channels,
-                         int output_num_frames)
+                         size_t output_num_frames)
   : input_num_frames_(input_num_frames),
     num_input_channels_(num_input_channels),
     proc_num_frames_(process_num_frames),
@@ -345,20 +344,20 @@
   num_channels_ = num_channels;
 }
 
-int AudioBuffer::num_frames() const {
+size_t AudioBuffer::num_frames() const {
   return proc_num_frames_;
 }
 
-int AudioBuffer::num_frames_per_band() const {
+size_t AudioBuffer::num_frames_per_band() const {
   return num_split_frames_;
 }
 
-int AudioBuffer::num_keyboard_frames() const {
+size_t AudioBuffer::num_keyboard_frames() const {
   // We don't resample the keyboard channel.
   return input_num_frames_;
 }
 
-int AudioBuffer::num_bands() const {
+size_t AudioBuffer::num_bands() const {
   return num_bands_;
 }
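
Note: with both operands size_t, NumBandsFromSamplesPerChannel above no longer needs the static_cast<int> inside rtc::CheckedDivExact. A reduced sketch of the band computation, approximating CheckedDivExact with an assert:

  #include <cassert>
  #include <cstddef>

  // A 10 ms chunk at 32 or 48 kHz splits into 16 kHz bands:
  // 320 frames -> 2 bands, 480 frames -> 3 bands, everything else -> 1.
  size_t NumBands(size_t num_frames) {
    const size_t kSamplesPer16kHzChannel = 160;
    if (num_frames == 320 || num_frames == 480) {
      assert(num_frames % kSamplesPer16kHzChannel == 0);
      return num_frames / kSamplesPer16kHzChannel;
    }
    return 1;
  }
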
 
diff --git a/webrtc/modules/audio_processing/audio_buffer.h b/webrtc/modules/audio_processing/audio_buffer.h
index aeb303b..f82ab61 100644
--- a/webrtc/modules/audio_processing/audio_buffer.h
+++ b/webrtc/modules/audio_processing/audio_buffer.h
@@ -33,19 +33,19 @@
 class AudioBuffer {
  public:
   // TODO(ajm): Switch to take ChannelLayouts.
-  AudioBuffer(int input_num_frames,
+  AudioBuffer(size_t input_num_frames,
               int num_input_channels,
-              int process_num_frames,
+              size_t process_num_frames,
               int num_process_channels,
-              int output_num_frames);
+              size_t output_num_frames);
   virtual ~AudioBuffer();
 
   int num_channels() const;
   void set_num_channels(int num_channels);
-  int num_frames() const;
-  int num_frames_per_band() const;
-  int num_keyboard_frames() const;
-  int num_bands() const;
+  size_t num_frames() const;
+  size_t num_frames_per_band() const;
+  size_t num_keyboard_frames() const;
+  size_t num_bands() const;
 
   // Returns a pointer array to the full-band channels.
   // Usage:
@@ -127,20 +127,20 @@
 
   // The audio is passed into DeinterleaveFrom() or CopyFrom() with input
   // format (samples per channel and number of channels).
-  const int input_num_frames_;
+  const size_t input_num_frames_;
   const int num_input_channels_;
   // The audio is stored by DeinterleaveFrom() or CopyFrom() with processing
   // format.
-  const int proc_num_frames_;
+  const size_t proc_num_frames_;
   const int num_proc_channels_;
   // The audio is returned by InterleaveTo() and CopyTo() with output samples
   // per channels and the current number of channels. This last one can be
   // changed at any time using set_num_channels().
-  const int output_num_frames_;
+  const size_t output_num_frames_;
   int num_channels_;
 
-  int num_bands_;
-  int num_split_frames_;
+  size_t num_bands_;
+  size_t num_split_frames_;
   bool mixed_low_pass_valid_;
   bool reference_copied_;
   AudioFrame::VADActivity activity_;
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc
index c9e4ddc..ff4128b 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.cc
+++ b/webrtc/modules/audio_processing/audio_processing_impl.cc
@@ -510,7 +510,7 @@
 }
 
 int AudioProcessingImpl::ProcessStream(const float* const* src,
-                                       int samples_per_channel,
+                                       size_t samples_per_channel,
                                        int input_sample_rate_hz,
                                        ChannelLayout input_layout,
                                        int output_sample_rate_hz,
@@ -716,7 +716,7 @@
 }
 
 int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data,
-                                              int samples_per_channel,
+                                              size_t samples_per_channel,
                                               int rev_sample_rate_hz,
                                               ChannelLayout layout) {
   const StreamConfig reverse_config = {
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.h b/webrtc/modules/audio_processing/audio_processing_impl.h
index a08f7b3..15c6f75 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.h
+++ b/webrtc/modules/audio_processing/audio_processing_impl.h
@@ -80,7 +80,7 @@
   bool output_will_be_muted() const override;
   int ProcessStream(AudioFrame* frame) override;
   int ProcessStream(const float* const* src,
-                    int samples_per_channel,
+                    size_t samples_per_channel,
                     int input_sample_rate_hz,
                     ChannelLayout input_layout,
                     int output_sample_rate_hz,
@@ -93,7 +93,7 @@
   int AnalyzeReverseStream(AudioFrame* frame) override;
   int ProcessReverseStream(AudioFrame* frame) override;
   int AnalyzeReverseStream(const float* const* data,
-                           int samples_per_channel,
+                           size_t samples_per_channel,
                            int sample_rate_hz,
                            ChannelLayout layout) override;
   int ProcessReverseStream(const float* const* src,
diff --git a/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc b/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
index c70bf5e..ed81247 100644
--- a/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
+++ b/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
@@ -51,9 +51,9 @@
 void CovarianceMatrixGenerator::AngledCovarianceMatrix(
     float sound_speed,
     float angle,
-    int frequency_bin,
-    int fft_size,
-    int num_freq_bins,
+    size_t frequency_bin,
+    size_t fft_size,
+    size_t num_freq_bins,
     int sample_rate,
     const std::vector<Point>& geometry,
     ComplexMatrix<float>* mat) {
@@ -75,8 +75,8 @@
 }
 
 void CovarianceMatrixGenerator::PhaseAlignmentMasks(
-    int frequency_bin,
-    int fft_size,
+    size_t frequency_bin,
+    size_t fft_size,
     int sample_rate,
     float sound_speed,
     const std::vector<Point>& geometry,
diff --git a/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.h b/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.h
index 5979462..5375518 100644
--- a/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.h
+++ b/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.h
@@ -30,9 +30,9 @@
   // The covariance matrix of a source at the given angle.
   static void AngledCovarianceMatrix(float sound_speed,
                                      float angle,
-                                     int frequency_bin,
-                                     int fft_size,
-                                     int num_freq_bins,
+                                     size_t frequency_bin,
+                                     size_t fft_size,
+                                     size_t num_freq_bins,
                                      int sample_rate,
                                      const std::vector<Point>& geometry,
                                      ComplexMatrix<float>* mat);
@@ -40,8 +40,8 @@
   // Calculates phase shifts that, when applied to a multichannel signal and
   // added together, cause constructive interference for sources located at
   // the given angle.
-  static void PhaseAlignmentMasks(int frequency_bin,
-                                  int fft_size,
+  static void PhaseAlignmentMasks(size_t frequency_bin,
+                                  size_t fft_size,
                                   int sample_rate,
                                   float sound_speed,
                                   const std::vector<Point>& geometry,
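
For intuition about the PhaseAlignmentMasks() declaration above: phase-alignment (delay-and-sum) beamformers conventionally use the far-field steering model below; whether this implementation matches it exactly is not visible in this diff, so treat the formula as background rather than a reading of the code:

    f_k = \frac{k \, f_s}{N_{\mathrm{fft}}}, \qquad
    \tau_m = \frac{\mathbf{p}_m \cdot \mathbf{u}(\theta)}{c}, \qquad
    w_m(k) = e^{-i 2 \pi f_k \tau_m}

Here k is frequency_bin, N_fft is fft_size, f_s is sample_rate, p_m is the m-th microphone position from |geometry|, theta is the steering angle (cf. |angle| in AngledCovarianceMatrix above), and c is sound_speed; multiplying channel m by w_m(k) and summing produces the constructive interference the comment describes.
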
diff --git a/webrtc/modules/audio_processing/beamformer/matrix.h b/webrtc/modules/audio_processing/beamformer/matrix.h
index 990f6a4..1a961af 100644
--- a/webrtc/modules/audio_processing/beamformer/matrix.h
+++ b/webrtc/modules/audio_processing/beamformer/matrix.h
@@ -95,7 +95,9 @@
     memcpy(&data_[0], data, num_rows_ * num_columns_ * sizeof(data_[0]));
   }
 
-  Matrix& CopyFromColumn(const T* const* src, int column_index, int num_rows) {
+  Matrix& CopyFromColumn(const T* const* src,
+                         size_t column_index,
+                         int num_rows) {
     Resize(1, num_rows);
     for (int i = 0; i < num_columns_; ++i) {
       data_[i] = src[i][column_index];
diff --git a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
index 6925b61..f7e80b5 100644
--- a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
+++ b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
@@ -119,8 +119,8 @@
 }
 
 // Works for positive numbers only.
-int Round(float x) {
-  return static_cast<int>(std::floor(x + 0.5f));
+size_t Round(float x) {
+  return static_cast<size_t>(std::floor(x + 0.5f));
 }
 
 // Calculates the sum of absolute values of a complex matrix.
@@ -179,6 +179,9 @@
 
 }  // namespace
 
+// static
+const size_t NonlinearBeamformer::kNumFreqBins;
+
 NonlinearBeamformer::NonlinearBeamformer(
     const std::vector<Point>& array_geometry)
   : num_input_channels_(array_geometry.size()),
@@ -187,7 +190,8 @@
 }
 
 void NonlinearBeamformer::Initialize(int chunk_size_ms, int sample_rate_hz) {
-  chunk_length_ = sample_rate_hz / (1000.f / chunk_size_ms);
+  chunk_length_ =
+      static_cast<size_t>(sample_rate_hz / (1000.f / chunk_size_ms));
   sample_rate_hz_ = sample_rate_hz;
   low_mean_start_bin_ = Round(kLowMeanStartHz * kFftSize / sample_rate_hz_);
   low_mean_end_bin_ = Round(kLowMeanEndHz * kFftSize / sample_rate_hz_);
@@ -203,7 +207,7 @@
   //   constant               ^                        ^
   //             low_mean_end_bin_       high_mean_end_bin_
   //
-  DCHECK_GT(low_mean_start_bin_, 0);
+  DCHECK_GT(low_mean_start_bin_, 0U);
   DCHECK_LT(low_mean_start_bin_, low_mean_end_bin_);
   DCHECK_LT(low_mean_end_bin_, high_mean_end_bin_);
   DCHECK_LT(high_mean_start_bin_, high_mean_end_bin_);
@@ -222,7 +226,7 @@
                                               kFftSize,
                                               kFftSize / 2,
                                               this));
-  for (int i = 0; i < kNumFreqBins; ++i) {
+  for (size_t i = 0; i < kNumFreqBins; ++i) {
     time_smooth_mask_[i] = 1.f;
     final_mask_[i] = 1.f;
     float freq_hz = (static_cast<float>(i) / kFftSize) * sample_rate_hz_;
@@ -237,7 +241,7 @@
   InitTargetCovMats();
   InitInterfCovMats();
 
-  for (int i = 0; i < kNumFreqBins; ++i) {
+  for (size_t i = 0; i < kNumFreqBins; ++i) {
     rxiws_[i] = Norm(target_cov_mats_[i], delay_sum_masks_[i]);
     rpsiws_[i] = Norm(interf_cov_mats_[i], delay_sum_masks_[i]);
     reflected_rpsiws_[i] =
@@ -246,7 +250,7 @@
 }
 
 void NonlinearBeamformer::InitDelaySumMasks() {
-  for (int f_ix = 0; f_ix < kNumFreqBins; ++f_ix) {
+  for (size_t f_ix = 0; f_ix < kNumFreqBins; ++f_ix) {
     delay_sum_masks_[f_ix].Resize(1, num_input_channels_);
     CovarianceMatrixGenerator::PhaseAlignmentMasks(f_ix,
                                                    kFftSize,
@@ -266,7 +270,7 @@
 }
 
 void NonlinearBeamformer::InitTargetCovMats() {
-  for (int i = 0; i < kNumFreqBins; ++i) {
+  for (size_t i = 0; i < kNumFreqBins; ++i) {
     target_cov_mats_[i].Resize(num_input_channels_, num_input_channels_);
     TransposedConjugatedProduct(delay_sum_masks_[i], &target_cov_mats_[i]);
     complex_f normalization_factor = target_cov_mats_[i].Trace();
@@ -275,7 +279,7 @@
 }
 
 void NonlinearBeamformer::InitInterfCovMats() {
-  for (int i = 0; i < kNumFreqBins; ++i) {
+  for (size_t i = 0; i < kNumFreqBins; ++i) {
     interf_cov_mats_[i].Resize(num_input_channels_, num_input_channels_);
     ComplexMatrixF uniform_cov_mat(num_input_channels_, num_input_channels_);
     ComplexMatrixF angled_cov_mat(num_input_channels_, num_input_channels_);
@@ -320,9 +324,9 @@
       input.num_frames_per_band();
   // Apply delay and sum and post-filter in the time domain. WARNING: only works
   // because delay-and-sum is not frequency dependent.
-  for (int i = 1; i < input.num_bands(); ++i) {
+  for (size_t i = 1; i < input.num_bands(); ++i) {
     float smoothed_mask = old_high_pass_mask;
-    for (int j = 0; j < input.num_frames_per_band(); ++j) {
+    for (size_t j = 0; j < input.num_frames_per_band(); ++j) {
       smoothed_mask += ramp_increment;
 
       // Applying the delay and sum (at zero degrees, this is equivalent to
@@ -345,7 +349,7 @@
 
 void NonlinearBeamformer::ProcessAudioBlock(const complex_f* const* input,
                                             int num_input_channels,
-                                            int num_freq_bins,
+                                            size_t num_freq_bins,
                                             int num_output_channels,
                                             complex_f* const* output) {
   CHECK_EQ(num_freq_bins, kNumFreqBins);
@@ -355,7 +359,7 @@
   // Calculating the post-filter masks. Note that we need two for each
   // frequency bin to account for the positive and negative interferer
   // angle.
-  for (int i = low_mean_start_bin_; i <= high_mean_end_bin_; ++i) {
+  for (size_t i = low_mean_start_bin_; i <= high_mean_end_bin_; ++i) {
     eig_m_.CopyFromColumn(input, i, num_input_channels_);
     float eig_m_norm_factor = std::sqrt(SumSquares(eig_m_));
     if (eig_m_norm_factor != 0.f) {
@@ -420,7 +424,7 @@
 void NonlinearBeamformer::ApplyMasks(const complex_f* const* input,
                                      complex_f* const* output) {
   complex_f* output_channel = output[0];
-  for (int f_ix = 0; f_ix < kNumFreqBins; ++f_ix) {
+  for (size_t f_ix = 0; f_ix < kNumFreqBins; ++f_ix) {
     output_channel[f_ix] = complex_f(0.f, 0.f);
 
     const complex_f* delay_sum_mask_els =
@@ -435,7 +439,7 @@
 
 // Smooth new_mask_ into time_smooth_mask_.
 void NonlinearBeamformer::ApplyMaskTimeSmoothing() {
-  for (int i = low_mean_start_bin_; i <= high_mean_end_bin_; ++i) {
+  for (size_t i = low_mean_start_bin_; i <= high_mean_end_bin_; ++i) {
     time_smooth_mask_[i] = kMaskTimeSmoothAlpha * new_mask_[i] +
                            (1 - kMaskTimeSmoothAlpha) * time_smooth_mask_[i];
   }
@@ -460,11 +464,11 @@
   // |------|------------|------|
   //  ^<------------------^
   std::copy(time_smooth_mask_, time_smooth_mask_ + kNumFreqBins, final_mask_);
-  for (int i = low_mean_start_bin_; i < kNumFreqBins; ++i) {
+  for (size_t i = low_mean_start_bin_; i < kNumFreqBins; ++i) {
     final_mask_[i] = kMaskFrequencySmoothAlpha * final_mask_[i] +
                      (1 - kMaskFrequencySmoothAlpha) * final_mask_[i - 1];
   }
-  for (int i = high_mean_end_bin_ + 1; i > 0; --i) {
+  for (size_t i = high_mean_end_bin_ + 1; i > 0; --i) {
     final_mask_[i - 1] = kMaskFrequencySmoothAlpha * final_mask_[i - 1] +
                          (1 - kMaskFrequencySmoothAlpha) * final_mask_[i];
   }
@@ -488,7 +492,7 @@
 }
 
 // Compute mean over the given range of time_smooth_mask_, [first, last).
-float NonlinearBeamformer::MaskRangeMean(int first, int last) {
+float NonlinearBeamformer::MaskRangeMean(size_t first, size_t last) {
   DCHECK_GT(last, first);
   const float sum = std::accumulate(time_smooth_mask_ + first,
                                     time_smooth_mask_ + last, 0.f);
@@ -496,9 +500,9 @@
 }
 
 void NonlinearBeamformer::EstimateTargetPresence() {
-  const int quantile =
+  const size_t quantile = static_cast<size_t>(
       (high_mean_end_bin_ - low_mean_start_bin_) * kMaskQuantile +
-      low_mean_start_bin_;
+      low_mean_start_bin_);
   std::nth_element(new_mask_ + low_mean_start_bin_, new_mask_ + quantile,
                    new_mask_ + high_mean_end_bin_ + 1);
   if (new_mask_[quantile] > kMaskTargetThreshold) {
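
The backward loop in the frequency-smoothing hunk above shows the idiom this change relies on whenever a down-counting index becomes unsigned: a size_t can never be negative, so "i >= 0" would loop forever, and the loop instead starts one past the last bin, tests "i > 0", and indexes with i - 1. A self-contained illustration (array size and values are made up):

    #include <cstddef>
    #include <cstdio>

    int main() {
      float mask[6] = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f};
      const size_t end_bin = 4;  // stands in for high_mean_end_bin_

      // Visits indices end_bin, end_bin - 1, ..., 0, in that order.
      for (size_t i = end_bin + 1; i > 0; --i) {
        printf("mask[%zu] = %g\n", i - 1, mask[i - 1]);
      }
      return 0;
    }
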
diff --git a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h
index 219cff0..46c68bf 100644
--- a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h
+++ b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h
@@ -60,7 +60,7 @@
   // happens. Implements LappedTransform::Callback.
   void ProcessAudioBlock(const complex<float>* const* input,
                          int num_input_channels,
-                         int num_freq_bins,
+                         size_t num_freq_bins,
                          int num_output_channels,
                          complex<float>* const* output) override;
 
@@ -100,18 +100,18 @@
   void ApplyHighFrequencyCorrection();
 
   // Compute the means needed for the above frequency correction.
-  float MaskRangeMean(int start_bin, int end_bin);
+  float MaskRangeMean(size_t start_bin, size_t end_bin);
 
   // Applies both sets of masks to |input| and store in |output|.
   void ApplyMasks(const complex_f* const* input, complex_f* const* output);
 
   void EstimateTargetPresence();
 
-  static const int kFftSize = 256;
-  static const int kNumFreqBins = kFftSize / 2 + 1;
+  static const size_t kFftSize = 256;
+  static const size_t kNumFreqBins = kFftSize / 2 + 1;
 
   // Deals with the fft transform and blocking.
-  int chunk_length_;
+  size_t chunk_length_;
   rtc::scoped_ptr<LappedTransform> lapped_transform_;
   float window_[kFftSize];
 
@@ -122,10 +122,10 @@
   const std::vector<Point> array_geometry_;
 
   // Calculated based on user-input and constants in the .cc file.
-  int low_mean_start_bin_;
-  int low_mean_end_bin_;
-  int high_mean_start_bin_;
-  int high_mean_end_bin_;
+  size_t low_mean_start_bin_;
+  size_t low_mean_end_bin_;
+  size_t high_mean_start_bin_;
+  size_t high_mean_end_bin_;
 
   // Quickly varying mask updated every block.
   float new_mask_[kNumFreqBins];
@@ -167,9 +167,9 @@
   bool is_target_present_;
   // Number of blocks after which the data is considered interference if the
   // mask does not pass |kMaskSignalThreshold|.
-  int hold_target_blocks_;
+  size_t hold_target_blocks_;
   // Number of blocks since the last mask that passed |kMaskSignalThreshold|.
-  int interference_blocks_count_;
+  size_t interference_blocks_count_;
 };
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_processing/echo_cancellation_impl.cc b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
index f13ea8b..86ed923 100644
--- a/webrtc/modules/audio_processing/echo_cancellation_impl.cc
+++ b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
@@ -91,7 +91,7 @@
       err = WebRtcAec_BufferFarend(
           my_handle,
           audio->split_bands_const_f(j)[kBand0To8kHz],
-          static_cast<int16_t>(audio->num_frames_per_band()));
+          audio->num_frames_per_band());
 
       if (err != apm_->kNoError) {
         return GetHandleError(my_handle);  // TODO(ajm): warning possible?
@@ -133,7 +133,7 @@
           audio->split_bands_const_f(i),
           audio->num_bands(),
           audio->split_bands_f(i),
-          static_cast<int16_t>(audio->num_frames_per_band()),
+          audio->num_frames_per_band(),
           apm_->stream_delay_ms(),
           stream_drift_samples_);
 
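The casts deleted above are where the motivation for this change is easiest to see: forcing a size_t count through int16_t is a silent narrowing. num_frames_per_band() is small in practice, but nothing in the type system said so. A made-up demonstration of the hazard (the count is hypothetical; the wrap-around shown is what common two's-complement targets produce):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      size_t frames = 40000;  // hypothetical count above INT16_MAX
      int16_t narrowed = static_cast<int16_t>(frames);
      printf("%zu -> %d\n", frames, narrowed);  // 40000 -> -25536
      return 0;
    }
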
diff --git a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
index 33205eb..8d5ec9c 100644
--- a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
+++ b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
@@ -96,7 +96,7 @@
       err = WebRtcAecm_BufferFarend(
           my_handle,
           audio->split_bands_const(j)[kBand0To8kHz],
-          static_cast<int16_t>(audio->num_frames_per_band()));
+          audio->num_frames_per_band());
 
       if (err != apm_->kNoError) {
         return GetHandleError(my_handle);  // TODO(ajm): warning possible?
@@ -141,7 +141,7 @@
           noisy,
           clean,
           audio->split_bands(i)[kBand0To8kHz],
-          static_cast<int16_t>(audio->num_frames_per_band()),
+          audio->num_frames_per_band(),
           apm_->stream_delay_ms());
 
       if (err != apm_->kNoError) {
diff --git a/webrtc/modules/audio_processing/gain_control_impl.cc b/webrtc/modules/audio_processing/gain_control_impl.cc
index 398cf5c..8a3612d 100644
--- a/webrtc/modules/audio_processing/gain_control_impl.cc
+++ b/webrtc/modules/audio_processing/gain_control_impl.cc
@@ -64,7 +64,7 @@
     int err = WebRtcAgc_AddFarend(
         my_handle,
         audio->mixed_low_pass_data(),
-        static_cast<int16_t>(audio->num_frames_per_band()));
+        audio->num_frames_per_band());
 
     if (err != apm_->kNoError) {
       return GetHandleError(my_handle);
@@ -92,7 +92,7 @@
           my_handle,
           audio->split_bands(i),
           audio->num_bands(),
-          static_cast<int16_t>(audio->num_frames_per_band()));
+          audio->num_frames_per_band());
 
       if (err != apm_->kNoError) {
         return GetHandleError(my_handle);
@@ -108,7 +108,7 @@
           my_handle,
           audio->split_bands(i),
           audio->num_bands(),
-          static_cast<int16_t>(audio->num_frames_per_band()),
+          audio->num_frames_per_band(),
           analog_capture_level_,
           &capture_level_out);
 
@@ -146,7 +146,7 @@
         my_handle,
         audio->split_bands_const(i),
         audio->num_bands(),
-        static_cast<int16_t>(audio->num_frames_per_band()),
+        audio->num_frames_per_band(),
         audio->split_bands(i),
         capture_levels_[i],
         &capture_level_out,
diff --git a/webrtc/modules/audio_processing/high_pass_filter_impl.cc b/webrtc/modules/audio_processing/high_pass_filter_impl.cc
index 588ba41..6302f13 100644
--- a/webrtc/modules/audio_processing/high_pass_filter_impl.cc
+++ b/webrtc/modules/audio_processing/high_pass_filter_impl.cc
@@ -47,7 +47,7 @@
   return AudioProcessing::kNoError;
 }
 
-int Filter(FilterState* hpf, int16_t* data, int length) {
+int Filter(FilterState* hpf, int16_t* data, size_t length) {
   assert(hpf != NULL);
 
   int32_t tmp_int32 = 0;
@@ -55,7 +55,7 @@
   int16_t* x = hpf->x;
   const int16_t* ba = hpf->ba;
 
-  for (int i = 0; i < length; i++) {
+  for (size_t i = 0; i < length; i++) {
     //  y[i] = b[0] * x[i] + b[1] * x[i-1] + b[2] * x[i-2]
     //         + -a[1] * y[i-1] + -a[2] * y[i-2];
 
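The comment inside Filter() above spells out the second-order (biquad) recurrence the fixed-point loop implements. A floating-point sketch of the same recurrence, leaving out the Q-format scaling and rounding the real code performs (coefficients are assumed normalized so that a[0] == 1):

    #include <cstddef>

    // y[i] = b0*x[i] + b1*x[i-1] + b2*x[i-2] - a1*y[i-1] - a2*y[i-2],
    // processed in place over |length| samples.
    void BiquadFloat(const float b[3], const float a[3],
                     float* data, size_t length) {
      float x1 = 0.f, x2 = 0.f;  // x[i-1], x[i-2]
      float y1 = 0.f, y2 = 0.f;  // y[i-1], y[i-2]
      for (size_t i = 0; i < length; ++i) {
        const float x0 = data[i];
        const float y0 =
            b[0] * x0 + b[1] * x1 + b[2] * x2 - a[1] * y1 - a[2] * y2;
        x2 = x1; x1 = x0;
        y2 = y1; y1 = y0;
        data[i] = y0;
      }
    }
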
diff --git a/webrtc/modules/audio_processing/include/audio_processing.h b/webrtc/modules/audio_processing/include/audio_processing.h
index fd91bfa..445d5c8 100644
--- a/webrtc/modules/audio_processing/include/audio_processing.h
+++ b/webrtc/modules/audio_processing/include/audio_processing.h
@@ -311,7 +311,7 @@
   //
   // TODO(mgraczyk): Remove once clients are updated to use the new interface.
   virtual int ProcessStream(const float* const* src,
-                            int samples_per_channel,
+                            size_t samples_per_channel,
                             int input_sample_rate_hz,
                             ChannelLayout input_layout,
                             int output_sample_rate_hz,
@@ -357,7 +357,7 @@
   // of |data| points to a channel buffer, arranged according to |layout|.
   // TODO(mgraczyk): Remove once clients are updated to use the new interface.
   virtual int AnalyzeReverseStream(const float* const* data,
-                                   int samples_per_channel,
+                                   size_t samples_per_channel,
                                    int rev_sample_rate_hz,
                                    ChannelLayout layout) = 0;
 
@@ -510,8 +510,8 @@
   int num_channels() const { return num_channels_; }
 
   bool has_keyboard() const { return has_keyboard_; }
-  int num_frames() const { return num_frames_; }
-  int num_samples() const { return num_channels_ * num_frames_; }
+  size_t num_frames() const { return num_frames_; }
+  size_t num_samples() const { return num_channels_ * num_frames_; }
 
   bool operator==(const StreamConfig& other) const {
     return sample_rate_hz_ == other.sample_rate_hz_ &&
@@ -522,14 +522,15 @@
   bool operator!=(const StreamConfig& other) const { return !(*this == other); }
 
  private:
-  static int calculate_frames(int sample_rate_hz) {
-    return AudioProcessing::kChunkSizeMs * sample_rate_hz / 1000;
+  static size_t calculate_frames(int sample_rate_hz) {
+    return static_cast<size_t>(
+        AudioProcessing::kChunkSizeMs * sample_rate_hz / 1000);
   }
 
   int sample_rate_hz_;
   int num_channels_;
   bool has_keyboard_;
-  int num_frames_;
+  size_t num_frames_;
 };
 
 class ProcessingConfig {
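
calculate_frames() above is plain chunk arithmetic: frames per chunk = chunk duration times sample rate. A compile-time spot check, with kChunkSizeMsAssumed standing in for AudioProcessing::kChunkSizeMs (assumed to be 10 ms, its usual value in this API):

    #include <cstddef>

    constexpr int kChunkSizeMsAssumed = 10;

    constexpr size_t FramesPerChunk(int sample_rate_hz) {
      return static_cast<size_t>(kChunkSizeMsAssumed * sample_rate_hz / 1000);
    }

    static_assert(FramesPerChunk(8000) == 80, "10 ms at 8 kHz");
    static_assert(FramesPerChunk(48000) == 480, "10 ms at 48 kHz");
    // num_samples() is then channels * frames, e.g. 2 * 480 = 960 for stereo.
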
diff --git a/webrtc/modules/audio_processing/include/mock_audio_processing.h b/webrtc/modules/audio_processing/include/mock_audio_processing.h
index f0d9c32..4ff52ba 100644
--- a/webrtc/modules/audio_processing/include/mock_audio_processing.h
+++ b/webrtc/modules/audio_processing/include/mock_audio_processing.h
@@ -214,7 +214,7 @@
       int(AudioFrame* frame));
   MOCK_METHOD7(ProcessStream,
       int(const float* const* src,
-          int samples_per_channel,
+          size_t samples_per_channel,
           int input_sample_rate_hz,
           ChannelLayout input_layout,
           int output_sample_rate_hz,
@@ -229,7 +229,7 @@
       int(AudioFrame* frame));
   MOCK_METHOD1(ProcessReverseStream, int(AudioFrame* frame));
   MOCK_METHOD4(AnalyzeReverseStream,
-      int(const float* const* data, int frames, int sample_rate_hz,
+      int(const float* const* data, size_t frames, int sample_rate_hz,
           ChannelLayout input_layout));
   MOCK_METHOD4(ProcessReverseStream,
                int(const float* const* src,
diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
index 8eccde4..33ff5cd 100644
--- a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
+++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
@@ -30,7 +30,7 @@
 
 namespace {
 
-const int kErbResolution = 2;
+const size_t kErbResolution = 2;
 const int kWindowSizeMs = 2;
 const int kChunkSizeMs = 10;  // Size provided by APM.
 const float kClipFreq = 200.0f;
@@ -55,7 +55,7 @@
 void IntelligibilityEnhancer::TransformCallback::ProcessAudioBlock(
     const complex<float>* const* in_block,
     int in_channels,
-    int frames,
+    size_t frames,
     int /* out_channels */,
     complex<float>* const* out_block) {
   DCHECK_EQ(parent_->freqs_, frames);
@@ -71,8 +71,9 @@
 IntelligibilityEnhancer::IntelligibilityEnhancer(const Config& config)
     : freqs_(RealFourier::ComplexLength(
           RealFourier::FftOrder(config.sample_rate_hz * kWindowSizeMs / 1000))),
-      window_size_(1 << RealFourier::FftOrder(freqs_)),
-      chunk_length_(config.sample_rate_hz * kChunkSizeMs / 1000),
+      window_size_(static_cast<size_t>(1 << RealFourier::FftOrder(freqs_))),
+      chunk_length_(
+          static_cast<size_t>(config.sample_rate_hz * kChunkSizeMs / 1000)),
       bank_size_(GetBankSize(config.sample_rate_hz, kErbResolution)),
       sample_rate_hz_(config.sample_rate_hz),
       erb_resolution_(kErbResolution),
@@ -107,14 +108,14 @@
   CreateErbBank();
 
   // Assumes all rho equal.
-  for (int i = 0; i < bank_size_; ++i) {
+  for (size_t i = 0; i < bank_size_; ++i) {
     rho_[i] = config.rho * config.rho;
   }
 
   float freqs_khz = kClipFreq / 1000.0f;
-  int erb_index = static_cast<int>(ceilf(
+  size_t erb_index = static_cast<size_t>(ceilf(
       11.17f * logf((freqs_khz + 0.312f) / (freqs_khz + 14.6575f)) + 43.0f));
-  start_freq_ = max(1, erb_index * erb_resolution_);
+  start_freq_ = std::max(static_cast<size_t>(1), erb_index * erb_resolution_);
 
   WindowGenerator::KaiserBesselDerived(kKbdAlpha, window_size_,
                                        kbd_window_.get());
@@ -238,9 +239,9 @@
 void IntelligibilityEnhancer::UpdateErbGains() {
   // (ERB gain) = filterbank' * (freq gain)
   float* gains = gain_applier_.target();
-  for (int i = 0; i < freqs_; ++i) {
+  for (size_t i = 0; i < freqs_; ++i) {
     gains[i] = 0.0f;
-    for (int j = 0; j < bank_size_; ++j) {
+    for (size_t j = 0; j < bank_size_; ++j) {
       gains[i] = fmaf(filter_bank_[j][i], gains_eq_[j], gains[i]);
     }
   }
@@ -251,90 +252,95 @@
   noise_variance_.Step(in_block);
 }
 
-int IntelligibilityEnhancer::GetBankSize(int sample_rate, int erb_resolution) {
+size_t IntelligibilityEnhancer::GetBankSize(int sample_rate,
+                                            size_t erb_resolution) {
   float freq_limit = sample_rate / 2000.0f;
-  int erb_scale = ceilf(
-      11.17f * logf((freq_limit + 0.312f) / (freq_limit + 14.6575f)) + 43.0f);
+  size_t erb_scale = static_cast<size_t>(ceilf(
+      11.17f * logf((freq_limit + 0.312f) / (freq_limit + 14.6575f)) + 43.0f));
   return erb_scale * erb_resolution;
 }
 
 void IntelligibilityEnhancer::CreateErbBank() {
-  int lf = 1, rf = 4;
+  size_t lf = 1, rf = 4;
 
-  for (int i = 0; i < bank_size_; ++i) {
+  for (size_t i = 0; i < bank_size_; ++i) {
     float abs_temp = fabsf((i + 1.0f) / static_cast<float>(erb_resolution_));
     center_freqs_[i] = 676170.4f / (47.06538f - expf(0.08950404f * abs_temp));
     center_freqs_[i] -= 14678.49f;
   }
   float last_center_freq = center_freqs_[bank_size_ - 1];
-  for (int i = 0; i < bank_size_; ++i) {
+  for (size_t i = 0; i < bank_size_; ++i) {
     center_freqs_[i] *= 0.5f * sample_rate_hz_ / last_center_freq;
   }
 
-  for (int i = 0; i < bank_size_; ++i) {
+  for (size_t i = 0; i < bank_size_; ++i) {
     filter_bank_[i].resize(freqs_);
   }
 
-  for (int i = 1; i <= bank_size_; ++i) {
-    int lll, ll, rr, rrr;
-    lll = round(center_freqs_[max(1, i - lf) - 1] * freqs_ /
-                (0.5f * sample_rate_hz_));
-    ll =
-        round(center_freqs_[max(1, i) - 1] * freqs_ / (0.5f * sample_rate_hz_));
-    lll = min(freqs_, max(lll, 1)) - 1;
-    ll = min(freqs_, max(ll, 1)) - 1;
+  for (size_t i = 1; i <= bank_size_; ++i) {
+    size_t lll, ll, rr, rrr;
+    static const size_t kOne = 1;  // Avoids repeated static_cast<>s below.
+    lll = static_cast<size_t>(round(
+        center_freqs_[max(kOne, i - lf) - 1] * freqs_ /
+            (0.5f * sample_rate_hz_)));
+    ll = static_cast<size_t>(round(
+        center_freqs_[max(kOne, i) - 1] * freqs_ / (0.5f * sample_rate_hz_)));
+    lll = min(freqs_, max(lll, kOne)) - 1;
+    ll = min(freqs_, max(ll, kOne)) - 1;
 
-    rrr = round(center_freqs_[min(bank_size_, i + rf) - 1] * freqs_ /
-                (0.5f * sample_rate_hz_));
-    rr = round(center_freqs_[min(bank_size_, i + 1) - 1] * freqs_ /
-               (0.5f * sample_rate_hz_));
-    rrr = min(freqs_, max(rrr, 1)) - 1;
-    rr = min(freqs_, max(rr, 1)) - 1;
+    rrr = static_cast<size_t>(round(
+        center_freqs_[min(bank_size_, i + rf) - 1] * freqs_ /
+            (0.5f * sample_rate_hz_)));
+    rr = static_cast<size_t>(round(
+        center_freqs_[min(bank_size_, i + 1) - 1] * freqs_ /
+            (0.5f * sample_rate_hz_)));
+    rrr = min(freqs_, max(rrr, kOne)) - 1;
+    rr = min(freqs_, max(rr, kOne)) - 1;
 
     float step, element;
 
     step = 1.0f / (ll - lll);
     element = 0.0f;
-    for (int j = lll; j <= ll; ++j) {
+    for (size_t j = lll; j <= ll; ++j) {
       filter_bank_[i - 1][j] = element;
       element += step;
     }
     step = 1.0f / (rrr - rr);
     element = 1.0f;
-    for (int j = rr; j <= rrr; ++j) {
+    for (size_t j = rr; j <= rrr; ++j) {
       filter_bank_[i - 1][j] = element;
       element -= step;
     }
-    for (int j = ll; j <= rr; ++j) {
+    for (size_t j = ll; j <= rr; ++j) {
       filter_bank_[i - 1][j] = 1.0f;
     }
   }
 
   float sum;
-  for (int i = 0; i < freqs_; ++i) {
+  for (size_t i = 0; i < freqs_; ++i) {
     sum = 0.0f;
-    for (int j = 0; j < bank_size_; ++j) {
+    for (size_t j = 0; j < bank_size_; ++j) {
       sum += filter_bank_[j][i];
     }
-    for (int j = 0; j < bank_size_; ++j) {
+    for (size_t j = 0; j < bank_size_; ++j) {
       filter_bank_[j][i] /= sum;
     }
   }
 }
 
 void IntelligibilityEnhancer::SolveForGainsGivenLambda(float lambda,
-                                                       int start_freq,
+                                                       size_t start_freq,
                                                        float* sols) {
   bool quadratic = (kConfigRho < 1.0f);
   const float* var_x0 = filtered_clear_var_.get();
   const float* var_n0 = filtered_noise_var_.get();
 
-  for (int n = 0; n < start_freq; ++n) {
+  for (size_t n = 0; n < start_freq; ++n) {
     sols[n] = 1.0f;
   }
 
   // Analytic solution for optimal gains. See paper for derivation.
-  for (int n = start_freq - 1; n < bank_size_; ++n) {
+  for (size_t n = start_freq - 1; n < bank_size_; ++n) {
     float alpha0, beta0, gamma0;
     gamma0 = 0.5f * rho_[n] * var_x0[n] * var_n0[n] +
              lambda * var_x0[n] * var_n0[n] * var_n0[n];
@@ -351,18 +357,18 @@
 }
 
 void IntelligibilityEnhancer::FilterVariance(const float* var, float* result) {
-  DCHECK_GT(freqs_, 0);
-  for (int i = 0; i < bank_size_; ++i) {
+  DCHECK_GT(freqs_, 0u);
+  for (size_t i = 0; i < bank_size_; ++i) {
     result[i] = DotProduct(&filter_bank_[i][0], var, freqs_);
   }
 }
 
 float IntelligibilityEnhancer::DotProduct(const float* a,
                                           const float* b,
-                                          int length) {
+                                          size_t length) {
   float ret = 0.0f;
 
-  for (int i = 0; i < length; ++i) {
+  for (size_t i = 0; i < length; ++i) {
     ret = fmaf(a[i], b[i], ret);
   }
   return ret;
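
GetBankSize() above uses an ERB-rate-style formula, and the numbers work out as follows for a 16 kHz stream with this file's kErbResolution of 2: freq_limit = 8.0, ln(8.312 / 22.6575) is about -1.003, so erb_scale = ceil(11.17 * (-1.003) + 43.0) = 32, giving a 64-filter bank. A standalone restatement for checking the arithmetic (not the library code itself):

    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    size_t BankSizeSketch(int sample_rate, size_t erb_resolution) {
      const float freq_limit = sample_rate / 2000.0f;
      const size_t erb_scale = static_cast<size_t>(ceilf(
          11.17f * logf((freq_limit + 0.312f) / (freq_limit + 14.6575f)) +
          43.0f));
      return erb_scale * erb_resolution;
    }

    int main() {
      printf("%zu\n", BankSizeSketch(16000, 2));  // Prints 64.
      return 0;
    }
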
diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h
index 1a2ef23..1e9e35a 100644
--- a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h
+++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h
@@ -51,7 +51,7 @@
     int num_render_channels;
     intelligibility::VarianceArray::StepType var_type;
     float var_decay_rate;
-    int var_window_size;
+    size_t var_window_size;
     int analysis_rate;
     float gain_change_limit;
     float rho;
@@ -86,7 +86,7 @@
     // intelligibility enhancement, and writes result to |out_block|.
     void ProcessAudioBlock(const std::complex<float>* const* in_block,
                            int in_channels,
-                           int frames,
+                           size_t frames,
                            int out_channels,
                            std::complex<float>* const* out_block) override;
 
@@ -122,34 +122,34 @@
                          std::complex<float>* out_block);
 
   // Returns number of ERB filters.
-  static int GetBankSize(int sample_rate, int erb_resolution);
+  static size_t GetBankSize(int sample_rate, size_t erb_resolution);
 
   // Initializes ERB filterbank.
   void CreateErbBank();
 
   // Analytically solves quadratic for optimal gains given |lambda|.
   // Negative gains are set to 0. Stores the results in |sols|.
-  void SolveForGainsGivenLambda(float lambda, int start_freq, float* sols);
+  void SolveForGainsGivenLambda(float lambda, size_t start_freq, float* sols);
 
   // Computes variance across ERB filters from freq variance |var|.
   // Stores in |result|.
   void FilterVariance(const float* var, float* result);
 
   // Returns the dot product of the |length|-element arrays |a| and |b|.
-  static float DotProduct(const float* a, const float* b, int length);
+  static float DotProduct(const float* a, const float* b, size_t length);
 
-  const int freqs_;         // Num frequencies in frequency domain.
-  const int window_size_;   // Window size in samples; also the block size.
-  const int chunk_length_;  // Chunk size in samples.
-  const int bank_size_;     // Num ERB filters.
+  const size_t freqs_;         // Num frequencies in frequency domain.
+  const size_t window_size_;   // Window size in samples; also the block size.
+  const size_t chunk_length_;  // Chunk size in samples.
+  const size_t bank_size_;     // Num ERB filters.
   const int sample_rate_hz_;
   const int erb_resolution_;
   const int num_capture_channels_;
   const int num_render_channels_;
-  const int analysis_rate_;  // Num blocks before gains recalculated.
+  const int analysis_rate_;    // Num blocks before gains recalculated.
 
-  const bool active_;  // Whether render gains are being updated.
-                       // TODO(ekm): Add logic for updating |active_|.
+  const bool active_;          // Whether render gains are being updated.
+                               // TODO(ekm): Add logic for updating |active_|.
 
   intelligibility::VarianceArray clear_variance_;
   intelligibility::VarianceArray noise_variance_;
@@ -157,7 +157,7 @@
   rtc::scoped_ptr<float[]> filtered_noise_var_;
   std::vector<std::vector<float>> filter_bank_;
   rtc::scoped_ptr<float[]> center_freqs_;
-  int start_freq_;
+  size_t start_freq_;
   rtc::scoped_ptr<float[]> rho_;  // Production and interpretation SNR
                                   // for each ERB band.
   rtc::scoped_ptr<float[]> gains_eq_;  // Pre-filter modified gains.
diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer_unittest.cc b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer_unittest.cc
index cb0085d..ce146de 100644
--- a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer_unittest.cc
+++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer_unittest.cc
@@ -58,7 +58,7 @@
               "Test filterbank badly initialized.");
 
 // Target output for gain solving test. Generated with matlab.
-const int kTestStartFreq = 12;  // Lowest integral frequency for ERBs.
+const size_t kTestStartFreq = 12;  // Lowest integral frequency for ERBs.
 const float kTestZeroVar[] = {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,
                               1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f, 0.f,
                               0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
@@ -149,11 +149,11 @@
 
 // Tests ERB bank creation, comparing against matlab output.
 TEST_F(IntelligibilityEnhancerTest, TestErbCreation) {
-  ASSERT_EQ(static_cast<int>(arraysize(kTestCenterFreqs)), enh_->bank_size_);
-  for (int i = 0; i < enh_->bank_size_; ++i) {
+  ASSERT_EQ(arraysize(kTestCenterFreqs), enh_->bank_size_);
+  for (size_t i = 0; i < enh_->bank_size_; ++i) {
     EXPECT_NEAR(kTestCenterFreqs[i], enh_->center_freqs_[i], kMaxTestError);
-    ASSERT_EQ(static_cast<int>(arraysize(kTestFilterBank[0])), enh_->freqs_);
-    for (int j = 0; j < enh_->freqs_; ++j) {
+    ASSERT_EQ(arraysize(kTestFilterBank[0]), enh_->freqs_);
+    for (size_t j = 0; j < enh_->freqs_; ++j) {
       EXPECT_NEAR(kTestFilterBank[i][j], enh_->filter_bank_[i][j],
                   kMaxTestError);
     }
@@ -166,26 +166,26 @@
   ASSERT_EQ(kTestStartFreq, enh_->start_freq_);
   vector<float> sols(enh_->bank_size_);
   float lambda = -0.001f;
-  for (int i = 0; i < enh_->bank_size_; i++) {
+  for (size_t i = 0; i < enh_->bank_size_; i++) {
     enh_->filtered_clear_var_[i] = 0.0f;
     enh_->filtered_noise_var_[i] = 0.0f;
     enh_->rho_[i] = 0.02f;
   }
   enh_->SolveForGainsGivenLambda(lambda, enh_->start_freq_, &sols[0]);
-  for (int i = 0; i < enh_->bank_size_; i++) {
+  for (size_t i = 0; i < enh_->bank_size_; i++) {
     EXPECT_NEAR(kTestZeroVar[i], sols[i], kMaxTestError);
   }
-  for (int i = 0; i < enh_->bank_size_; i++) {
+  for (size_t i = 0; i < enh_->bank_size_; i++) {
     enh_->filtered_clear_var_[i] = static_cast<float>(i + 1);
     enh_->filtered_noise_var_[i] = static_cast<float>(enh_->bank_size_ - i);
   }
   enh_->SolveForGainsGivenLambda(lambda, enh_->start_freq_, &sols[0]);
-  for (int i = 0; i < enh_->bank_size_; i++) {
+  for (size_t i = 0; i < enh_->bank_size_; i++) {
     EXPECT_NEAR(kTestNonZeroVarLambdaTop[i], sols[i], kMaxTestError);
   }
   lambda = -1.0;
   enh_->SolveForGainsGivenLambda(lambda, enh_->start_freq_, &sols[0]);
-  for (int i = 0; i < enh_->bank_size_; i++) {
+  for (size_t i = 0; i < enh_->bank_size_; i++) {
     EXPECT_NEAR(kTestZeroVar[i], sols[i], kMaxTestError);
   }
 }
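
The static_cast<int>s dropped from TestErbCreation above fall out because arraysize() already has type size_t, which now matches the accessors' return types. The usual Chromium-style shape of that macro is sketched below (WebRTC keeps its own copy; this generic form is an assumption for illustration):

    #include <cstddef>

    // ArraySizeHelper is only declared, never defined: all that is used is
    // sizeof of its return type, a reference to a char array of N elements,
    // so arraysize(a) is a compile-time constant of type size_t.
    template <typename T, size_t N>
    char (&ArraySizeHelper(T (&array)[N]))[N];
    #define arraysize(array) (sizeof(ArraySizeHelper(array)))

    static const float kFreqs[] = {100.f, 200.f, 300.f};
    static_assert(arraysize(kFreqs) == 3, "three elements, typed as size_t");
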
diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc b/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc
index 2c2743f..7da9b95 100644
--- a/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc
+++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc
@@ -40,20 +40,20 @@
   return complex<float>(AddDitherIfZero(c.real()), AddDitherIfZero(c.imag()));
 }
 
-complex<float> NewMean(complex<float> mean, complex<float> data, int count) {
+complex<float> NewMean(complex<float> mean, complex<float> data, size_t count) {
   return mean + (data - mean) / static_cast<float>(count);
 }
 
-void AddToMean(complex<float> data, int count, complex<float>* mean) {
+void AddToMean(complex<float> data, size_t count, complex<float>* mean) {
   (*mean) = NewMean(*mean, data, count);
 }
 
 
-static const int kWindowBlockSize = 10;
+static const size_t kWindowBlockSize = 10;
 
-VarianceArray::VarianceArray(int num_freqs,
+VarianceArray::VarianceArray(size_t num_freqs,
                              StepType type,
-                             int window_size,
+                             size_t window_size,
                              float decay)
     : running_mean_(new complex<float>[num_freqs]()),
       running_mean_sq_(new complex<float>[num_freqs]()),
@@ -69,15 +69,15 @@
       array_mean_(0.0f),
       buffer_full_(false) {
   history_.reset(new rtc::scoped_ptr<complex<float>[]>[num_freqs_]());
-  for (int i = 0; i < num_freqs_; ++i) {
+  for (size_t i = 0; i < num_freqs_; ++i) {
     history_[i].reset(new complex<float>[window_size_]());
   }
   subhistory_.reset(new rtc::scoped_ptr<complex<float>[]>[num_freqs_]());
-  for (int i = 0; i < num_freqs_; ++i) {
+  for (size_t i = 0; i < num_freqs_; ++i) {
     subhistory_[i].reset(new complex<float>[window_size_]());
   }
   subhistory_sq_.reset(new rtc::scoped_ptr<complex<float>[]>[num_freqs_]());
-  for (int i = 0; i < num_freqs_; ++i) {
+  for (size_t i = 0; i < num_freqs_; ++i) {
     subhistory_sq_[i].reset(new complex<float>[window_size_]());
   }
   switch (type) {
@@ -104,7 +104,7 @@
 void VarianceArray::InfiniteStep(const complex<float>* data, bool skip_fudge) {
   array_mean_ = 0.0f;
   ++count_;
-  for (int i = 0; i < num_freqs_; ++i) {
+  for (size_t i = 0; i < num_freqs_; ++i) {
     complex<float> sample = data[i];
     if (!skip_fudge) {
       sample = zerofudge(sample);
@@ -132,7 +132,7 @@
 void VarianceArray::DecayStep(const complex<float>* data, bool /*dummy*/) {
   array_mean_ = 0.0f;
   ++count_;
-  for (int i = 0; i < num_freqs_; ++i) {
+  for (size_t i = 0; i < num_freqs_; ++i) {
     complex<float> sample = data[i];
     sample = zerofudge(sample);
 
@@ -157,9 +157,9 @@
 // Windowed variance computation. On each step, the variances for the
 // window are recomputed from scratch, using Welford's algorithm.
 void VarianceArray::WindowedStep(const complex<float>* data, bool /*dummy*/) {
-  int num = min(count_ + 1, window_size_);
+  size_t num = min(count_ + 1, window_size_);
   array_mean_ = 0.0f;
-  for (int i = 0; i < num_freqs_; ++i) {
+  for (size_t i = 0; i < num_freqs_; ++i) {
     complex<float> mean;
     float conj_sum = 0.0f;
 
@@ -167,7 +167,7 @@
 
     mean = history_[i][history_cursor_];
     variance_[i] = 0.0f;
-    for (int j = 1; j < num; ++j) {
+    for (size_t j = 1; j < num; ++j) {
       complex<float> sample =
           zerofudge(history_[i][(history_cursor_ + j) % window_size_]);
       sample = history_[i][(history_cursor_ + j) % window_size_];
@@ -191,8 +191,8 @@
 // history window and a new block is started. The variances for the window
 // are recomputed from scratch at each of these transitions.
 void VarianceArray::BlockedStep(const complex<float>* data, bool /*dummy*/) {
-  int blocks = min(window_size_, history_cursor_ + 1);
-  for (int i = 0; i < num_freqs_; ++i) {
+  size_t blocks = min(window_size_, history_cursor_ + 1);
+  for (size_t i = 0; i < num_freqs_; ++i) {
     AddToMean(data[i], count_ + 1, &sub_running_mean_[i]);
     AddToMean(data[i] * std::conj(data[i]), count_ + 1,
               &sub_running_mean_sq_[i]);
@@ -209,7 +209,7 @@
       sub_running_mean_sq_[i] = complex<float>(0.0f, 0.0f);
       running_mean_[i] = complex<float>(0.0f, 0.0f);
       running_mean_sq_[i] = complex<float>(0.0f, 0.0f);
-      for (int j = 0; j < min(window_size_, history_cursor_); ++j) {
+      for (size_t j = 0; j < min(window_size_, history_cursor_); ++j) {
         AddToMean(subhistory_[i][j], j + 1, &running_mean_[i]);
         AddToMean(subhistory_sq_[i][j], j + 1, &running_mean_sq_[i]);
       }
@@ -228,7 +228,7 @@
   // TODO(ekmeyerson) To mitigate potential divergence, add a counter so that
   // every so often the sums are recomputed from scratch by summing over all
   // elements instead of subtracting the oldest and adding the newest.
-  for (int i = 0; i < num_freqs_; ++i) {
+  for (size_t i = 0; i < num_freqs_; ++i) {
     sub_running_mean_[i] += data[i];
     sub_running_mean_sq_[i] += data[i] * std::conj(data[i]);
   }
@@ -239,7 +239,7 @@
   if (count_ >= kWindowBlockSize) {
     count_ = 0;
 
-    for (int i = 0; i < num_freqs_; ++i) {
+    for (size_t i = 0; i < num_freqs_; ++i) {
       running_mean_[i] -= subhistory_[i][history_cursor_];
       running_mean_sq_[i] -= subhistory_sq_[i][history_cursor_];
 
@@ -280,18 +280,18 @@
 
 void VarianceArray::ApplyScale(float scale) {
   array_mean_ = 0.0f;
-  for (int i = 0; i < num_freqs_; ++i) {
+  for (size_t i = 0; i < num_freqs_; ++i) {
     variance_[i] *= scale * scale;
     array_mean_ += (variance_[i] - array_mean_) / (i + 1);
   }
 }
 
-GainApplier::GainApplier(int freqs, float change_limit)
+GainApplier::GainApplier(size_t freqs, float change_limit)
     : num_freqs_(freqs),
       change_limit_(change_limit),
       target_(new float[freqs]()),
       current_(new float[freqs]()) {
-  for (int i = 0; i < freqs; ++i) {
+  for (size_t i = 0; i < freqs; ++i) {
     target_[i] = 1.0f;
     current_[i] = 1.0f;
   }
@@ -299,7 +299,7 @@
 
 void GainApplier::Apply(const complex<float>* in_block,
                         complex<float>* out_block) {
-  for (int i = 0; i < num_freqs_; ++i) {
+  for (size_t i = 0; i < num_freqs_; ++i) {
     float factor = sqrtf(fabsf(current_[i]));
     if (!std::isnormal(factor)) {
       factor = 1.0f;
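
NewMean() and AddToMean() above implement the standard running-mean recurrence mean_k = mean_(k-1) + (x_k - mean_(k-1)) / k, the mean half of the Welford update that WindowedStep()'s comment mentions. A real-valued sketch using the size_t count from the new signatures:

    #include <cstddef>
    #include <cstdio>

    float RunningMean(const float* data, size_t length) {
      float mean = 0.0f;
      for (size_t k = 0; k < length; ++k) {
        mean += (data[k] - mean) / static_cast<float>(k + 1);
      }
      return mean;
    }

    int main() {
      const float samples[] = {1.0f, 2.0f, 3.0f, 4.0f};
      printf("%g\n", RunningMean(samples, 4));  // Prints 2.5.
      return 0;
    }
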
diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h b/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h
index fa0e974..4ac1167 100644
--- a/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h
+++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h
@@ -36,10 +36,12 @@
 // mean |mean| with added |data|.
 std::complex<float> NewMean(std::complex<float> mean,
                             std::complex<float> data,
-                            int count);
+                            size_t count);
 
 // Updates |mean| with added |data|.
-void AddToMean(std::complex<float> data, int count, std::complex<float>* mean);
+void AddToMean(std::complex<float> data,
+               size_t count,
+               std::complex<float>* mean);
 
 // Internal helper for computing the variances of a stream of arrays.
 // The result is an array of variances per position: the i-th variance
@@ -70,7 +72,7 @@
   // |window_size| is the number of samples for kStepWindowed and
   // the number of blocks for kStepBlocked. |decay| is the forgetting factor
   // for kStepDecaying.
-  VarianceArray(int freqs, StepType type, int window_size, float decay);
+  VarianceArray(size_t freqs, StepType type, size_t window_size, float decay);
 
   // Add a new data point to the series and compute the new variances.
   // TODO(bercic) |skip_fudge| is a flag for kStepWindowed and kStepDecaying,
@@ -119,11 +121,11 @@
   rtc::scoped_ptr<float[]> variance_;
   rtc::scoped_ptr<float[]> conj_sum_;
 
-  const int num_freqs_;
-  const int window_size_;
+  const size_t num_freqs_;
+  const size_t window_size_;
   const float decay_;
-  int history_cursor_;
-  int count_;
+  size_t history_cursor_;
+  size_t count_;
   float array_mean_;
   bool buffer_full_;
   void (VarianceArray::*step_func_)(const std::complex<float>*, bool);
@@ -134,7 +136,7 @@
 // constrained by a limit on the magnitude of the changes.
 class GainApplier {
  public:
-  GainApplier(int freqs, float change_limit);
+  GainApplier(size_t freqs, float change_limit);
 
   // Copy |in_block| to |out_block|, multiplied by the current set of gains,
   // and step the current set of gains towards the target set.
@@ -145,7 +147,7 @@
   float* target() const { return target_.get(); }
 
  private:
-  const int num_freqs_;
+  const size_t num_freqs_;
   const float change_limit_;
   rtc::scoped_ptr<float[]> target_;
   rtc::scoped_ptr<float[]> current_;
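
GainApplier's Apply() is documented above as stepping the current gains toward |target_| under |change_limit_|. The exact per-band clamping is not visible in this diff, so the one-band sketch below is an assumption about the scheme, not a reading of the implementation:

    #include <algorithm>

    // Hypothetical limited-step update: move |current| toward |target| by
    // at most |change_limit| per block.
    float StepGain(float current, float target, float change_limit) {
      const float delta =
          std::min(change_limit, std::max(-change_limit, target - current));
      return current + delta;
    }
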
diff --git a/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc b/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc
index 6b6bfa0..e20429d 100644
--- a/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc
+++ b/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc
@@ -111,7 +111,7 @@
   config.sample_rate_hz = FLAGS_sample_rate;
   config.var_type = static_cast<VarianceArray::StepType>(FLAGS_clear_type);
   config.var_decay_rate = static_cast<float>(FLAGS_clear_alpha);
-  config.var_window_size = FLAGS_clear_window;
+  config.var_window_size = static_cast<size_t>(FLAGS_clear_window);
   config.analysis_rate = FLAGS_ana_rate;
   config.gain_change_limit = FLAGS_gain_limit;
   IntelligibilityEnhancer enh(config);
diff --git a/webrtc/modules/audio_processing/ns/include/noise_suppression.h b/webrtc/modules/audio_processing/ns/include/noise_suppression.h
index 41bf9ac..9dac56b 100644
--- a/webrtc/modules/audio_processing/ns/include/noise_suppression.h
+++ b/webrtc/modules/audio_processing/ns/include/noise_suppression.h
@@ -11,6 +11,8 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_INCLUDE_NOISE_SUPPRESSION_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_NS_INCLUDE_NOISE_SUPPRESSION_H_
 
+#include <stddef.h>
+
 #include "webrtc/typedefs.h"
 
 typedef struct NsHandleT NsHandle;
@@ -92,7 +94,7 @@
  */
 void WebRtcNs_Process(NsHandle* NS_inst,
                      const float* const* spframe,
-                     int num_bands,
+                     size_t num_bands,
                      float* const* outframe);
 
 /* Returns the internally used prior speech probability of the current frame.
diff --git a/webrtc/modules/audio_processing/ns/noise_suppression.c b/webrtc/modules/audio_processing/ns/noise_suppression.c
index cdecd62..13f1b2d 100644
--- a/webrtc/modules/audio_processing/ns/noise_suppression.c
+++ b/webrtc/modules/audio_processing/ns/noise_suppression.c
@@ -41,7 +41,7 @@
 
 void WebRtcNs_Process(NsHandle* NS_inst,
                       const float* const* spframe,
-                      int num_bands,
+                      size_t num_bands,
                       float* const* outframe) {
   WebRtcNs_ProcessCore((NoiseSuppressionC*)NS_inst, spframe, num_bands,
                        outframe);
diff --git a/webrtc/modules/audio_processing/ns/ns_core.c b/webrtc/modules/audio_processing/ns/ns_core.c
index 652f0fe..1d60914 100644
--- a/webrtc/modules/audio_processing/ns/ns_core.c
+++ b/webrtc/modules/audio_processing/ns/ns_core.c
@@ -217,7 +217,7 @@
 static void NoiseEstimation(NoiseSuppressionC* self,
                             float* magn,
                             float* noise) {
-  int i, s, offset;
+  size_t i, s, offset;
   float lmagn[HALF_ANAL_BLOCKL], delta;
 
   if (self->updates < END_STARTUP_LONG) {
@@ -522,8 +522,8 @@
 // Spectral flatness is returned in self->featureData[0].
 static void ComputeSpectralFlatness(NoiseSuppressionC* self,
                                     const float* magnIn) {
-  int i;
-  int shiftLP = 1;  // Option to remove first bin(s) from spectral measures.
+  size_t i;
+  size_t shiftLP = 1;  // Option to remove first bin(s) from spectral measures.
   float avgSpectralFlatnessNum, avgSpectralFlatnessDen, spectralTmp;
 
   // Compute spectral measures.
@@ -568,7 +568,7 @@
                        const float* noise,
                        float* snrLocPrior,
                        float* snrLocPost) {
-  int i;
+  size_t i;
 
   for (i = 0; i < self->magnLen; i++) {
     // Previous post SNR.
@@ -596,7 +596,7 @@
                                       const float* magnIn) {
   // avgDiffNormMagn = var(magnIn) - cov(magnIn, magnAvgPause)^2 /
   // var(magnAvgPause)
-  int i;
+  size_t i;
   float avgPause, avgMagn, covMagnPause, varPause, varMagn, avgDiffNormMagn;
 
   avgPause = 0.0;
@@ -643,7 +643,8 @@
                             float* probSpeechFinal,
                             const float* snrLocPrior,
                             const float* snrLocPost) {
-  int i, sgnMap;
+  size_t i;
+  int sgnMap;
   float invLrt, gainPrior, indPrior;
   float logLrtTimeAvgKsum, besselTmp;
   float indicator0, indicator1, indicator2;
@@ -802,7 +803,7 @@
                                 const float* snrLocPrior,
                                 const float* snrLocPost,
                                 float* noise) {
-  int i;
+  size_t i;
   float probSpeech, probNonSpeech;
   // Time-avg parameter for noise update.
   float gammaNoiseTmp = NOISE_UPDATE;
@@ -853,8 +854,8 @@
 // Output:
 //   * |buffer| is the updated buffer.
 static void UpdateBuffer(const float* frame,
-                         int frame_length,
-                         int buffer_length,
+                         size_t frame_length,
+                         size_t buffer_length,
                          float* buffer) {
   assert(buffer_length < 2 * frame_length);
 
@@ -885,12 +886,12 @@
 //   * |magn| is the calculated signal magnitude in the frequency domain.
 static void FFT(NoiseSuppressionC* self,
                 float* time_data,
-                int time_data_length,
-                int magnitude_length,
+                size_t time_data_length,
+                size_t magnitude_length,
                 float* real,
                 float* imag,
                 float* magn) {
-  int i;
+  size_t i;
 
   assert(magnitude_length == time_data_length / 2 + 1);
 
@@ -923,10 +924,10 @@
 static void IFFT(NoiseSuppressionC* self,
                  const float* real,
                  const float* imag,
-                 int magnitude_length,
-                 int time_data_length,
+                 size_t magnitude_length,
+                 size_t time_data_length,
                  float* time_data) {
-  int i;
+  size_t i;
 
   assert(time_data_length == 2 * (magnitude_length - 1));
 
@@ -948,8 +949,8 @@
 //   * |buffer| is the buffer over which the energy is calculated.
 //   * |length| is the length of the buffer.
 // Returns the calculated energy.
-static float Energy(const float* buffer, int length) {
-  int i;
+static float Energy(const float* buffer, size_t length) {
+  size_t i;
   float energy = 0.f;
 
   for (i = 0; i < length; ++i) {
@@ -968,9 +969,9 @@
 //   * |data_windowed| is the windowed data.
 static void Windowing(const float* window,
                       const float* data,
-                      int length,
+                      size_t length,
                       float* data_windowed) {
-  int i;
+  size_t i;
 
   for (i = 0; i < length; ++i) {
     data_windowed[i] = window[i] * data[i];
@@ -985,7 +986,7 @@
 static void ComputeDdBasedWienerFilter(const NoiseSuppressionC* self,
                                        const float* magn,
                                        float* theFilter) {
-  int i;
+  size_t i;
   float snrPrior, previousEstimateStsa, currentEstimateStsa;
 
   for (i = 0; i < self->magnLen; i++) {
@@ -1041,8 +1042,8 @@
 }
 
 void WebRtcNs_AnalyzeCore(NoiseSuppressionC* self, const float* speechFrame) {
-  int i;
-  const int kStartBand = 5;  // Skip first frequency bins during estimation.
+  size_t i;
+  const size_t kStartBand = 5;  // Skip first frequency bins during estimation.
   int updateParsFlag;
   float energy;
   float signalEnergy = 0.f;
@@ -1182,11 +1183,11 @@
 
 void WebRtcNs_ProcessCore(NoiseSuppressionC* self,
                           const float* const* speechFrame,
-                          int num_bands,
+                          size_t num_bands,
                           float* const* outFrame) {
   // Main routine for noise reduction.
   int flagHB = 0;
-  int i, j;
+  size_t i, j;
 
   float energy1, energy2, gain, factor, factor1, factor2;
   float fout[BLOCKL_MAX];
@@ -1210,7 +1211,7 @@
 
   const float* const* speechFrameHB = NULL;
   float* const* outFrameHB = NULL;
-  int num_high_bands = 0;
+  size_t num_high_bands = 0;
   if (num_bands > 1) {
     speechFrameHB = &speechFrame[1];
     outFrameHB = &outFrame[1];
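
The asserts in FFT() and IFFT() above encode the real-transform size relation this change re-types as size_t: an N-point real FFT yields N/2 + 1 magnitude bins, and the inverse relation recovers N. Compile-time checks for the 256-point analysis block used by the beamformer earlier in this change:

    #include <cstddef>

    constexpr size_t MagnitudeLength(size_t time_data_length) {
      return time_data_length / 2 + 1;
    }

    static_assert(MagnitudeLength(256) == 129,
                  "a 256-point real FFT yields 129 magnitude bins");
    static_assert(2 * (MagnitudeLength(256) - 1) == 256,
                  "IFFT() checks the inverse of the same relation");
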
diff --git a/webrtc/modules/audio_processing/ns/ns_core.h b/webrtc/modules/audio_processing/ns/ns_core.h
index 8a7992e..aba1c46 100644
--- a/webrtc/modules/audio_processing/ns/ns_core.h
+++ b/webrtc/modules/audio_processing/ns/ns_core.h
@@ -51,10 +51,10 @@
 
 typedef struct NoiseSuppressionC_ {
   uint32_t fs;
-  int blockLen;
-  int windShift;
-  int anaLen;
-  int magnLen;
+  size_t blockLen;
+  size_t windShift;
+  size_t anaLen;
+  size_t magnLen;
   int aggrMode;
   const float* window;
   float analyzeBuf[ANAL_BLOCKL_MAX];
@@ -74,7 +74,7 @@
   float denoiseBound;
   int gainmap;
   // FFT work arrays.
-  int ip[IP_LENGTH];
+  size_t ip[IP_LENGTH];
   float wfft[W_LENGTH];
 
   // Parameters for new method: some not needed, will reduce/cleanup later.
@@ -181,7 +181,7 @@
  */
 void WebRtcNs_ProcessCore(NoiseSuppressionC* self,
                           const float* const* inFrame,
-                          int num_bands,
+                          size_t num_bands,
                           float* const* outFrame);
 
 #ifdef __cplusplus
diff --git a/webrtc/modules/audio_processing/ns/nsx_core.c b/webrtc/modules/audio_processing/ns/nsx_core.c
index 0f9894e..ed6125a 100644
--- a/webrtc/modules/audio_processing/ns/nsx_core.c
+++ b/webrtc/modules/audio_processing/ns/nsx_core.c
@@ -68,7 +68,7 @@
 #endif  // WEBRTC_DETECT_NEON || WEBRTC_HAS_NEON
 
 // Skip first frequency bins during estimation. (0 <= value < 64)
-static const int kStartBand = 5;
+static const size_t kStartBand = 5;
 
 // hybrid Hanning & flat window
 static const int16_t kBlocks80w128x[128] = {
@@ -306,7 +306,7 @@
   int16_t tmp16 = 0;
   const int16_t kExp2Const = 11819; // Q13
 
-  int i = 0;
+  size_t i = 0;
 
   tmp16 = WebRtcSpl_MaxValueW16(inst->noiseEstLogQuantile + offset,
                                    inst->magnLen);
@@ -341,7 +341,7 @@
   const int16_t log2_const = 22713; // Q15
   const int16_t width_factor = 21845;
 
-  int i, s, offset;
+  size_t i, s, offset;
 
   tabind = inst->stages - inst->normData;
   assert(tabind < 9);
@@ -454,7 +454,7 @@
 
 // Filter the data in the frequency domain, and create spectrum.
 static void PrepareSpectrumC(NoiseSuppressionFixedC* inst, int16_t* freq_buf) {
-  int i = 0, j = 0;
+  size_t i = 0, j = 0;
 
   for (i = 0; i < inst->magnLen; i++) {
     inst->real[i] = (int16_t)((inst->real[i] *
@@ -477,7 +477,7 @@
 static void DenormalizeC(NoiseSuppressionFixedC* inst,
                          int16_t* in,
                          int factor) {
-  int i = 0;
+  size_t i = 0;
   int32_t tmp32 = 0;
   for (i = 0; i < inst->anaLen; i += 1) {
     tmp32 = WEBRTC_SPL_SHIFT_W32((int32_t)in[i],
@@ -491,7 +491,7 @@
 static void SynthesisUpdateC(NoiseSuppressionFixedC* inst,
                              int16_t* out_frame,
                              int16_t gain_factor) {
-  int i = 0;
+  size_t i = 0;
   int16_t tmp16a = 0;
   int16_t tmp16b = 0;
   int32_t tmp32 = 0;
@@ -523,7 +523,7 @@
 static void AnalysisUpdateC(NoiseSuppressionFixedC* inst,
                             int16_t* out,
                             int16_t* new_speech) {
-  int i = 0;
+  size_t i = 0;
 
   // For lower band update analysis buffer.
   memcpy(inst->analysisBuffer, inst->analysisBuffer + inst->blockLen10ms,
@@ -542,7 +542,7 @@
 static void NormalizeRealBufferC(NoiseSuppressionFixedC* inst,
                                  const int16_t* in,
                                  int16_t* out) {
-  int i = 0;
+  size_t i = 0;
   assert(inst->normData >= 0);
   for (i = 0; i < inst->anaLen; ++i) {
     out[i] = in[i] << inst->normData;  // Q(normData)
@@ -1026,7 +1026,7 @@
 
   int16_t zeros, frac, intPart;
 
-  int i;
+  size_t i;
 
   // for flatness
   avgSpectralFlatnessNum = 0;
@@ -1099,7 +1099,8 @@
 
   int16_t tmp16no1;
 
-  int i, norm32, nShifts;
+  size_t i;
+  int norm32, nShifts;
 
   avgPauseFX = 0;
   maxPause = 0;
@@ -1198,7 +1199,7 @@
   int16_t   matrix_determinant = 0;
   int16_t   maxWinData;
 
-  int i, j;
+  size_t i, j;
   int zeros;
   int net_norm = 0;
   int right_shifts_in_magnU16 = 0;
@@ -1430,7 +1431,7 @@
   int16_t energyRatio;
   int16_t gainFactor, gainFactor1, gainFactor2;
 
-  int i;
+  size_t i;
   int outCIFFT;
   int scaleEnergyOut = 0;
 
@@ -1531,7 +1532,7 @@
   int16_t avgProbSpeechHB, gainModHB, avgFilterGainHB, gainTimeDomainHB;
   int16_t pink_noise_exp_avg = 0;
 
-  int i, j;
+  size_t i, j;
   int nShifts, postShifts;
   int norm32no1, norm32no2;
   int flag, sign;
@@ -1559,11 +1560,11 @@
 
   const short* const* speechFrameHB = NULL;
   short* const* outFrameHB = NULL;
-  int num_high_bands = 0;
+  size_t num_high_bands = 0;
   if (num_bands > 1) {
     speechFrameHB = &speechFrame[1];
     outFrameHB = &outFrame[1];
-    num_high_bands = num_bands - 1;
+    num_high_bands = (size_t)(num_bands - 1);
   }
 
   // Store speechFrame and transform to frequency domain
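
The (size_t)(num_bands - 1) cast above is only meaningful because of the num_bands > 1 guard before it; num_bands stays signed in this fixed-point API, and converting first and checking later would turn a zero band count into a huge unsigned value. A sketch of the two orderings (illustrative):

  size_t num_high_bands = 0;
  if (num_bands > 1)
    num_high_bands = (size_t)(num_bands - 1);  // guard proves the value is positive
  // Wrong order: with num_bands == 0 this would yield SIZE_MAX.
  // size_t broken = (size_t)(num_bands - 1);
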
diff --git a/webrtc/modules/audio_processing/ns/nsx_core.h b/webrtc/modules/audio_processing/ns/nsx_core.h
index 33b9a32..f463dbb 100644
--- a/webrtc/modules/audio_processing/ns/nsx_core.h
+++ b/webrtc/modules/audio_processing/ns/nsx_core.h
@@ -34,9 +34,9 @@
   int16_t                 noiseEstCounter[SIMULT];
   int16_t                 noiseEstQuantile[HALF_ANAL_BLOCKL];
 
-  int                     anaLen;
-  int                     anaLen2;
-  int                     magnLen;
+  size_t                  anaLen;
+  size_t                  anaLen2;
+  size_t                  magnLen;
   int                     aggrMode;
   int                     stages;
   int                     initFlag;
@@ -98,7 +98,7 @@
   int                     qNoise;
   int                     prevQNoise;
   int                     prevQMagn;
-  int                     blockLen10ms;
+  size_t                  blockLen10ms;
 
   int16_t                 real[ANAL_BLOCKL_MAX];
   int16_t                 imag[ANAL_BLOCKL_MAX];
diff --git a/webrtc/modules/audio_processing/ns/nsx_core_c.c b/webrtc/modules/audio_processing/ns/nsx_core_c.c
index 9c929d1..14322d3 100644
--- a/webrtc/modules/audio_processing/ns/nsx_core_c.c
+++ b/webrtc/modules/audio_processing/ns/nsx_core_c.c
@@ -33,7 +33,8 @@
   int32_t logLrtTimeAvgKsumFX;
   int16_t indPriorFX16;
   int16_t tmp16, tmp16no1, tmp16no2, tmpIndFX, tableIndex, frac, intPart;
-  int i, normTmp, normTmp2, nShifts;
+  size_t i;
+  int normTmp, normTmp2, nShifts;
 
   // compute feature based on average LR factor
   // this is the average over all frequencies of the smooth log LRT
diff --git a/webrtc/modules/audio_processing/ns/nsx_core_mips.c b/webrtc/modules/audio_processing/ns/nsx_core_mips.c
index 0e4b28f..d99be87 100644
--- a/webrtc/modules/audio_processing/ns/nsx_core_mips.c
+++ b/webrtc/modules/audio_processing/ns/nsx_core_mips.c
@@ -32,7 +32,8 @@
   int32_t logLrtTimeAvgKsumFX;
   int16_t indPriorFX16;
   int16_t tmp16, tmp16no1, tmp16no2, tmpIndFX, tableIndex, frac;
-  int i, normTmp, nShifts;
+  size_t i;
+  int normTmp, nShifts;
 
   int32_t r0, r1, r2, r3, r4, r5, r6, r7, r8, r9;
   int32_t const_max = 0x7fffffff;
@@ -331,7 +332,7 @@
                                    int16_t* out,
                                    int16_t* new_speech) {
   int iters, after;
-  int anaLen = inst->anaLen;
+  int anaLen = (int)inst->anaLen;
   int *window = (int*)inst->window;
   int *anaBuf = (int*)inst->analysisBuffer;
   int *outBuf = (int*)out;
@@ -504,7 +505,7 @@
 void WebRtcNsx_SynthesisUpdate_mips(NoiseSuppressionFixedC* inst,
                                     int16_t* out_frame,
                                     int16_t gain_factor) {
-  int iters = inst->blockLen10ms >> 2;
+  int iters = (int)inst->blockLen10ms >> 2;
   int after = inst->blockLen10ms & 3;
   int r0, r1, r2, r3, r4, r5, r6, r7;
   int16_t *window = (int16_t*)inst->window;
@@ -861,7 +862,7 @@
                                 int16_t* in,
                                 int factor) {
   int32_t r0, r1, r2, r3, t0;
-  int len = inst->anaLen;
+  int len = (int)inst->anaLen;
   int16_t *out = &inst->real[0];
   int shift = factor - inst->normData;
 
@@ -951,7 +952,7 @@
                                         const int16_t* in,
                                         int16_t* out) {
   int32_t r0, r1, r2, r3, t0;
-  int len = inst->anaLen;
+  int len = (int)inst->anaLen;
   int shift = inst->normData;
 
   __asm __volatile (
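
The MIPS kernels keep plain int counters because the inline assembly below was written around signed registers; now that the struct fields are size_t, the conversion happens once at the top of each function. A sketch of that boundary discipline (the INT_MAX guard is an illustration, not part of this patch):

  #include <assert.h>
  #include <limits.h>

  static void KernelBoundary(const NoiseSuppressionFixedC* inst) {
    assert(inst->anaLen <= (size_t)INT_MAX);  // hypothetical guard
    int len = (int)inst->anaLen;  // one narrowing conversion at the boundary
    (void)len;  // the hand-tuned assembly loop uses the signed len from here on
  }
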
diff --git a/webrtc/modules/audio_processing/ns/nsx_core_neon.c b/webrtc/modules/audio_processing/ns/nsx_core_neon.c
index 9675d11..65788ae 100644
--- a/webrtc/modules/audio_processing/ns/nsx_core_neon.c
+++ b/webrtc/modules/audio_processing/ns/nsx_core_neon.c
@@ -141,7 +141,7 @@
   const int16_t log2_const = 22713;
   const int16_t width_factor = 21845;
 
-  int i, s, offset;
+  size_t i, s, offset;
 
   tabind = inst->stages - inst->normData;
   assert(tabind < 9);
diff --git a/webrtc/modules/audio_processing/rms_level.cc b/webrtc/modules/audio_processing/rms_level.cc
index 14136bf3..70c4422 100644
--- a/webrtc/modules/audio_processing/rms_level.cc
+++ b/webrtc/modules/audio_processing/rms_level.cc
@@ -28,14 +28,14 @@
   sample_count_ = 0;
 }
 
-void RMSLevel::Process(const int16_t* data, int length) {
-  for (int i = 0; i < length; ++i) {
+void RMSLevel::Process(const int16_t* data, size_t length) {
+  for (size_t i = 0; i < length; ++i) {
     sum_square_ += data[i] * data[i];
   }
   sample_count_ += length;
 }
 
-void RMSLevel::ProcessMuted(int length) {
+void RMSLevel::ProcessMuted(size_t length) {
   sample_count_ += length;
 }
 
diff --git a/webrtc/modules/audio_processing/rms_level.h b/webrtc/modules/audio_processing/rms_level.h
index 055d271..12fa212 100644
--- a/webrtc/modules/audio_processing/rms_level.h
+++ b/webrtc/modules/audio_processing/rms_level.h
@@ -11,6 +11,8 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
 
+#include <cstddef>
+
 #include "webrtc/typedefs.h"
 
 namespace webrtc {
@@ -35,11 +37,11 @@
   void Reset();
 
   // Pass each chunk of audio to Process() to accumulate the level.
-  void Process(const int16_t* data, int length);
+  void Process(const int16_t* data, size_t length);
 
   // If all |length| samples have a magnitude of zero, this is a shortcut to
   // avoid some computation.
-  void ProcessMuted(int length);
+  void ProcessMuted(size_t length);
 
   // Computes the RMS level over all data passed to Process() since the last
   // call to RMS(). The returned value is positive but should be interpreted as
@@ -48,7 +50,7 @@
 
  private:
   float sum_square_;
-  int sample_count_;
+  size_t sample_count_;
 };
 
 }  // namespace webrtc
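
With the declarations above, a typical call site looks like the following sketch (the chunk contents are hypothetical, and int is assumed for RMS(), whose declaration falls between the hunks above):

  webrtc::RMSLevel level;
  const size_t kChunkSize = 160;     // 10 ms at 16 kHz
  int16_t chunk[kChunkSize] = {0};
  level.Process(chunk, kChunkSize);  // accumulate one chunk
  level.ProcessMuted(kChunkSize);    // cheap path for known-silent chunks
  int rms = level.RMS();             // positive; interpret as -dBFS
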
diff --git a/webrtc/modules/audio_processing/splitting_filter.cc b/webrtc/modules/audio_processing/splitting_filter.cc
index 00a1239..06af56e 100644
--- a/webrtc/modules/audio_processing/splitting_filter.cc
+++ b/webrtc/modules/audio_processing/splitting_filter.cc
@@ -17,8 +17,8 @@
 namespace webrtc {
 
 SplittingFilter::SplittingFilter(int num_channels,
-                                 int num_bands,
-                                 int num_frames)
+                                 size_t num_bands,
+                                 size_t num_frames)
     : num_bands_(num_bands) {
   CHECK(num_bands_ == 2 || num_bands_ == 3);
   if (num_bands_ == 2) {
diff --git a/webrtc/modules/audio_processing/splitting_filter.h b/webrtc/modules/audio_processing/splitting_filter.h
index bc036c3..51088d5 100644
--- a/webrtc/modules/audio_processing/splitting_filter.h
+++ b/webrtc/modules/audio_processing/splitting_filter.h
@@ -45,7 +45,7 @@
 // used.
 class SplittingFilter {
  public:
-  SplittingFilter(int num_channels, int num_bands, int num_frames);
+  SplittingFilter(int num_channels, size_t num_bands, size_t num_frames);
 
   void Analysis(const IFChannelBuffer* data, IFChannelBuffer* bands);
   void Synthesis(const IFChannelBuffer* bands, IFChannelBuffer* data);
@@ -58,7 +58,7 @@
   void ThreeBandsSynthesis(const IFChannelBuffer* bands, IFChannelBuffer* data);
   void InitBuffers();
 
-  const int num_bands_;
+  const size_t num_bands_;
   std::vector<TwoBandsStates> two_bands_states_;
   ScopedVector<ThreeBandFilterBank> three_band_filter_banks_;
 };
diff --git a/webrtc/modules/audio_processing/splitting_filter_unittest.cc b/webrtc/modules/audio_processing/splitting_filter_unittest.cc
index 0498cc6..e7af651 100644
--- a/webrtc/modules/audio_processing/splitting_filter_unittest.cc
+++ b/webrtc/modules/audio_processing/splitting_filter_unittest.cc
@@ -20,8 +20,8 @@
 namespace webrtc {
 namespace {
 
-const int kSamplesPer16kHzChannel = 160;
-const int kSamplesPer48kHzChannel = 480;
+const size_t kSamplesPer16kHzChannel = 160;
+const size_t kSamplesPer48kHzChannel = 480;
 
 }  // namespace
 
@@ -35,26 +35,26 @@
 TEST(SplittingFilterTest, SplitsIntoThreeBandsAndReconstructs) {
   static const int kChannels = 1;
   static const int kSampleRateHz = 48000;
-  static const int kNumBands = 3;
+  static const size_t kNumBands = 3;
   static const int kFrequenciesHz[kNumBands] = {1000, 12000, 18000};
   static const float kAmplitude = 8192.f;
-  static const int kChunks = 8;
+  static const size_t kChunks = 8;
   SplittingFilter splitting_filter(kChannels,
                                    kNumBands,
                                    kSamplesPer48kHzChannel);
   IFChannelBuffer in_data(kSamplesPer48kHzChannel, kChannels, kNumBands);
   IFChannelBuffer bands(kSamplesPer48kHzChannel, kChannels, kNumBands);
   IFChannelBuffer out_data(kSamplesPer48kHzChannel, kChannels, kNumBands);
-  for (int i = 0; i < kChunks; ++i) {
+  for (size_t i = 0; i < kChunks; ++i) {
     // Input signal generation.
     bool is_present[kNumBands];
     memset(in_data.fbuf()->channels()[0],
            0,
            kSamplesPer48kHzChannel * sizeof(in_data.fbuf()->channels()[0][0]));
-    for (int j = 0; j < kNumBands; ++j) {
-      is_present[j] = i & (1 << j);
+    for (size_t j = 0; j < kNumBands; ++j) {
+      is_present[j] = i & (static_cast<size_t>(1) << j);
       float amplitude = is_present[j] ? kAmplitude : 0.f;
-      for (int k = 0; k < kSamplesPer48kHzChannel; ++k) {
+      for (size_t k = 0; k < kSamplesPer48kHzChannel; ++k) {
         in_data.fbuf()->channels()[0][k] +=
             amplitude * sin(2.f * M_PI * kFrequenciesHz[j] *
                 (i * kSamplesPer48kHzChannel + k) / kSampleRateHz);
@@ -64,9 +64,9 @@
     splitting_filter.Analysis(&in_data, &bands);
     // Energy calculation.
     float energy[kNumBands];
-    for (int j = 0; j < kNumBands; ++j) {
+    for (size_t j = 0; j < kNumBands; ++j) {
       energy[j] = 0.f;
-      for (int k = 0; k < kSamplesPer16kHzChannel; ++k) {
+      for (size_t k = 0; k < kSamplesPer16kHzChannel; ++k) {
         energy[j] += bands.fbuf_const()->channels(j)[0][k] *
                      bands.fbuf_const()->channels(j)[0][k];
       }
@@ -81,9 +81,9 @@
     splitting_filter.Synthesis(&bands, &out_data);
     // Delay and cross correlation estimation.
     float xcorr = 0.f;
-    for (int delay = 0; delay < kSamplesPer48kHzChannel; ++delay) {
+    for (size_t delay = 0; delay < kSamplesPer48kHzChannel; ++delay) {
       float tmpcorr = 0.f;
-      for (int j = delay; j < kSamplesPer48kHzChannel; ++j) {
+      for (size_t j = delay; j < kSamplesPer48kHzChannel; ++j) {
         tmpcorr += in_data.fbuf_const()->channels()[0][j - delay] *
                    out_data.fbuf_const()->channels()[0][j];
       }
@@ -94,7 +94,7 @@
     }
     // High cross correlation check.
     bool any_present = false;
-    for (int j = 0; j < kNumBands; ++j) {
+    for (size_t j = 0; j < kNumBands; ++j) {
       any_present |= is_present[j];
     }
     if (any_present) {
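
The static_cast<size_t>(1) << j above matters little for j < kNumBands, but it encodes the general rule that the left operand's promoted type decides the width and signedness of a shift. A sketch of the failure mode the cast rules out (illustrative values, not from this test):

  size_t j = 40;
  // size_t bad = 1 << j;                     // shifts a 32-bit signed int: UB
  size_t good = static_cast<size_t>(1) << j;  // shift performed in size_t
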
diff --git a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
index 8384c36..d82ea31 100644
--- a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
+++ b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
@@ -129,21 +129,23 @@
 }
 
 void SetFrameTo(AudioFrame* frame, int16_t value) {
-  for (int i = 0; i < frame->samples_per_channel_ * frame->num_channels_; ++i) {
+  for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
+       ++i) {
     frame->data_[i] = value;
   }
 }
 
 void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) {
   ASSERT_EQ(2, frame->num_channels_);
-  for (int i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
+  for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
     frame->data_[i] = left;
     frame->data_[i + 1] = right;
   }
 }
 
 void ScaleFrame(AudioFrame* frame, float scale) {
-  for (int i = 0; i < frame->samples_per_channel_ * frame->num_channels_; ++i) {
+  for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
+       ++i) {
     frame->data_[i] = FloatS16ToS16(frame->data_[i] * scale);
   }
 }
@@ -676,13 +678,18 @@
   }
   // Calculate expected delay estimate and acceptable regions. Further,
   // limit them w.r.t. AEC delay estimation support.
-  const int samples_per_ms = std::min(16, frame_->samples_per_channel_ / 10);
+  const size_t samples_per_ms =
+      std::min(static_cast<size_t>(16), frame_->samples_per_channel_ / 10);
   int expected_median = std::min(std::max(delay_ms - system_delay_ms,
                                           delay_min), delay_max);
-  int expected_median_high = std::min(std::max(
-      expected_median + 96 / samples_per_ms, delay_min), delay_max);
-  int expected_median_low = std::min(std::max(
-      expected_median - 96 / samples_per_ms, delay_min), delay_max);
+  int expected_median_high = std::min(
+      std::max(expected_median + static_cast<int>(96 / samples_per_ms),
+               delay_min),
+      delay_max);
+  int expected_median_low = std::min(
+      std::max(expected_median - static_cast<int>(96 / samples_per_ms),
+               delay_min),
+      delay_max);
   // Verify delay metrics.
   int median;
   int std;
@@ -998,8 +1005,8 @@
          2,
          false);
     // Sampling frequency dependent variables.
-    const int num_ms_per_block = std::max(4,
-                                          640 / frame_->samples_per_channel_);
+    const int num_ms_per_block =
+        std::max(4, static_cast<int>(640 / frame_->samples_per_channel_));
     const int delay_min_ms = -kLookaheadBlocks * num_ms_per_block;
     const int delay_max_ms = (kMaxDelayBlocks - 1) * num_ms_per_block;
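
Both kinds of casts in this file follow from the same rule: mixing int and size_t in one expression silently converts the int operand, and std::min/std::max refuse to deduce a common type at all. A compile-level sketch (names local to the sketch):

  int expected_median = 5;
  int delay_min = -64;
  size_t samples_per_ms = 16;
  // std::max(expected_median - 96 / samples_per_ms, delay_min);
  //   ^ does not compile: int - size_t yields size_t, and
  //     std::max(size_t, int) cannot deduce its template argument.
  int low = std::max(expected_median - static_cast<int>(96 / samples_per_ms),
                     delay_min);  // arithmetic stays signed and may go negative
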
 
diff --git a/webrtc/modules/audio_processing/three_band_filter_bank.cc b/webrtc/modules/audio_processing/three_band_filter_bank.cc
index efd7a79..e81e519 100644
--- a/webrtc/modules/audio_processing/three_band_filter_bank.cc
+++ b/webrtc/modules/audio_processing/three_band_filter_bank.cc
@@ -42,8 +42,8 @@
 namespace webrtc {
 namespace {
 
-const int kNumBands = 3;
-const int kSparsity = 4;
+const size_t kNumBands = 3;
+const size_t kSparsity = 4;
 
 // Factors to take into account when choosing |kNumCoeffs|:
 //   1. Higher |kNumCoeffs| means faster transition, which ensures less
@@ -53,7 +53,7 @@
 //      |kNumBands| * |kSparsity| * |kNumCoeffs| / 2, so it increases linearly
 //      with |kNumCoeffs|.
 //   3. The computational complexity also increases linearly with |kNumCoeffs|.
-const int kNumCoeffs = 4;
+const size_t kNumCoeffs = 4;
 
 // The Matlab code to generate these |kLowpassCoeffs| is:
 //
@@ -85,8 +85,11 @@
 // Downsamples |in| into |out|, taking one sample every |kNumBands| starting
 // from |offset|. |split_length| is the |out| length. |in| has to be at least
 // |kNumBands| * |split_length| long.
-void Downsample(const float* in, int split_length, int offset, float* out) {
-  for (int i = 0; i < split_length; ++i) {
+void Downsample(const float* in,
+                size_t split_length,
+                size_t offset,
+                float* out) {
+  for (size_t i = 0; i < split_length; ++i) {
     out[i] = in[kNumBands * i + offset];
   }
 }
@@ -94,8 +97,8 @@
 // Upsamples |in| into |out|, scaling by |kNumBands| and accumulating it every
 // |kNumBands| starting from |offset|. |split_length| is the |in| length. |out|
 // has to be at least |kNumBands| * |split_length| long.
-void Upsample(const float* in, int split_length, int offset, float* out) {
-  for (int i = 0; i < split_length; ++i) {
+void Upsample(const float* in, size_t split_length, size_t offset, float* out) {
+  for (size_t i = 0; i < split_length; ++i) {
     out[kNumBands * i + offset] += kNumBands * in[i];
   }
 }
@@ -105,11 +108,11 @@
 // Because the low-pass filter prototype has half bandwidth it is possible to
 // use a DCT to shift it in both directions at the same time, to the center
 // frequencies [1 / 12, 3 / 12, 5 / 12].
-ThreeBandFilterBank::ThreeBandFilterBank(int length)
+ThreeBandFilterBank::ThreeBandFilterBank(size_t length)
     : in_buffer_(rtc::CheckedDivExact(length, kNumBands)),
       out_buffer_(in_buffer_.size()) {
-  for (int i = 0; i < kSparsity; ++i) {
-    for (int j = 0; j < kNumBands; ++j) {
+  for (size_t i = 0; i < kSparsity; ++i) {
+    for (size_t j = 0; j < kNumBands; ++j) {
       analysis_filters_.push_back(new SparseFIRFilter(
           kLowpassCoeffs[i * kNumBands + j], kNumCoeffs, kSparsity, i));
       synthesis_filters_.push_back(new SparseFIRFilter(
@@ -119,7 +122,7 @@
   dct_modulation_.resize(kNumBands * kSparsity);
   for (size_t i = 0; i < dct_modulation_.size(); ++i) {
     dct_modulation_[i].resize(kNumBands);
-    for (int j = 0; j < kNumBands; ++j) {
+    for (size_t j = 0; j < kNumBands; ++j) {
       dct_modulation_[i][j] =
           2.f * cos(2.f * M_PI * i * (2.f * j + 1.f) / dct_modulation_.size());
     }
@@ -133,17 +136,16 @@
 //      of |kSparsity|.
 //   3. Modulating with cosines and accumulating to get the desired band.
 void ThreeBandFilterBank::Analysis(const float* in,
-                                   int length,
+                                   size_t length,
                                    float* const* out) {
-  CHECK_EQ(static_cast<int>(in_buffer_.size()),
-           rtc::CheckedDivExact(length, kNumBands));
-  for (int i = 0; i < kNumBands; ++i) {
+  CHECK_EQ(in_buffer_.size(), rtc::CheckedDivExact(length, kNumBands));
+  for (size_t i = 0; i < kNumBands; ++i) {
     memset(out[i], 0, in_buffer_.size() * sizeof(*out[i]));
   }
-  for (int i = 0; i < kNumBands; ++i) {
+  for (size_t i = 0; i < kNumBands; ++i) {
     Downsample(in, in_buffer_.size(), kNumBands - i - 1, &in_buffer_[0]);
-    for (int j = 0; j < kSparsity; ++j) {
-      const int offset = i + j * kNumBands;
+    for (size_t j = 0; j < kSparsity; ++j) {
+      const size_t offset = i + j * kNumBands;
       analysis_filters_[offset]->Filter(&in_buffer_[0],
                                         in_buffer_.size(),
                                         &out_buffer_[0]);
@@ -159,13 +161,13 @@
 //      |kSparsity| signals with different delays.
 //   3. Parallel to serial upsampling by a factor of |kNumBands|.
 void ThreeBandFilterBank::Synthesis(const float* const* in,
-                                    int split_length,
+                                    size_t split_length,
                                     float* out) {
-  CHECK_EQ(static_cast<int>(in_buffer_.size()), split_length);
+  CHECK_EQ(in_buffer_.size(), split_length);
   memset(out, 0, kNumBands * in_buffer_.size() * sizeof(*out));
-  for (int i = 0; i < kNumBands; ++i) {
-    for (int j = 0; j < kSparsity; ++j) {
-      const int offset = i + j * kNumBands;
+  for (size_t i = 0; i < kNumBands; ++i) {
+    for (size_t j = 0; j < kSparsity; ++j) {
+      const size_t offset = i + j * kNumBands;
       UpModulate(in, in_buffer_.size(), offset, &in_buffer_[0]);
       synthesis_filters_[offset]->Filter(&in_buffer_[0],
                                          in_buffer_.size(),
@@ -181,11 +183,11 @@
 // cosines used for modulation. |split_length| is the length of |in| and each
 // band of |out|.
 void ThreeBandFilterBank::DownModulate(const float* in,
-                                       int split_length,
-                                       int offset,
+                                       size_t split_length,
+                                       size_t offset,
                                        float* const* out) {
-  for (int i = 0; i < kNumBands; ++i) {
-    for (int j = 0; j < split_length; ++j) {
+  for (size_t i = 0; i < kNumBands; ++i) {
+    for (size_t j = 0; j < split_length; ++j) {
       out[i][j] += dct_modulation_[offset][i] * in[j];
     }
   }
@@ -196,12 +198,12 @@
 // |offset| is the index in the period of the cosines used for modulation.
 // |split_length| is the length of each band of |in| and |out|.
 void ThreeBandFilterBank::UpModulate(const float* const* in,
-                                     int split_length,
-                                     int offset,
+                                     size_t split_length,
+                                     size_t offset,
                                      float* out) {
   memset(out, 0, split_length * sizeof(*out));
-  for (int i = 0; i < kNumBands; ++i) {
-    for (int j = 0; j < split_length; ++j) {
+  for (size_t i = 0; i < kNumBands; ++i) {
+    for (size_t j = 0; j < split_length; ++j) {
       out[j] += dct_modulation_[offset][i] * in[i][j];
     }
   }
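
The modulation table built in the constructor is what produces the three bands: with N = kNumBands * kSparsity = 12, the factor 2 * cos(2 * pi * i * (2j + 1) / N) shifts the half-band lowpass prototype to center frequency (2j + 1) / 12 of the full rate, i.e. 1/12, 3/12 and 5/12 for j = 0, 1, 2, matching the comment above the constructor. As a standalone sketch:

  // Modulating cosine for phase index i and band j (N = 12 as in this file).
  float Modulation(size_t i, size_t j) {
    return 2.f * cos(2.f * M_PI * i * (2.f * j + 1.f) / 12.f);
  }
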
diff --git a/webrtc/modules/audio_processing/three_band_filter_bank.h b/webrtc/modules/audio_processing/three_band_filter_bank.h
index 7677448..18e8aee 100644
--- a/webrtc/modules/audio_processing/three_band_filter_bank.h
+++ b/webrtc/modules/audio_processing/three_band_filter_bank.h
@@ -34,26 +34,26 @@
 // depending on the input signal after compensating for the delay.
 class ThreeBandFilterBank final {
  public:
-  explicit ThreeBandFilterBank(int length);
+  explicit ThreeBandFilterBank(size_t length);
 
   // Splits |in| into 3 downsampled frequency bands in |out|.
   // |length| is the |in| length. Each of the 3 bands of |out| has to have a
   // length of |length| / 3.
-  void Analysis(const float* in, int length, float* const* out);
+  void Analysis(const float* in, size_t length, float* const* out);
 
   // Merges the 3 downsampled frequency bands in |in| into |out|.
   // |split_length| is the length of each band of |in|. |out| has to have at
   // least a length of 3 * |split_length|.
-  void Synthesis(const float* const* in, int split_length, float* out);
+  void Synthesis(const float* const* in, size_t split_length, float* out);
 
  private:
   void DownModulate(const float* in,
-                    int split_length,
-                    int offset,
+                    size_t split_length,
+                    size_t offset,
                     float* const* out);
   void UpModulate(const float* const* in,
-                  int split_length,
-                  int offset,
+                  size_t split_length,
+                  size_t offset,
                   float* out);
 
   std::vector<float> in_buffer_;
diff --git a/webrtc/modules/audio_processing/transient/transient_suppressor.cc b/webrtc/modules/audio_processing/transient/transient_suppressor.cc
index 2f79a20..206d14d 100644
--- a/webrtc/modules/audio_processing/transient/transient_suppressor.cc
+++ b/webrtc/modules/audio_processing/transient/transient_suppressor.cc
@@ -124,7 +124,7 @@
          analysis_length_ * num_channels_ * sizeof(out_buffer_[0]));
   // ip[0] must be zero to trigger initialization using rdft().
   size_t ip_length = 2 + sqrtf(analysis_length_);
-  ip_.reset(new int[ip_length]());
+  ip_.reset(new size_t[ip_length]());
   memset(ip_.get(), 0, ip_length * sizeof(ip_[0]));
   wfft_.reset(new float[complex_analysis_length_ - 1]);
   memset(wfft_.get(), 0, (complex_analysis_length_ - 1) * sizeof(wfft_[0]));
diff --git a/webrtc/modules/audio_processing/transient/transient_suppressor.h b/webrtc/modules/audio_processing/transient/transient_suppressor.h
index 12e4b5e..5a6f117 100644
--- a/webrtc/modules/audio_processing/transient/transient_suppressor.h
+++ b/webrtc/modules/audio_processing/transient/transient_suppressor.h
@@ -86,7 +86,7 @@
   rtc::scoped_ptr<float[]> out_buffer_;
 
   // Arrays for fft.
-  rtc::scoped_ptr<int[]> ip_;
+  rtc::scoped_ptr<size_t[]> ip_;
   rtc::scoped_ptr<float[]> wfft_;
 
   rtc::scoped_ptr<float[]> spectral_mean_;
diff --git a/webrtc/modules/audio_processing/vad/common.h b/webrtc/modules/audio_processing/vad/common.h
index 0772d55..be99c1c 100644
--- a/webrtc/modules/audio_processing/vad/common.h
+++ b/webrtc/modules/audio_processing/vad/common.h
@@ -12,15 +12,15 @@
 #define WEBRTC_MODULES_AUDIO_PROCESSING_VAD_COMMON_H_
 
 static const int kSampleRateHz = 16000;
-static const int kLength10Ms = kSampleRateHz / 100;
-static const int kMaxNumFrames = 4;
+static const size_t kLength10Ms = kSampleRateHz / 100;
+static const size_t kMaxNumFrames = 4;
 
 struct AudioFeatures {
   double log_pitch_gain[kMaxNumFrames];
   double pitch_lag_hz[kMaxNumFrames];
   double spectral_peak[kMaxNumFrames];
   double rms[kMaxNumFrames];
-  int num_frames;
+  size_t num_frames;
   bool silence;
 };
 
diff --git a/webrtc/modules/audio_processing/vad/pitch_based_vad.cc b/webrtc/modules/audio_processing/vad/pitch_based_vad.cc
index 91638d0..39ec37e 100644
--- a/webrtc/modules/audio_processing/vad/pitch_based_vad.cc
+++ b/webrtc/modules/audio_processing/vad/pitch_based_vad.cc
@@ -75,7 +75,7 @@
   const double kLimLowSpectralPeak = 200;
   const double kLimHighSpectralPeak = 2000;
   const double kEps = 1e-12;
-  for (int n = 0; n < features.num_frames; n++) {
+  for (size_t n = 0; n < features.num_frames; n++) {
     gmm_features[0] = features.log_pitch_gain[n];
     gmm_features[1] = features.spectral_peak[n];
     gmm_features[2] = features.pitch_lag_hz[n];
diff --git a/webrtc/modules/audio_processing/vad/pole_zero_filter.cc b/webrtc/modules/audio_processing/vad/pole_zero_filter.cc
index 84d0739..9769515 100644
--- a/webrtc/modules/audio_processing/vad/pole_zero_filter.cc
+++ b/webrtc/modules/audio_processing/vad/pole_zero_filter.cc
@@ -17,11 +17,10 @@
 namespace webrtc {
 
 PoleZeroFilter* PoleZeroFilter::Create(const float* numerator_coefficients,
-                                       int order_numerator,
+                                       size_t order_numerator,
                                        const float* denominator_coefficients,
-                                       int order_denominator) {
-  if (order_numerator < 0 || order_denominator < 0 ||
-      order_numerator > kMaxFilterOrder ||
+                                       size_t order_denominator) {
+  if (order_numerator > kMaxFilterOrder ||
       order_denominator > kMaxFilterOrder ||
       numerator_coefficients == NULL || denominator_coefficients == NULL ||
       denominator_coefficients[0] == 0)
     return NULL;
@@ -30,9 +29,9 @@
 }
 
 PoleZeroFilter::PoleZeroFilter(const float* numerator_coefficients,
-                               int order_numerator,
+                               size_t order_numerator,
                                const float* denominator_coefficients,
-                               int order_denominator)
+                               size_t order_denominator)
     : past_input_(),
       past_output_(),
       numerator_coefficients_(),
@@ -46,30 +45,31 @@
          sizeof(denominator_coefficients_[0]) * (order_denominator_ + 1));
 
   if (denominator_coefficients_[0] != 1) {
-    for (int n = 0; n <= order_numerator_; n++)
+    for (size_t n = 0; n <= order_numerator_; n++)
       numerator_coefficients_[n] /= denominator_coefficients_[0];
-    for (int n = 0; n <= order_denominator_; n++)
+    for (size_t n = 0; n <= order_denominator_; n++)
       denominator_coefficients_[n] /= denominator_coefficients_[0];
   }
 }
 
 template <typename T>
-static float FilterArPast(const T* past, int order, const float* coefficients) {
+static float FilterArPast(const T* past, size_t order,
+                          const float* coefficients) {
   float sum = 0.0f;
-  int past_index = order - 1;
-  for (int k = 1; k <= order; k++, past_index--)
+  size_t past_index = order - 1;
+  for (size_t k = 1; k <= order; k++, past_index--)
     sum += coefficients[k] * past[past_index];
   return sum;
 }
 
 int PoleZeroFilter::Filter(const int16_t* in,
-                           int num_input_samples,
+                           size_t num_input_samples,
                            float* output) {
-  if (in == NULL || num_input_samples < 0 || output == NULL)
+  if (in == NULL || output == NULL)
     return -1;
   // This is the typical case, just a memcpy.
-  const int k = std::min(num_input_samples, highest_order_);
-  int n;
+  const size_t k = std::min(num_input_samples, highest_order_);
+  size_t n;
   for (n = 0; n < k; n++) {
     output[n] = in[n] * numerator_coefficients_[0];
     output[n] += FilterArPast(&past_input_[n], order_numerator_,
@@ -81,7 +81,7 @@
     past_output_[n + order_denominator_] = output[n];
   }
   if (highest_order_ < num_input_samples) {
-    for (int m = 0; n < num_input_samples; n++, m++) {
+    for (size_t m = 0; n < num_input_samples; n++, m++) {
       output[n] = in[n] * numerator_coefficients_[0];
       output[n] +=
           FilterArPast(&in[m], order_numerator_, numerator_coefficients_);
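
Dropping the order < 0 checks from Create() is sound on its own, since a size_t cannot be negative, but it moves the burden onto expressions like order - 1 in FilterArPast() above. That code stays correct even for order == 0 only because the wrapped index is never read; a sketch of the idiom:

  size_t order = 0;
  size_t past_index = order - 1;  // wraps to SIZE_MAX when order == 0
  for (size_t k = 1; k <= order; k++, past_index--) {
    // Never entered for order == 0, so the wrapped value is never used.
  }
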
diff --git a/webrtc/modules/audio_processing/vad/pole_zero_filter.h b/webrtc/modules/audio_processing/vad/pole_zero_filter.h
index 038d801..bd13050 100644
--- a/webrtc/modules/audio_processing/vad/pole_zero_filter.h
+++ b/webrtc/modules/audio_processing/vad/pole_zero_filter.h
@@ -11,6 +11,8 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_VAD_POLE_ZERO_FILTER_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_VAD_POLE_ZERO_FILTER_H_
 
+#include <cstddef>
+
 #include "webrtc/typedefs.h"
 
 namespace webrtc {
@@ -20,17 +22,17 @@
   ~PoleZeroFilter() {}
 
   static PoleZeroFilter* Create(const float* numerator_coefficients,
-                                int order_numerator,
+                                size_t order_numerator,
                                 const float* denominator_coefficients,
-                                int order_denominator);
+                                size_t order_denominator);
 
-  int Filter(const int16_t* in, int num_input_samples, float* output);
+  int Filter(const int16_t* in, size_t num_input_samples, float* output);
 
  private:
   PoleZeroFilter(const float* numerator_coefficients,
-                 int order_numerator,
+                 size_t order_numerator,
                  const float* denominator_coefficients,
-                 int order_denominator);
+                 size_t order_denominator);
 
   static const int kMaxFilterOrder = 24;
 
@@ -40,9 +42,9 @@
   float numerator_coefficients_[kMaxFilterOrder + 1];
   float denominator_coefficients_[kMaxFilterOrder + 1];
 
-  int order_numerator_;
-  int order_denominator_;
-  int highest_order_;
+  size_t order_numerator_;
+  size_t order_denominator_;
+  size_t highest_order_;
 };
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_processing/vad/standalone_vad.cc b/webrtc/modules/audio_processing/vad/standalone_vad.cc
index 7837851..468b8ff 100644
--- a/webrtc/modules/audio_processing/vad/standalone_vad.cc
+++ b/webrtc/modules/audio_processing/vad/standalone_vad.cc
@@ -42,7 +42,7 @@
   return new StandaloneVad(vad);
 }
 
-int StandaloneVad::AddAudio(const int16_t* data, int length) {
+int StandaloneVad::AddAudio(const int16_t* data, size_t length) {
   if (length != kLength10Ms)
     return -1;
 
@@ -57,11 +57,11 @@
   return 0;
 }
 
-int StandaloneVad::GetActivity(double* p, int length_p) {
+int StandaloneVad::GetActivity(double* p, size_t length_p) {
   if (index_ == 0)
     return -1;
 
-  const int num_frames = index_ / kLength10Ms;
+  const size_t num_frames = index_ / kLength10Ms;
   if (num_frames > length_p)
     return -1;
   assert(WebRtcVad_ValidRateAndFrameLength(kSampleRateHz, index_) == 0);
@@ -73,7 +73,7 @@
     p[0] = 0.01;  // Arbitrary but small and non-zero.
   else
     p[0] = 0.5;  // 0.5 is a neutral value when combined with other probabilities.
-  for (int n = 1; n < num_frames; n++)
+  for (size_t n = 1; n < num_frames; n++)
     p[n] = p[0];
   // Reset the buffer to start from the beginning.
   index_ = 0;
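
Taken together, the class buffers up to kMaxNum10msFrames frames and emits one probability per buffered frame. A minimal usage sketch (silent input is hypothetical):

  rtc::scoped_ptr<StandaloneVad> vad(StandaloneVad::Create());
  int16_t data[kLength10Ms] = {0};
  for (size_t n = 0; n < 3; ++n)
    vad->AddAudio(data, kLength10Ms);  // only exact 10 ms chunks are accepted
  double p[3];
  vad->GetActivity(p, 3);  // returns -1 if p is shorter than the buffered frames
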
diff --git a/webrtc/modules/audio_processing/vad/standalone_vad.h b/webrtc/modules/audio_processing/vad/standalone_vad.h
index 4017a72..6a25424 100644
--- a/webrtc/modules/audio_processing/vad/standalone_vad.h
+++ b/webrtc/modules/audio_processing/vad/standalone_vad.h
@@ -41,10 +41,10 @@
   // classified as passive. In this way, when probabilities are combined, the
   // effect of the stand-alone VAD is neutral if the input is classified as
   // active.
-  int GetActivity(double* p, int length_p);
+  int GetActivity(double* p, size_t length_p);
 
   // Expecting 10 ms of 16 kHz audio to be pushed in.
-  int AddAudio(const int16_t* data, int length);
+  int AddAudio(const int16_t* data, size_t length);
 
   // Set aggressiveness of VAD, 0 is the least aggressive and 3 is the most
   // aggressive mode. Returns -1 if the input is less than 0 or larger than 3,
@@ -56,12 +56,12 @@
  private:
   explicit StandaloneVad(VadInst* vad);
 
-  static const int kMaxNum10msFrames = 3;
+  static const size_t kMaxNum10msFrames = 3;
 
   // TODO(turajs): Is there a way to use scoped-pointer here?
   VadInst* vad_;
   int16_t buffer_[kMaxNum10msFrames * kLength10Ms];
-  int index_;
+  size_t index_;
   int mode_;
 };
 
diff --git a/webrtc/modules/audio_processing/vad/standalone_vad_unittest.cc b/webrtc/modules/audio_processing/vad/standalone_vad_unittest.cc
index 404a66f..942008e 100644
--- a/webrtc/modules/audio_processing/vad/standalone_vad_unittest.cc
+++ b/webrtc/modules/audio_processing/vad/standalone_vad_unittest.cc
@@ -27,9 +27,9 @@
   // Valid frame length (for 32 kHz rate), but not what the VAD is expecting.
   EXPECT_EQ(-1, vad->AddAudio(data, 320));
 
-  const int kMaxNumFrames = 3;
+  const size_t kMaxNumFrames = 3;
   double p[kMaxNumFrames];
-  for (int n = 0; n < kMaxNumFrames; n++)
+  for (size_t n = 0; n < kMaxNumFrames; n++)
     EXPECT_EQ(0, vad->AddAudio(data, kLength10Ms));
 
   // Pretend |p| is shorter than it should be.
@@ -41,7 +41,7 @@
   EXPECT_EQ(-1, vad->GetActivity(p, kMaxNumFrames));
 
   // Should reset and result in one buffer.
-  for (int n = 0; n < kMaxNumFrames + 1; n++)
+  for (size_t n = 0; n < kMaxNumFrames + 1; n++)
     EXPECT_EQ(0, vad->AddAudio(data, kLength10Ms));
   EXPECT_EQ(0, vad->GetActivity(p, 1));
 
diff --git a/webrtc/modules/audio_processing/vad/vad_audio_proc.cc b/webrtc/modules/audio_processing/vad/vad_audio_proc.cc
index e8f27f8..8535d1f 100644
--- a/webrtc/modules/audio_processing/vad/vad_audio_proc.cc
+++ b/webrtc/modules/audio_processing/vad/vad_audio_proc.cc
@@ -76,7 +76,7 @@
 }
 
 int VadAudioProc::ExtractFeatures(const int16_t* frame,
-                                  int length,
+                                  size_t length,
                                   AudioFeatures* features) {
   features->num_frames = 0;
   if (length != kNumSubframeSamples) {
@@ -100,7 +100,7 @@
   features->silence = false;
 
   Rms(features->rms, kMaxNumFrames);
-  for (int i = 0; i < kNum10msSubframes; ++i) {
+  for (size_t i = 0; i < kNum10msSubframes; ++i) {
     if (features->rms[i] < kSilenceRms) {
       // PitchAnalysis can cause NaNs in the pitch gain if it's fed silence.
       // Bail out here instead.
@@ -119,13 +119,13 @@
 
 // Computes |kLpcOrder + 1| correlation coefficients.
 void VadAudioProc::SubframeCorrelation(double* corr,
-                                       int length_corr,
-                                       int subframe_index) {
+                                       size_t length_corr,
+                                       size_t subframe_index) {
   assert(length_corr >= kLpcOrder + 1);
   double windowed_audio[kNumSubframeSamples + kNumPastSignalSamples];
-  int buffer_index = subframe_index * kNumSubframeSamples;
+  size_t buffer_index = subframe_index * kNumSubframeSamples;
 
-  for (int n = 0; n < kNumSubframeSamples + kNumPastSignalSamples; n++)
+  for (size_t n = 0; n < kNumSubframeSamples + kNumPastSignalSamples; n++)
     windowed_audio[n] = audio_buffer_[buffer_index++] * kLpcAnalWin[n];
 
   WebRtcIsac_AutoCorr(corr, windowed_audio,
@@ -136,16 +136,16 @@
 // The analysis window is 15 ms long and it is centered on the first half of
 // each 10ms sub-frame. This is equivalent to computing LPC coefficients for the
 // first half of each 10 ms subframe.
-void VadAudioProc::GetLpcPolynomials(double* lpc, int length_lpc) {
+void VadAudioProc::GetLpcPolynomials(double* lpc, size_t length_lpc) {
   assert(length_lpc >= kNum10msSubframes * (kLpcOrder + 1));
   double corr[kLpcOrder + 1];
   double reflec_coeff[kLpcOrder];
-  for (int i = 0, offset_lpc = 0; i < kNum10msSubframes;
+  for (size_t i = 0, offset_lpc = 0; i < kNum10msSubframes;
        i++, offset_lpc += kLpcOrder + 1) {
     SubframeCorrelation(corr, kLpcOrder + 1, i);
     corr[0] *= 1.0001;
     // This makes Lev-Durb a bit more stable.
-    for (int k = 0; k < kLpcOrder + 1; k++) {
+    for (size_t k = 0; k < kLpcOrder + 1; k++) {
       corr[k] *= kCorrWeight[k];
     }
     WebRtcIsac_LevDurb(&lpc[offset_lpc], reflec_coeff, corr, kLpcOrder);
@@ -174,30 +174,31 @@
 // with the local minimum of A(z). It saves complexity, as we save one
 // inversion. Furthermore, we find the first local maximum of magnitude squared,
 // to save on one square root.
-void VadAudioProc::FindFirstSpectralPeaks(double* f_peak, int length_f_peak) {
+void VadAudioProc::FindFirstSpectralPeaks(double* f_peak,
+                                          size_t length_f_peak) {
   assert(length_f_peak >= kNum10msSubframes);
   double lpc[kNum10msSubframes * (kLpcOrder + 1)];
   // For all sub-frames.
   GetLpcPolynomials(lpc, kNum10msSubframes * (kLpcOrder + 1));
 
-  const int kNumDftCoefficients = kDftSize / 2 + 1;
+  const size_t kNumDftCoefficients = kDftSize / 2 + 1;
   float data[kDftSize];
 
-  for (int i = 0; i < kNum10msSubframes; i++) {
+  for (size_t i = 0; i < kNum10msSubframes; i++) {
     // Convert to float with zero pad.
     memset(data, 0, sizeof(data));
-    for (int n = 0; n < kLpcOrder + 1; n++) {
+    for (size_t n = 0; n < kLpcOrder + 1; n++) {
       data[n] = static_cast<float>(lpc[i * (kLpcOrder + 1) + n]);
     }
     // Transform to frequency domain.
     WebRtc_rdft(kDftSize, 1, data, ip_, w_fft_);
 
-    int index_peak = 0;
+    size_t index_peak = 0;
     float prev_magn_sqr = data[0] * data[0];
     float curr_magn_sqr = data[2] * data[2] + data[3] * data[3];
     float next_magn_sqr;
     bool found_peak = false;
-    for (int n = 2; n < kNumDftCoefficients - 1; n++) {
+    for (size_t n = 2; n < kNumDftCoefficients - 1; n++) {
       next_magn_sqr =
           data[2 * n] * data[2 * n] + data[2 * n + 1] * data[2 * n + 1];
       if (curr_magn_sqr < prev_magn_sqr && curr_magn_sqr < next_magn_sqr) {
@@ -228,7 +229,7 @@
 // Using iSAC functions to estimate pitch gains & lags.
 void VadAudioProc::PitchAnalysis(double* log_pitch_gains,
                                  double* pitch_lags_hz,
-                                 int length) {
+                                 size_t length) {
   // TODO(turajs): This can be "imported" from iSAC & and the next two
   // constants.
   assert(length >= kNum10msSubframes);
@@ -260,12 +261,12 @@
       &log_old_gain_, &old_lag_, log_pitch_gains, pitch_lags_hz);
 }
 
-void VadAudioProc::Rms(double* rms, int length_rms) {
+void VadAudioProc::Rms(double* rms, size_t length_rms) {
   assert(length_rms >= kNum10msSubframes);
-  int offset = kNumPastSignalSamples;
-  for (int i = 0; i < kNum10msSubframes; i++) {
+  size_t offset = kNumPastSignalSamples;
+  for (size_t i = 0; i < kNum10msSubframes; i++) {
     rms[i] = 0;
-    for (int n = 0; n < kNumSubframeSamples; n++, offset++)
+    for (size_t n = 0; n < kNumSubframeSamples; n++, offset++)
       rms[i] += audio_buffer_[offset] * audio_buffer_[offset];
     rms[i] = sqrt(rms[i] / kNumSubframeSamples);
   }
diff --git a/webrtc/modules/audio_processing/vad/vad_audio_proc.h b/webrtc/modules/audio_processing/vad/vad_audio_proc.h
index 6cf3937..85500ae 100644
--- a/webrtc/modules/audio_processing/vad/vad_audio_proc.h
+++ b/webrtc/modules/audio_processing/vad/vad_audio_proc.h
@@ -30,46 +30,51 @@
   ~VadAudioProc();
 
   int ExtractFeatures(const int16_t* audio_frame,
-                      int length,
+                      size_t length,
                       AudioFeatures* audio_features);
 
-  static const int kDftSize = 512;
+  static const size_t kDftSize = 512;
 
  private:
-  void PitchAnalysis(double* pitch_gains, double* pitch_lags_hz, int length);
-  void SubframeCorrelation(double* corr, int length_corr, int subframe_index);
-  void GetLpcPolynomials(double* lpc, int length_lpc);
-  void FindFirstSpectralPeaks(double* f_peak, int length_f_peak);
-  void Rms(double* rms, int length_rms);
+  void PitchAnalysis(double* pitch_gains, double* pitch_lags_hz, size_t length);
+  void SubframeCorrelation(double* corr,
+                           size_t length_corr,
+                           size_t subframe_index);
+  void GetLpcPolynomials(double* lpc, size_t length_lpc);
+  void FindFirstSpectralPeaks(double* f_peak, size_t length_f_peak);
+  void Rms(double* rms, size_t length_rms);
   void ResetBuffer();
 
   // To compute the spectral peak we perform LPC analysis to get the spectral
   // envelope. For every 30 ms we compute 3 spectral peaks, therefore 3 LPC
   // analyses. LPC is computed over 15 ms of windowed audio. For every 10 ms
   // sub-frame we need 5 ms of past signal to form the input to the LPC
   // analysis.
-  static const int kNumPastSignalSamples = kSampleRateHz / 200;
+  static const size_t kNumPastSignalSamples =
+      static_cast<size_t>(kSampleRateHz / 200);
 
   // TODO(turajs): maybe define this at a higher level (maybe as an enum) so
   // that all the code recognizes it as "no-error."
   static const int kNoError = 0;
 
-  static const int kNum10msSubframes = 3;
-  static const int kNumSubframeSamples = kSampleRateHz / 100;
-  static const int kNumSamplesToProcess =
+  static const size_t kNum10msSubframes = 3;
+  static const size_t kNumSubframeSamples =
+      static_cast<size_t>(kSampleRateHz / 100);
+  static const size_t kNumSamplesToProcess =
       kNum10msSubframes *
       kNumSubframeSamples;  // Samples in 30 ms @ given sampling rate.
-  static const int kBufferLength = kNumPastSignalSamples + kNumSamplesToProcess;
-  static const int kIpLength = kDftSize >> 1;
-  static const int kWLength = kDftSize >> 1;
+  static const size_t kBufferLength =
+      kNumPastSignalSamples + kNumSamplesToProcess;
+  static const size_t kIpLength = kDftSize >> 1;
+  static const size_t kWLength = kDftSize >> 1;
 
-  static const int kLpcOrder = 16;
+  static const size_t kLpcOrder = 16;
 
-  int ip_[kIpLength];
+  size_t ip_[kIpLength];
   float w_fft_[kWLength];
 
   // A buffer of 5 ms (past audio) + 30 ms (one iSAC frame).
   float audio_buffer_[kBufferLength];
-  int num_buffer_samples_;
+  size_t num_buffer_samples_;
 
   double log_old_gain_;
   double old_lag_;
diff --git a/webrtc/modules/audio_processing/vad/vad_audio_proc_internal.h b/webrtc/modules/audio_processing/vad/vad_audio_proc_internal.h
index 4486879..45586b9b 100644
--- a/webrtc/modules/audio_processing/vad/vad_audio_proc_internal.h
+++ b/webrtc/modules/audio_processing/vad/vad_audio_proc_internal.h
@@ -74,7 +74,7 @@
     0.14408883, 0.13106918, 0.11802689, 0.10496421, 0.09188339, 0.07878670,
     0.06567639, 0.05255473, 0.03942400, 0.02628645, 0.01314436, 0.00000000};
 
-static const int kFilterOrder = 2;
+static const size_t kFilterOrder = 2;
 static const float kCoeffNumerator[kFilterOrder + 1] = {0.974827f,
                                                         -1.949650f,
                                                         0.974827f};
diff --git a/webrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc b/webrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc
index 675af70..f509af4 100644
--- a/webrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc
+++ b/webrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc
@@ -51,7 +51,7 @@
       // Read reference values.
       const size_t num_frames = features.num_frames;
       ASSERT_EQ(num_frames, fread(sp, sizeof(sp[0]), num_frames, peak_file));
-      for (int n = 0; n < features.num_frames; n++)
+      for (size_t n = 0; n < features.num_frames; n++)
         EXPECT_NEAR(features.spectral_peak[n], sp[n], 3);
     }
   }
diff --git a/webrtc/modules/audio_processing/vad/voice_activity_detector.cc b/webrtc/modules/audio_processing/vad/voice_activity_detector.cc
index 05995ed..c5c8498 100644
--- a/webrtc/modules/audio_processing/vad/voice_activity_detector.cc
+++ b/webrtc/modules/audio_processing/vad/voice_activity_detector.cc
@@ -17,7 +17,7 @@
 namespace webrtc {
 namespace {
 
-const int kMaxLength = 320;
+const size_t kMaxLength = 320;
 const int kNumChannels = 1;
 
 const double kDefaultVoiceValue = 1.0;
@@ -35,9 +35,9 @@
 // |chunkwise_voice_probabilities_| and |chunkwise_rms_| when there is new data.
 // Otherwise it clears them.
 void VoiceActivityDetector::ProcessChunk(const int16_t* audio,
-                                         int length,
+                                         size_t length,
                                          int sample_rate_hz) {
-  DCHECK_EQ(length, sample_rate_hz / 100);
+  DCHECK_EQ(static_cast<int>(length), sample_rate_hz / 100);
   DCHECK_LE(length, kMaxLength);
   // Resample to the required rate.
   const int16_t* resampled_ptr = audio;
diff --git a/webrtc/modules/audio_processing/vad/voice_activity_detector.h b/webrtc/modules/audio_processing/vad/voice_activity_detector.h
index aedd6ed..e2dcf02 100644
--- a/webrtc/modules/audio_processing/vad/voice_activity_detector.h
+++ b/webrtc/modules/audio_processing/vad/voice_activity_detector.h
@@ -31,7 +31,7 @@
   // Processes each audio chunk and estimates the voice probability. The maximum
   // supported sample rate is 32 kHz.
-  // TODO(aluebs): Change |length| to size_t.
-  void ProcessChunk(const int16_t* audio, int length, int sample_rate_hz);
+  void ProcessChunk(const int16_t* audio, size_t length, int sample_rate_hz);
 
   // Returns a vector of voice probabilities for each chunk. It can be empty for
   // some chunks, but it catches up afterwards returning multiple values at
diff --git a/webrtc/modules/audio_processing/voice_detection_impl.cc b/webrtc/modules/audio_processing/voice_detection_impl.cc
index 0883536..710df42 100644
--- a/webrtc/modules/audio_processing/voice_detection_impl.cc
+++ b/webrtc/modules/audio_processing/voice_detection_impl.cc
@@ -140,8 +140,8 @@
   }
 
   using_external_vad_ = false;
-  frame_size_samples_ = frame_size_ms_ *
-      apm_->proc_split_sample_rate_hz() / 1000;
+  frame_size_samples_ = static_cast<size_t>(
+      frame_size_ms_ * apm_->proc_split_sample_rate_hz() / 1000);
   // TODO(ajm): initialize frame buffer here.
 
   return apm_->kNoError;
diff --git a/webrtc/modules/audio_processing/voice_detection_impl.h b/webrtc/modules/audio_processing/voice_detection_impl.h
index 32f031e..b188083 100644
--- a/webrtc/modules/audio_processing/voice_detection_impl.h
+++ b/webrtc/modules/audio_processing/voice_detection_impl.h
@@ -57,7 +57,7 @@
   bool using_external_vad_;
   Likelihood likelihood_;
   int frame_size_ms_;
-  int frame_size_samples_;
+  size_t frame_size_samples_;
 };
 }  // namespace webrtc
 
diff --git a/webrtc/modules/interface/module_common_types.h b/webrtc/modules/interface/module_common_types.h
index 02ce03f..b500962 100644
--- a/webrtc/modules/interface/module_common_types.h
+++ b/webrtc/modules/interface/module_common_types.h
@@ -480,7 +480,7 @@
 class AudioFrame {
  public:
   // Stereo, 32 kHz, 60 ms (2 * 32 * 60)
-  static const int kMaxDataSizeSamples = 3840;
+  static const size_t kMaxDataSizeSamples = 3840;
 
   enum VADActivity {
     kVadActive = 0,
@@ -504,7 +504,7 @@
 
   // |interleaved_| is not changed by this method.
   void UpdateFrame(int id, uint32_t timestamp, const int16_t* data,
-                   int samples_per_channel, int sample_rate_hz,
+                   size_t samples_per_channel, int sample_rate_hz,
                    SpeechType speech_type, VADActivity vad_activity,
                    int num_channels = 1, uint32_t energy = -1);
 
@@ -528,7 +528,7 @@
   // -1 represents an uninitialized value.
   int64_t ntp_time_ms_;
   int16_t data_[kMaxDataSizeSamples];
-  int samples_per_channel_;
+  size_t samples_per_channel_;
   int sample_rate_hz_;
   int num_channels_;
   SpeechType speech_type_;
@@ -568,7 +568,7 @@
 inline void AudioFrame::UpdateFrame(int id,
                                     uint32_t timestamp,
                                     const int16_t* data,
-                                    int samples_per_channel,
+                                    size_t samples_per_channel,
                                     int sample_rate_hz,
                                     SpeechType speech_type,
                                     VADActivity vad_activity,
@@ -584,7 +584,7 @@
   energy_ = energy;
 
   assert(num_channels >= 0);
-  const int length = samples_per_channel * num_channels;
+  const size_t length = samples_per_channel * num_channels;
   assert(length <= kMaxDataSizeSamples);
   if (data != NULL) {
     memcpy(data_, data, sizeof(int16_t) * length);
@@ -609,7 +609,7 @@
   interleaved_ = src.interleaved_;
 
   assert(num_channels_ >= 0);
-  const int length = samples_per_channel_ * num_channels_;
+  const size_t length = samples_per_channel_ * num_channels_;
   assert(length <= kMaxDataSizeSamples);
   memcpy(data_, src.data_, sizeof(int16_t) * length);
 }
@@ -622,7 +622,7 @@
   assert((num_channels_ > 0) && (num_channels_ < 3));
   if ((num_channels_ > 2) || (num_channels_ < 1)) return *this;
 
-  for (int i = 0; i < samples_per_channel_ * num_channels_; i++) {
+  for (size_t i = 0; i < samples_per_channel_ * num_channels_; i++) {
     data_[i] = static_cast<int16_t>(data_[i] >> rhs);
   }
   return *this;
@@ -644,8 +644,8 @@
     speech_type_ = kUndefined;
   }
 
-  int offset = samples_per_channel_ * num_channels_;
-  for (int i = 0; i < rhs.samples_per_channel_ * rhs.num_channels_; i++) {
+  size_t offset = samples_per_channel_ * num_channels_;
+  for (size_t i = 0; i < rhs.samples_per_channel_ * rhs.num_channels_; i++) {
     data_[offset + i] = rhs.data_[i];
   }
   samples_per_channel_ += rhs.samples_per_channel_;
@@ -695,7 +695,7 @@
            sizeof(int16_t) * rhs.samples_per_channel_ * num_channels_);
   } else {
     // IMPROVEMENT: this can be done very fast in assembly.
-    for (int i = 0; i < samples_per_channel_ * num_channels_; i++) {
+    for (size_t i = 0; i < samples_per_channel_ * num_channels_; i++) {
       int32_t wrap_guard =
           static_cast<int32_t>(data_[i]) + static_cast<int32_t>(rhs.data_[i]);
       data_[i] = ClampToInt16(wrap_guard);
@@ -720,7 +720,7 @@
   }
   speech_type_ = kUndefined;
 
-  for (int i = 0; i < samples_per_channel_ * num_channels_; i++) {
+  for (size_t i = 0; i < samples_per_channel_ * num_channels_; i++) {
     int32_t wrap_guard =
         static_cast<int32_t>(data_[i]) - static_cast<int32_t>(rhs.data_[i]);
     data_[i] = ClampToInt16(wrap_guard);
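
All of these loops now multiply a size_t samples_per_channel_ by an int num_channels_; the int operand is converted to size_t, which is only safe because the surrounding asserts pin num_channels_ to non-negative values. A sketch of the promotion:

  size_t samples_per_channel = 160;
  int num_channels = 2;  // asserted non-negative in this class
  // The int is converted to size_t before the multiply; a negative channel
  // count would wrap to a huge length instead of failing loudly.
  size_t length = samples_per_channel * num_channels;  // 320
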
diff --git a/webrtc/modules/utility/interface/audio_frame_operations.h b/webrtc/modules/utility/interface/audio_frame_operations.h
index f439dac..c2af68a 100644
--- a/webrtc/modules/utility/interface/audio_frame_operations.h
+++ b/webrtc/modules/utility/interface/audio_frame_operations.h
@@ -26,7 +26,7 @@
   // operation, meaning src_audio and dst_audio must point to different
   // buffers. It is the caller's responsibility to ensure that |dst_audio| is
   // sufficiently large.
-  static void MonoToStereo(const int16_t* src_audio, int samples_per_channel,
+  static void MonoToStereo(const int16_t* src_audio, size_t samples_per_channel,
                            int16_t* dst_audio);
   // |frame.num_channels_| will be updated. This version checks for sufficient
   // buffer size and that |num_channels_| is mono.
@@ -35,7 +35,7 @@
   // Downmixes stereo |src_audio| to mono |dst_audio|. This is an in-place
   // operation, meaning |src_audio| and |dst_audio| may point to the same
   // buffer.
-  static void StereoToMono(const int16_t* src_audio, int samples_per_channel,
+  static void StereoToMono(const int16_t* src_audio, size_t samples_per_channel,
                            int16_t* dst_audio);
   // |frame.num_channels_| will be updated. This version checks that
   // |num_channels_| is stereo.
diff --git a/webrtc/modules/utility/interface/file_player.h b/webrtc/modules/utility/interface/file_player.h
index fdce277..44f03e4 100644
--- a/webrtc/modules/utility/interface/file_player.h
+++ b/webrtc/modules/utility/interface/file_player.h
@@ -38,7 +38,7 @@
     // channel).
     virtual int Get10msAudioFromFile(
         int16_t* outBuffer,
-        int& lengthInSamples,
+        size_t& lengthInSamples,
         int frequencyInHz) = 0;
 
     // Register callback for receiving file playing notifications.
diff --git a/webrtc/modules/utility/source/audio_frame_operations.cc b/webrtc/modules/utility/source/audio_frame_operations.cc
index e3b0010..c07ca1f 100644
--- a/webrtc/modules/utility/source/audio_frame_operations.cc
+++ b/webrtc/modules/utility/source/audio_frame_operations.cc
@@ -14,9 +14,9 @@
 namespace webrtc {
 
 void AudioFrameOperations::MonoToStereo(const int16_t* src_audio,
-                                        int samples_per_channel,
+                                        size_t samples_per_channel,
                                         int16_t* dst_audio) {
-  for (int i = 0; i < samples_per_channel; i++) {
+  for (size_t i = 0; i < samples_per_channel; i++) {
     dst_audio[2 * i] = src_audio[i];
     dst_audio[2 * i + 1] = src_audio[i];
   }
@@ -41,9 +41,9 @@
 }
 
 void AudioFrameOperations::StereoToMono(const int16_t* src_audio,
-                                        int samples_per_channel,
+                                        size_t samples_per_channel,
                                         int16_t* dst_audio) {
-  for (int i = 0; i < samples_per_channel; i++) {
+  for (size_t i = 0; i < samples_per_channel; i++) {
     dst_audio[i] = (src_audio[2 * i] + src_audio[2 * i + 1]) >> 1;
   }
 }
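
StereoToMono above leans on integer promotion: both int16_t samples are widened to int before the add, so the sum cannot overflow, and the >> 1 halves it back into int16_t range (on the usual arithmetic-shift platforms, rounding toward negative infinity for odd negative sums). The same computation made explicit:

  int16_t AverageSample(int16_t left, int16_t right) {
    // Promotion to int makes the addition overflow-free for all int16_t inputs.
    return static_cast<int16_t>((static_cast<int>(left) + right) >> 1);
  }
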
@@ -62,7 +62,7 @@
 void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
   if (frame->num_channels_ != 2) return;
 
-  for (int i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
+  for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
     int16_t temp_data = frame->data_[i];
     frame->data_[i] = frame->data_[i + 1];
     frame->data_[i + 1] = temp_data;
@@ -79,7 +79,7 @@
     return -1;
   }
 
-  for (int i = 0; i < frame.samples_per_channel_; i++) {
+  for (size_t i = 0; i < frame.samples_per_channel_; i++) {
     frame.data_[2 * i] =
         static_cast<int16_t>(left * frame.data_[2 * i]);
     frame.data_[2 * i + 1] =
@@ -92,7 +92,7 @@
   int32_t temp_data = 0;
 
   // Ensure that the output result is saturated [-32768, +32767].
-  for (int i = 0; i < frame.samples_per_channel_ * frame.num_channels_;
+  for (size_t i = 0; i < frame.samples_per_channel_ * frame.num_channels_;
        i++) {
     temp_data = static_cast<int32_t>(scale * frame.data_[i]);
     if (temp_data < -32768) {
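
The loop above widens each sample to int32_t before clamping; a reduced sketch
of the same saturation idiom, in isolation:

  // Sketch of the saturation idiom used above: compute in 32 bits, then
  // clamp to the int16_t range [-32768, 32767] before writing back.
  #include <cstdint>

  int16_t ScaleAndSaturate(int16_t sample, float scale) {
    int32_t scaled = static_cast<int32_t>(scale * sample);
    if (scaled < -32768)
      scaled = -32768;
    else if (scaled > 32767)
      scaled = 32767;
    return static_cast<int16_t>(scaled);
  }
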
diff --git a/webrtc/modules/utility/source/audio_frame_operations_unittest.cc b/webrtc/modules/utility/source/audio_frame_operations_unittest.cc
index f4d881c..c278cdd 100644
--- a/webrtc/modules/utility/source/audio_frame_operations_unittest.cc
+++ b/webrtc/modules/utility/source/audio_frame_operations_unittest.cc
@@ -28,14 +28,14 @@
 };
 
 void SetFrameData(AudioFrame* frame, int16_t left, int16_t right) {
-  for (int i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
+  for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
     frame->data_[i] = left;
     frame->data_[i + 1] = right;
   }
 }
 
 void SetFrameData(AudioFrame* frame, int16_t data) {
-  for (int i = 0; i < frame->samples_per_channel_; i++) {
+  for (size_t i = 0; i < frame->samples_per_channel_; i++) {
     frame->data_[i] = data;
   }
 }
@@ -45,7 +45,7 @@
   EXPECT_EQ(frame1.samples_per_channel_,
             frame2.samples_per_channel_);
 
-  for (int i = 0; i < frame1.samples_per_channel_ * frame1.num_channels_;
+  for (size_t i = 0; i < frame1.samples_per_channel_ * frame1.num_channels_;
       i++) {
     EXPECT_EQ(frame1.data_[i], frame2.data_[i]);
   }
diff --git a/webrtc/modules/utility/source/file_player_impl.cc b/webrtc/modules/utility/source/file_player_impl.cc
index df6a5bf..8c94caa 100644
--- a/webrtc/modules/utility/source/file_player_impl.cc
+++ b/webrtc/modules/utility/source/file_player_impl.cc
@@ -95,7 +95,7 @@
 
 int32_t FilePlayerImpl::Get10msAudioFromFile(
     int16_t* outBuffer,
-    int& lengthInSamples,
+    size_t& lengthInSamples,
     int frequencyInHz)
 {
     if(_codec.plfreq == 0)
@@ -127,8 +127,7 @@
             return 0;
         }
         // One sample is two bytes.
-        unresampledAudioFrame.samples_per_channel_ =
-            (uint16_t)lengthInBytes >> 1;
+        unresampledAudioFrame.samples_per_channel_ = lengthInBytes >> 1;
 
     } else {
         // Decode will generate 10 ms of audio data. PlayoutAudioData(..)
@@ -156,14 +155,14 @@
         }
     }
 
-    int outLen = 0;
+    size_t outLen = 0;
     if(_resampler.ResetIfNeeded(unresampledAudioFrame.sample_rate_hz_,
                                 frequencyInHz, 1))
     {
         LOG(LS_WARNING) << "Get10msAudioFromFile() unexpected codec.";
 
         // New sampling frequency. Update state.
-        outLen = frequencyInHz / 100;
+        outLen = static_cast<size_t>(frequencyInHz / 100);
         memset(outBuffer, 0, outLen * sizeof(int16_t));
         return 0;
     }
@@ -177,7 +176,7 @@
 
     if(_scaling != 1.0)
     {
-        for (int i = 0;i < outLen; i++)
+        for (size_t i = 0; i < outLen; i++)
         {
             outBuffer[i] = (int16_t)(outBuffer[i] * _scaling);
         }
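
Since Get10msAudioFromFile() now reports its length through a size_t
reference, a hypothetical caller looks like this (the |player| pointer and
the 48 kHz rate are assumptions):

  int16_t out[480];              // Room for 10 ms at 48 kHz.
  size_t length_in_samples = 0;  // Filled in by the callee, now as size_t.
  if (player->Get10msAudioFromFile(out, length_in_samples, 48000) == 0) {
    // length_in_samples is the number of valid samples written to |out|.
  }
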
diff --git a/webrtc/modules/utility/source/file_player_impl.h b/webrtc/modules/utility/source/file_player_impl.h
index f81e710..8818b5c 100644
--- a/webrtc/modules/utility/source/file_player_impl.h
+++ b/webrtc/modules/utility/source/file_player_impl.h
@@ -31,7 +31,7 @@
 
     virtual int Get10msAudioFromFile(
         int16_t* outBuffer,
-        int& lengthInSamples,
+        size_t& lengthInSamples,
         int frequencyInHz);
     virtual int32_t RegisterModuleFileCallback(FileCallback* callback);
     virtual int32_t StartPlayingFile(
diff --git a/webrtc/modules/utility/source/file_player_unittests.cc b/webrtc/modules/utility/source/file_player_unittests.cc
index c5f6fba..4b65acd 100644
--- a/webrtc/modules/utility/source/file_player_unittests.cc
+++ b/webrtc/modules/utility/source/file_player_unittests.cc
@@ -62,12 +62,12 @@
     rtc::Md5Digest checksum;
     for (int i = 0; i < output_length_ms / 10; ++i) {
       int16_t out[10 * kSampleRateHz / 1000] = {0};
-      int num_samples;
+      size_t num_samples;
       EXPECT_EQ(0,
                 player_->Get10msAudioFromFile(out, num_samples, kSampleRateHz));
       checksum.Update(out, num_samples * sizeof(out[0]));
       if (FLAGS_file_player_output) {
-        ASSERT_EQ(static_cast<size_t>(num_samples),
+        ASSERT_EQ(num_samples,
                   fwrite(out, sizeof(out[0]), num_samples, output_file_));
       }
     }
diff --git a/webrtc/modules/utility/source/file_recorder_impl.cc b/webrtc/modules/utility/source/file_recorder_impl.cc
index e86afc6..6a5f2c2 100644
--- a/webrtc/modules/utility/source/file_recorder_impl.cc
+++ b/webrtc/modules/utility/source/file_recorder_impl.cc
@@ -156,7 +156,7 @@
         tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_;
         tempAudioFrame.samples_per_channel_ =
           incomingAudioFrame.samples_per_channel_;
-        for (uint16_t i = 0;
+        for (size_t i = 0;
              i < (incomingAudioFrame.samples_per_channel_); i++)
         {
             // Sample value is the average of left and right buffer rounded to
@@ -174,7 +174,7 @@
         tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_;
         tempAudioFrame.samples_per_channel_ =
           incomingAudioFrame.samples_per_channel_;
-        for (uint16_t i = 0;
+        for (size_t i = 0;
              i < (incomingAudioFrame.samples_per_channel_); i++)
         {
             // Duplicate sample to both channels
@@ -210,7 +210,7 @@
             return -1;
         }
     } else {
-        int outLen = 0;
+        size_t outLen = 0;
         _audioResampler.ResetIfNeeded(ptrAudioFrame->sample_rate_hz_,
                                       codec_info_.plfreq,
                                       ptrAudioFrame->num_channels_);
diff --git a/webrtc/system_wrappers/interface/aligned_array.h b/webrtc/system_wrappers/interface/aligned_array.h
index 8278af2..3648c7c 100644
--- a/webrtc/system_wrappers/interface/aligned_array.h
+++ b/webrtc/system_wrappers/interface/aligned_array.h
@@ -20,7 +20,7 @@
 // aligned to the given byte alignment.
 template<typename T> class AlignedArray {
  public:
-  AlignedArray(int rows, int cols, int alignment)
+  AlignedArray(int rows, size_t cols, int alignment)
       : rows_(rows),
         cols_(cols),
         alignment_(alignment) {
@@ -58,12 +58,12 @@
     return head_row_[row];
   }
 
-  T& At(int row, int col) {
+  T& At(int row, size_t col) {
     CHECK_LT(col, cols_);
     return Row(row)[col];
   }
 
-  const T& At(int row, int col) const {
+  const T& At(int row, size_t col) const {
     CHECK_LT(col, cols_);
     return Row(row)[col];
   }
@@ -72,13 +72,13 @@
     return rows_;
   }
 
-  int cols() const {
+  size_t cols() const {
     return cols_;
   }
 
  private:
   int rows_;
-  int cols_;
+  size_t cols_;
   int alignment_;
   T** head_row_;
 };
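
With the template above, row indices stay int while column indices are
size_t; a small usage sketch (the dimensions are assumptions):

  webrtc::AlignedArray<float> arr(4 /* rows */, 256 /* cols */, 64 /* align */);
  for (int r = 0; r < arr.rows(); ++r) {
    for (size_t c = 0; c < arr.cols(); ++c) {
      arr.At(r, c) = 0.f;
    }
  }
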
diff --git a/webrtc/system_wrappers/source/aligned_array_unittest.cc b/webrtc/system_wrappers/source/aligned_array_unittest.cc
index f25c717..8b8d50d 100644
--- a/webrtc/system_wrappers/source/aligned_array_unittest.cc
+++ b/webrtc/system_wrappers/source/aligned_array_unittest.cc
@@ -34,16 +34,16 @@
 }
 
 TEST(AlignedArrayTest, CheckOverlap) {
-  AlignedArray<int> arr(10, 7, 128);
+  AlignedArray<size_t> arr(10, 7, 128);
 
   for (int i = 0; i < 10; ++i) {
-    for (int j = 0; j < 7; ++j) {
+    for (size_t j = 0; j < 7; ++j) {
       arr.At(i, j) = 20 * i + j;
     }
   }
 
   for (int i = 0; i < 10; ++i) {
-    for (int j = 0; j < 7; ++j) {
+    for (size_t j = 0; j < 7; ++j) {
       ASSERT_EQ(arr.At(i, j), 20 * i + j);
       ASSERT_EQ(arr.Row(i)[j], 20 * i + j);
       ASSERT_EQ(arr.Array()[i][j], 20 * i + j);
@@ -54,7 +54,7 @@
 TEST(AlignedArrayTest, CheckRowsCols) {
   AlignedArray<bool> arr(10, 7, 128);
   ASSERT_EQ(arr.rows(), 10);
-  ASSERT_EQ(arr.cols(), 7);
+  ASSERT_EQ(arr.cols(), 7u);
 }
 
 }  // namespace webrtc
diff --git a/webrtc/test/fake_audio_device.cc b/webrtc/test/fake_audio_device.cc
index bea125b..4309aed 100644
--- a/webrtc/test/fake_audio_device.cc
+++ b/webrtc/test/fake_audio_device.cc
@@ -99,7 +99,8 @@
           *input_stream_.get(), captured_audio_, kBufferSizeBytes);
       if (bytes_read <= 0)
         return;
-      int num_samples = bytes_read / 2;  // 2 bytes per sample.
+      // 2 bytes per sample.
+      size_t num_samples = static_cast<size_t>(bytes_read / 2);
       uint32_t new_mic_level;
       EXPECT_EQ(0,
                 audio_callback_->RecordedDataIsAvailable(captured_audio_,
@@ -112,14 +113,15 @@
                                                          0,
                                                          false,
                                                          new_mic_level));
-      uint32_t samples_needed = kFrequencyHz / 100;
+      size_t samples_needed = kFrequencyHz / 100;
       int64_t now_ms = clock_->TimeInMilliseconds();
       uint32_t time_since_last_playout_ms = now_ms - last_playout_ms_;
       if (last_playout_ms_ > 0 && time_since_last_playout_ms > 0) {
-        samples_needed = std::min(kFrequencyHz / time_since_last_playout_ms,
-                                  kBufferSizeBytes / 2);
+        samples_needed = std::min(
+            static_cast<size_t>(kFrequencyHz / time_since_last_playout_ms),
+            kBufferSizeBytes / 2);
       }
-      uint32_t samples_out = 0;
+      size_t samples_out = 0;
       int64_t elapsed_time_ms = -1;
       int64_t ntp_time_ms = -1;
       EXPECT_EQ(0,
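
The static_cast in the std::min call above is load-bearing: std::min deduces
a single template type, so a size_t operand and a uint32_t operand will not
compile together. A reduced sketch (function and parameter names are
assumptions):

  #include <algorithm>
  #include <cstddef>
  #include <cstdint>

  size_t SamplesNeeded(uint32_t frequency_hz, uint32_t elapsed_ms,
                       size_t buffer_samples) {
    // Both std::min arguments must have the same type, hence the cast.
    return std::min(static_cast<size_t>(frequency_hz / elapsed_ms),
                    buffer_samples);
  }
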
diff --git a/webrtc/test/fake_audio_device.h b/webrtc/test/fake_audio_device.h
index f4442d4..bdc6728 100644
--- a/webrtc/test/fake_audio_device.h
+++ b/webrtc/test/fake_audio_device.h
@@ -48,7 +48,7 @@
   void CaptureAudio();
 
   static const uint32_t kFrequencyHz = 16000;
-  static const uint32_t kBufferSizeBytes = 2 * kFrequencyHz;
+  static const size_t kBufferSizeBytes = 2 * kFrequencyHz;
 
   AudioTransport* audio_callback_;
   bool capturing_;
diff --git a/webrtc/tools/agc/activity_metric.cc b/webrtc/tools/agc/activity_metric.cc
index fb50daf..18e7c6d 100644
--- a/webrtc/tools/agc/activity_metric.cc
+++ b/webrtc/tools/agc/activity_metric.cc
@@ -61,10 +61,10 @@
   const double sum_squared_silence = kRmsSilence * kRmsSilence *
       frame->samples_per_channel_;
   double sum_squared = 0;
-  for (int n = 0; n < frame->samples_per_channel_; n++)
+  for (size_t n = 0; n < frame->samples_per_channel_; n++)
     sum_squared += frame->data_[n] * frame->data_[n];
   if (sum_squared <= sum_squared_silence) {
-    for (int n = 0; n < frame->samples_per_channel_; n++)
+    for (size_t n = 0; n < frame->samples_per_channel_; n++)
       frame->data_[n] = (rand() & 0xF) - 8;
   }
 }
@@ -79,7 +79,7 @@
         vad_(new PitchBasedVad()),
         standalone_vad_(StandaloneVad::Create()),
         audio_content_fid_(NULL) {
-    for (int n = 0; n < kMaxNumFrames; n++)
+    for (size_t n = 0; n < kMaxNumFrames; n++)
       video_vad_[n] = 0.5;
   }
 
@@ -116,7 +116,7 @@
       // TODO(turajs): Combining and limiting are used in the source files as
       // well; they can be moved to utility.
       // Combine Video and stand-alone VAD.
-      for (int n = 0; n < features.num_frames; n++) {
+      for (size_t n = 0; n < features.num_frames; n++) {
         double p_active = p[n] * video_vad_[n];
         double p_passive = (1 - p[n]) * (1 - video_vad_[n]);
         p[n]  = p_active / (p_active + p_passive);
@@ -125,7 +125,7 @@
       }
       if (vad_->VoicingProbability(features, p) < 0)
         return -1;
-      for (int n = 0; n < features.num_frames; n++) {
+      for (size_t n = 0; n < features.num_frames; n++) {
         audio_content_->Update(features.rms[n], p[n]);
         double ac = audio_content_->AudioContent();
         if (audio_content_fid_ != NULL) {
@@ -139,7 +139,7 @@
       }
       video_index_ = 0;
     }
-    return features.num_frames;
+    return static_cast<int>(features.num_frames);
   }
 
   void Reset() {
@@ -246,7 +246,7 @@
   bool onset = false;
   uint8_t previous_true_vad = 0;
   int num_not_adapted = 0;
-  int true_vad_index = 0;
+  size_t true_vad_index = 0;
   bool in_false_positive_region = false;
   int total_false_positive_duration = 0;
   bool video_adapted = false;
@@ -292,7 +292,7 @@
     ASSERT_GE(ret_val, 0);
 
     if (ret_val > 0) {
-      ASSERT_EQ(true_vad_index, ret_val);
+      ASSERT_EQ(true_vad_index, static_cast<size_t>(ret_val));
       for (int n = 0; n < ret_val; n++) {
         if (true_vad[n] == 1) {
           total_active++;
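
Where an int return type is kept but the underlying count is size_t (as in
the static_cast<int>(features.num_frames) above), the narrowing is made
explicit at the boundary; a sketch of that pattern, with the range assert
being an assumption:

  #include <cassert>
  #include <cstddef>
  #include <limits>

  int AsInt(size_t num_frames) {
    assert(num_frames <=
           static_cast<size_t>(std::numeric_limits<int>::max()));
    return static_cast<int>(num_frames);
  }
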
diff --git a/webrtc/tools/agc/agc_manager.cc b/webrtc/tools/agc/agc_manager.cc
index 3d7f624..36290f7 100644
--- a/webrtc/tools/agc/agc_manager.cc
+++ b/webrtc/tools/agc/agc_manager.cc
@@ -66,7 +66,7 @@
 
  protected:
   virtual void Process(const int channel, const ProcessingTypes type,
-                       int16_t audio[], const int samples_per_channel,
+                       int16_t audio[], const size_t samples_per_channel,
                        const int sample_rate_hz, const bool is_stereo) {
     CriticalSectionScoped cs(crit_);
     if (direct_->capture_muted()) {
@@ -81,7 +81,7 @@
     int16_t mono[kMaxSamplesPerChannel];
     int16_t* mono_ptr = audio;
     if (is_stereo) {
-      for (int n = 0; n < samples_per_channel; n++) {
+      for (size_t n = 0; n < samples_per_channel; n++) {
         mono[n] = audio[n * 2];
       }
       mono_ptr = mono;
@@ -94,7 +94,7 @@
     frame_.num_channels_ = is_stereo ? 2 : 1;
     frame_.samples_per_channel_ = samples_per_channel;
     frame_.sample_rate_hz_ = sample_rate_hz;
-    const int length_samples = frame_.num_channels_ * samples_per_channel;
+    const size_t length_samples = frame_.num_channels_ * samples_per_channel;
     memcpy(frame_.data_, audio, length_samples * sizeof(int16_t));
 
     // Apply compression to the audio.
@@ -122,7 +122,7 @@
 
  protected:
   virtual void Process(const int channel, const ProcessingTypes type,
-                       int16_t audio[], const int samples_per_channel,
+                       int16_t audio[], const size_t samples_per_channel,
                        const int sample_rate_hz, const bool is_stereo) {
     CriticalSectionScoped cs(crit_);
     if (direct_->capture_muted()) {
diff --git a/webrtc/tools/agc/agc_manager_unittest.cc b/webrtc/tools/agc/agc_manager_unittest.cc
index c379d2d..e54923b 100644
--- a/webrtc/tools/agc/agc_manager_unittest.cc
+++ b/webrtc/tools/agc/agc_manager_unittest.cc
@@ -33,7 +33,7 @@
 
 const int kSampleRateHz = 32000;
 const int kNumChannels = 1;
-const int kSamplesPerChannel = kSampleRateHz / 100;
+const size_t kSamplesPerChannel = static_cast<size_t>(kSampleRateHz / 100);
 const float kAboveClippedThreshold = 0.2f;
 
 }  // namespace
diff --git a/webrtc/tools/agc/test_utils.cc b/webrtc/tools/agc/test_utils.cc
index 3a26cb9..81819c5 100644
--- a/webrtc/tools/agc/test_utils.cc
+++ b/webrtc/tools/agc/test_utils.cc
@@ -27,11 +27,12 @@
 }
 
 void ApplyGainLinear(float gain, float last_gain, AudioFrame* frame) {
-  const int frame_length = frame->samples_per_channel_ * frame->num_channels_;
+  const size_t frame_length =
+      frame->samples_per_channel_ * frame->num_channels_;
   // Smooth the transition between gain levels across the frame.
   float smoothed_gain = last_gain;
   float gain_step = (gain - last_gain) / (frame_length - 1);
-  for (int i = 0; i < frame_length; ++i) {
+  for (size_t i = 0; i < frame_length; ++i) {
     smoothed_gain += gain_step;
     float sample = std::floor(frame->data_[i] * smoothed_gain + 0.5);
     sample = std::max(std::min(32767.0f, sample), -32768.0f);
diff --git a/webrtc/voice_engine/channel.cc b/webrtc/voice_engine/channel.cc
index 9fe8f0e..c42ac0a 100644
--- a/webrtc/voice_engine/channel.cc
+++ b/webrtc/voice_engine/channel.cc
@@ -3336,7 +3336,7 @@
 
 void Channel::Demultiplex(const int16_t* audio_data,
                           int sample_rate,
-                          int number_of_frames,
+                          size_t number_of_frames,
                           int number_of_channels) {
   CodecInst codec;
   GetSendCodec(codec);
@@ -3398,7 +3398,8 @@
     InsertInbandDtmfTone();
 
     if (_includeAudioLevelIndication) {
-      int length = _audioFrame.samples_per_channel_ * _audioFrame.num_channels_;
+      size_t length =
+          _audioFrame.samples_per_channel_ * _audioFrame.num_channels_;
       if (is_muted) {
         rms_level_.ProcessMuted(length);
       } else {
@@ -3686,7 +3687,7 @@
 Channel::MixOrReplaceAudioWithFile(int mixingFrequency)
 {
   rtc::scoped_ptr<int16_t[]> fileBuffer(new int16_t[640]);
-    int fileSamples(0);
+    size_t fileSamples(0);
 
     {
         CriticalSectionScoped cs(&_fileCritSect);
@@ -3756,7 +3757,7 @@
     assert(mixingFrequency <= 48000);
 
     rtc::scoped_ptr<int16_t[]> fileBuffer(new int16_t[960]);
-    int fileSamples(0);
+    size_t fileSamples(0);
 
     {
         CriticalSectionScoped cs(&_fileCritSect);
@@ -3794,8 +3795,8 @@
     else
     {
         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
-            "Channel::MixAudioWithFile() samples_per_channel_(%d) != "
-            "fileSamples(%d)",
+            "Channel::MixAudioWithFile() samples_per_channel_(%" PRIuS ") != "
+            "fileSamples(%" PRIuS ")",
             audioFrame.samples_per_channel_, fileSamples);
         return -1;
     }
@@ -3855,7 +3856,7 @@
         }
 
         // Replace mixed audio with DTMF tone.
-        for (int sample = 0;
+        for (size_t sample = 0;
             sample < _audioFrame.samples_per_channel_;
             sample++)
         {
@@ -3863,7 +3864,8 @@
                 channel < _audioFrame.num_channels_;
                 channel++)
             {
-                const int index = sample * _audioFrame.num_channels_ + channel;
+                const size_t index =
+                    sample * _audioFrame.num_channels_ + channel;
                 _audioFrame.data_[index] = toneBuffer[sample];
             }
         }
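
The PRIuS macro used in the trace above comes from
webrtc/base/format_macros.h and expands to the right printf conversion for
size_t on each toolchain; a minimal sketch:

  #include <cstdio>
  #include "webrtc/base/format_macros.h"

  void LogSampleCount(size_t samples) {
    // PRIuS keeps the format string portable ("zu" on POSIX toolchains,
    // "Iu" with MSVC), since %d no longer matches a size_t argument.
    std::printf("samples_per_channel=%" PRIuS "\n", samples);
  }
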
diff --git a/webrtc/voice_engine/channel.h b/webrtc/voice_engine/channel.h
index d32863c..43bdeb8 100644
--- a/webrtc/voice_engine/channel.h
+++ b/webrtc/voice_engine/channel.h
@@ -431,7 +431,7 @@
     // does not go through transmit_mixer and APM.
     void Demultiplex(const int16_t* audio_data,
                      int sample_rate,
-                     int number_of_frames,
+                     size_t number_of_frames,
                      int number_of_channels);
     uint32_t PrepareEncodeAndSend(int mixingFrequency);
     uint32_t EncodeAndSend();
diff --git a/webrtc/voice_engine/include/voe_external_media.h b/webrtc/voice_engine/include/voe_external_media.h
index 6185540..2c451e4 100644
--- a/webrtc/voice_engine/include/voe_external_media.h
+++ b/webrtc/voice_engine/include/voe_external_media.h
@@ -31,7 +31,7 @@
   virtual void Process(int channel,
                        ProcessingTypes type,
                        int16_t audio10ms[],
-                       int length,
+                       size_t length,
                        int samplingFreq,
                        bool isStereo) = 0;
 
diff --git a/webrtc/voice_engine/mock/fake_voe_external_media.h b/webrtc/voice_engine/mock/fake_voe_external_media.h
index 66554ae..a315e9b 100644
--- a/webrtc/voice_engine/mock/fake_voe_external_media.h
+++ b/webrtc/voice_engine/mock/fake_voe_external_media.h
@@ -42,9 +42,9 @@
   // Use this to trigger the Process() callback to a registered media processor.
   // If |audio| is NULL, a zero array of the correct length will be forwarded.
   void CallProcess(ProcessingTypes type, int16_t* audio,
-                   int samples_per_channel, int sample_rate_hz,
+                   size_t samples_per_channel, int sample_rate_hz,
                    int num_channels) {
-    const int length = samples_per_channel * num_channels;
+    const size_t length = samples_per_channel * num_channels;
     rtc::scoped_ptr<int16_t[]> data;
     if (!audio) {
       data.reset(new int16_t[length]);
diff --git a/webrtc/voice_engine/output_mixer.cc b/webrtc/voice_engine/output_mixer.cc
index aa6ff4b..3bffc35 100644
--- a/webrtc/voice_engine/output_mixer.cc
+++ b/webrtc/voice_engine/output_mixer.cc
@@ -589,7 +589,7 @@
     } else
     {
         // stereo
-        for (int i = 0; i < _audioFrame.samples_per_channel_; i++)
+        for (size_t i = 0; i < _audioFrame.samples_per_channel_; i++)
         {
             _audioFrame.data_[2 * i] = toneBuffer[i];
             _audioFrame.data_[2 * i + 1] = 0;
diff --git a/webrtc/voice_engine/test/auto_test/fakes/fake_media_process.h b/webrtc/voice_engine/test/auto_test/fakes/fake_media_process.h
index 3e1345a..9e82fbc 100644
--- a/webrtc/voice_engine/test/auto_test/fakes/fake_media_process.h
+++ b/webrtc/voice_engine/test/auto_test/fakes/fake_media_process.h
@@ -18,10 +18,10 @@
   virtual void Process(int channel,
                        const webrtc::ProcessingTypes type,
                        int16_t audio_10ms[],
-                       int length,
+                       size_t length,
                        int sampling_freq_hz,
                        bool stereo) {
-    for (int i = 0; i < length; i++) {
+    for (size_t i = 0; i < length; i++) {
       if (!stereo) {
         audio_10ms[i] = static_cast<int16_t>(audio_10ms[i] *
             sin(2.0 * 3.14 * frequency * 400.0 / sampling_freq_hz));
diff --git a/webrtc/voice_engine/test/auto_test/standard/external_media_test.cc b/webrtc/voice_engine/test/auto_test/standard/external_media_test.cc
index f586b0e..b4daba5 100644
--- a/webrtc/voice_engine/test/auto_test/standard/external_media_test.cc
+++ b/webrtc/voice_engine/test/auto_test/standard/external_media_test.cc
@@ -83,7 +83,7 @@
   ResumePlaying();
   EXPECT_EQ(0, voe_xmedia_->GetAudioFrame(channel_, 0, &frame));
   EXPECT_GT(frame.sample_rate_hz_, 0);
-  EXPECT_GT(frame.samples_per_channel_, 0);
+  EXPECT_GT(frame.samples_per_channel_, 0U);
   PausePlaying();
   EXPECT_EQ(0, voe_xmedia_->SetExternalMixing(channel_, false));
   ResumePlaying();
@@ -101,7 +101,7 @@
     EXPECT_EQ(0, voe_xmedia_->GetAudioFrame(channel_, f, &frame))
        << "Resampling succeeds for freq=" << f;
     EXPECT_EQ(f, frame.sample_rate_hz_);
-    EXPECT_EQ(f / 100, frame.samples_per_channel_);
+    EXPECT_EQ(static_cast<size_t>(f / 100), frame.samples_per_channel_);
   }
   PausePlaying();
   EXPECT_EQ(0, voe_xmedia_->SetExternalMixing(channel_, false));
diff --git a/webrtc/voice_engine/test/auto_test/standard/neteq_stats_test.cc b/webrtc/voice_engine/test/auto_test/standard/neteq_stats_test.cc
index f3be635..94a2c42 100644
--- a/webrtc/voice_engine/test/auto_test/standard/neteq_stats_test.cc
+++ b/webrtc/voice_engine/test/auto_test/standard/neteq_stats_test.cc
@@ -55,5 +55,5 @@
       network_statistics.maxWaitingTimeMs);
 
   // This is only set to a non-zero value in off-mode.
-  EXPECT_EQ(0, network_statistics.addedSamples);
+  EXPECT_EQ(0U, network_statistics.addedSamples);
 }
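
The 0U literal keeps both sides of the gtest comparison unsigned, avoiding a
-Wsign-compare warning now that addedSamples is an unsigned count; reduced to
its essence:

  size_t added_samples = 0;
  EXPECT_EQ(0U, added_samples);  // 0U, not 0: match the unsigned operand.
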
diff --git a/webrtc/voice_engine/transmit_mixer.cc b/webrtc/voice_engine/transmit_mixer.cc
index edd77b8..a02f298 100644
--- a/webrtc/voice_engine/transmit_mixer.cc
+++ b/webrtc/voice_engine/transmit_mixer.cc
@@ -10,6 +10,7 @@
 
 #include "webrtc/voice_engine/transmit_mixer.h"
 
+#include "webrtc/base/format_macros.h"
 #include "webrtc/modules/utility/interface/audio_frame_operations.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/event_wrapper.h"
@@ -311,7 +312,7 @@
 
 int32_t
 TransmitMixer::PrepareDemux(const void* audioSamples,
-                            uint32_t nSamples,
+                            size_t nSamples,
                             uint8_t nChannels,
                             uint32_t samplesPerSec,
                             uint16_t totalDelayMS,
@@ -320,10 +321,11 @@
                             bool keyPressed)
 {
     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
-                 "TransmitMixer::PrepareDemux(nSamples=%u, nChannels=%u,"
-                 "samplesPerSec=%u, totalDelayMS=%u, clockDrift=%d,"
-                 "currentMicLevel=%u)", nSamples, nChannels, samplesPerSec,
-                 totalDelayMS, clockDrift, currentMicLevel);
+                 "TransmitMixer::PrepareDemux(nSamples=%" PRIuS ", "
+                 "nChannels=%u, samplesPerSec=%u, totalDelayMS=%u, "
+                 "clockDrift=%d, currentMicLevel=%u)",
+                 nSamples, nChannels, samplesPerSec, totalDelayMS, clockDrift,
+                 currentMicLevel);
 
     // --- Resample input audio and create/store the initial audio frame
     GenerateAudioFrame(static_cast<const int16_t*>(audioSamples),
@@ -1128,7 +1130,7 @@
 }
 
 void TransmitMixer::GenerateAudioFrame(const int16_t* audio,
-                                       int samples_per_channel,
+                                       size_t samples_per_channel,
                                        int num_channels,
                                        int sample_rate_hz) {
   int codec_rate;
@@ -1189,7 +1191,7 @@
 {
   rtc::scoped_ptr<int16_t[]> fileBuffer(new int16_t[640]);
 
-    int fileSamples(0);
+    size_t fileSamples(0);
     {
         CriticalSectionScoped cs(&_critSect);
         if (_filePlayerPtr == NULL)
diff --git a/webrtc/voice_engine/transmit_mixer.h b/webrtc/voice_engine/transmit_mixer.h
index 919de13..8bbb421 100644
--- a/webrtc/voice_engine/transmit_mixer.h
+++ b/webrtc/voice_engine/transmit_mixer.h
@@ -51,7 +51,7 @@
         AudioProcessing* audioProcessingModule);
 
     int32_t PrepareDemux(const void* audioSamples,
-                         uint32_t nSamples,
+                         size_t nSamples,
                          uint8_t  nChannels,
                          uint32_t samplesPerSec,
                          uint16_t totalDelayMS,
@@ -173,7 +173,7 @@
     void GetSendCodecInfo(int* max_sample_rate, int* max_channels);
 
     void GenerateAudioFrame(const int16_t audioSamples[],
-                            int nSamples,
+                            size_t nSamples,
                             int nChannels,
                             int samplesPerSec);
     int32_t RecordAudioToFile(uint32_t mixingFrequency);
diff --git a/webrtc/voice_engine/transmit_mixer_unittest.cc b/webrtc/voice_engine/transmit_mixer_unittest.cc
index 5fb982b..27aa8b3 100644
--- a/webrtc/voice_engine/transmit_mixer_unittest.cc
+++ b/webrtc/voice_engine/transmit_mixer_unittest.cc
@@ -20,7 +20,7 @@
 class MediaCallback : public VoEMediaProcess {
  public:
   virtual void Process(int channel, ProcessingTypes type,
-                       int16_t audio[], int samples_per_channel,
+                       int16_t audio[], size_t samples_per_channel,
                        int sample_rate_hz, bool is_stereo) {
   }
 };
diff --git a/webrtc/voice_engine/utility.cc b/webrtc/voice_engine/utility.cc
index f952d6c..82ef076 100644
--- a/webrtc/voice_engine/utility.cc
+++ b/webrtc/voice_engine/utility.cc
@@ -47,7 +47,7 @@
     assert(false);
   }
 
-  const int src_length = src_frame.samples_per_channel_ *
+  const size_t src_length = src_frame.samples_per_channel_ *
                          audio_ptr_num_channels;
   int out_length = resampler->Resample(audio_ptr, src_length, dst_frame->data_,
                                        AudioFrame::kMaxDataSizeSamples);
@@ -55,7 +55,8 @@
     LOG_FERR3(LS_ERROR, Resample, audio_ptr, src_length, dst_frame->data_);
     assert(false);
   }
-  dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels;
+  dst_frame->samples_per_channel_ =
+      static_cast<size_t>(out_length / audio_ptr_num_channels);
 
   // Upmix after resampling.
   if (src_frame.num_channels_ == 1 && dst_frame->num_channels_ == 2) {
@@ -71,7 +72,7 @@
 }
 
 void DownConvertToCodecFormat(const int16_t* src_data,
-                              int samples_per_channel,
+                              size_t samples_per_channel,
                               int num_channels,
                               int sample_rate_hz,
                               int codec_num_channels,
@@ -107,7 +108,7 @@
     assert(false);
   }
 
-  const int in_length = samples_per_channel * num_channels;
+  const size_t in_length = samples_per_channel * num_channels;
   int out_length = resampler->Resample(
       src_data, in_length, dst_af->data_, AudioFrame::kMaxDataSizeSamples);
   if (out_length == -1) {
@@ -115,7 +116,7 @@
     assert(false);
   }
 
-  dst_af->samples_per_channel_ = out_length / num_channels;
+  dst_af->samples_per_channel_ = static_cast<size_t>(out_length / num_channels);
   dst_af->sample_rate_hz_ = destination_rate;
   dst_af->num_channels_ = num_channels;
 }
@@ -124,7 +125,7 @@
                 int target_channel,
                 const int16_t source[],
                 int source_channel,
-                int source_len) {
+                size_t source_len) {
   assert(target_channel == 1 || target_channel == 2);
   assert(source_channel == 1 || source_channel == 2);
 
@@ -132,7 +133,7 @@
     // Convert source from mono to stereo.
     int32_t left = 0;
     int32_t right = 0;
-    for (int i = 0; i < source_len; ++i) {
+    for (size_t i = 0; i < source_len; ++i) {
       left = source[i] + target[i * 2];
       right = source[i] + target[i * 2 + 1];
       target[i * 2] = WebRtcSpl_SatW32ToW16(left);
@@ -141,13 +142,13 @@
   } else if (target_channel == 1 && source_channel == 2) {
     // Convert source from stereo to mono.
     int32_t temp = 0;
-    for (int i = 0; i < source_len / 2; ++i) {
+    for (size_t i = 0; i < source_len / 2; ++i) {
       temp = ((source[i * 2] + source[i * 2 + 1]) >> 1) + target[i];
       target[i] = WebRtcSpl_SatW32ToW16(temp);
     }
   } else {
     int32_t temp = 0;
-    for (int i = 0; i < source_len; ++i) {
+    for (size_t i = 0; i < source_len; ++i) {
       temp = source[i] + target[i];
       target[i] = WebRtcSpl_SatW32ToW16(temp);
     }
diff --git a/webrtc/voice_engine/utility.h b/webrtc/voice_engine/utility.h
index 3820695..87003c4 100644
--- a/webrtc/voice_engine/utility.h
+++ b/webrtc/voice_engine/utility.h
@@ -42,7 +42,7 @@
 // TODO(ajm): For now, this still calls Reset() on |dst_af|. Remove this, as
 // it shouldn't be needed.
 void DownConvertToCodecFormat(const int16_t* src_data,
-                              int samples_per_channel,
+                              size_t samples_per_channel,
                               int num_channels,
                               int sample_rate_hz,
                               int codec_num_channels,
@@ -55,7 +55,7 @@
                 int target_channel,
                 const int16_t source[],
                 int source_channel,
-                int source_len);
+                size_t source_len);
 
 }  // namespace voe
 }  // namespace webrtc
diff --git a/webrtc/voice_engine/utility_unittest.cc b/webrtc/voice_engine/utility_unittest.cc
index a5dd70b..5f02f51 100644
--- a/webrtc/voice_engine/utility_unittest.cc
+++ b/webrtc/voice_engine/utility_unittest.cc
@@ -11,6 +11,7 @@
 #include <math.h>
 
 #include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/format_macros.h"
 #include "webrtc/common_audio/resampler/include/push_resampler.h"
 #include "webrtc/modules/interface/module_common_types.h"
 #include "webrtc/voice_engine/utility.h"
@@ -53,7 +54,7 @@
   frame->num_channels_ = 1;
   frame->sample_rate_hz_ = sample_rate_hz;
   frame->samples_per_channel_ = sample_rate_hz / 100;
-  for (int i = 0; i < frame->samples_per_channel_; i++) {
+  for (size_t i = 0; i < frame->samples_per_channel_; i++) {
     frame->data_[i] = static_cast<int16_t>(data * i);
   }
 }
@@ -71,7 +72,7 @@
   frame->num_channels_ = 2;
   frame->sample_rate_hz_ = sample_rate_hz;
   frame->samples_per_channel_ = sample_rate_hz / 100;
-  for (int i = 0; i < frame->samples_per_channel_; i++) {
+  for (size_t i = 0; i < frame->samples_per_channel_; i++) {
     frame->data_[i * 2] = static_cast<int16_t>(left * i);
     frame->data_[i * 2 + 1] = static_cast<int16_t>(right * i);
   }
@@ -92,14 +93,14 @@
 // |test_frame|. It allows an offset of up to |max_delay| samples between
 // the signals, to compensate for the resampling delay.
 float ComputeSNR(const AudioFrame& ref_frame, const AudioFrame& test_frame,
-                 int max_delay) {
+                 size_t max_delay) {
   VerifyParams(ref_frame, test_frame);
   float best_snr = 0;
-  int best_delay = 0;
-  for (int delay = 0; delay <= max_delay; delay++) {
+  size_t best_delay = 0;
+  for (size_t delay = 0; delay <= max_delay; delay++) {
     float mse = 0;
     float variance = 0;
-    for (int i = 0; i < ref_frame.samples_per_channel_ *
+    for (size_t i = 0; i < ref_frame.samples_per_channel_ *
         ref_frame.num_channels_ - delay; i++) {
       int error = ref_frame.data_[i] - test_frame.data_[i + delay];
       mse += error * error;
@@ -113,15 +114,15 @@
       best_delay = delay;
     }
   }
-  printf("SNR=%.1f dB at delay=%d\n", best_snr, best_delay);
+  printf("SNR=%.1f dB at delay=%" PRIuS "\n", best_snr, best_delay);
   return best_snr;
 }
 
 void VerifyFramesAreEqual(const AudioFrame& ref_frame,
                           const AudioFrame& test_frame) {
   VerifyParams(ref_frame, test_frame);
-  for (int i = 0; i < ref_frame.samples_per_channel_ * ref_frame.num_channels_;
-      i++) {
+  for (size_t i = 0;
+       i < ref_frame.samples_per_channel_ * ref_frame.num_channels_; i++) {
     EXPECT_EQ(ref_frame.data_[i], test_frame.data_[i]);
   }
 }
@@ -161,9 +162,10 @@
   // The sinc resampler has a known delay, which we compute here. Multiplying by
   // two gives us a crude maximum for any resampling, as the old resampler
   // typically (but not always) has lower delay.
-  static const int kInputKernelDelaySamples = 16;
-  const int max_delay = static_cast<double>(dst_sample_rate_hz)
-      / src_sample_rate_hz * kInputKernelDelaySamples * dst_channels * 2;
+  static const size_t kInputKernelDelaySamples = 16;
+  const size_t max_delay = static_cast<size_t>(
+      static_cast<double>(dst_sample_rate_hz) / src_sample_rate_hz *
+      kInputKernelDelaySamples * dst_channels * 2);
   printf("(%d, %d Hz) -> (%d, %d Hz) ",  // SNR reported on the same line later.
       src_channels, src_sample_rate_hz, dst_channels, dst_sample_rate_hz);
   if (function == TestRemixAndResample) {
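
One caution that applies to loops like the one in ComputeSNR() above: with
size_t bounds, a subtraction such as length - delay wraps around if delay
could ever exceed length, so the bound must be known non-negative (here
delay <= max_delay, far below the frame length). A hedged sketch of the safe
pattern (names are assumptions):

  // Guard before subtracting inside an unsigned loop bound.
  void WalkWithDelay(const int16_t* data, size_t length, size_t delay) {
    if (delay > length)
      return;  // length - delay would wrap around as size_t.
    for (size_t i = 0; i < length - delay; ++i) {
      // ... compare data[i] against data[i + delay] ...
    }
  }
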
diff --git a/webrtc/voice_engine/voe_base_impl.cc b/webrtc/voice_engine/voe_base_impl.cc
index 7bfaddc..1dc89e7 100644
--- a/webrtc/voice_engine/voe_base_impl.cc
+++ b/webrtc/voice_engine/voe_base_impl.cc
@@ -10,6 +10,7 @@
 
 #include "webrtc/voice_engine/voe_base_impl.h"
 
+#include "webrtc/base/format_macros.h"
 #include "webrtc/common.h"
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
@@ -79,7 +80,7 @@
 }
 
 int32_t VoEBaseImpl::RecordedDataIsAvailable(
-    const void* audioSamples, uint32_t nSamples, uint8_t nBytesPerSample,
+    const void* audioSamples, size_t nSamples, size_t nBytesPerSample,
     uint8_t nChannels, uint32_t samplesPerSec, uint32_t totalDelayMS,
     int32_t clockDrift, uint32_t micLevel, bool keyPressed,
     uint32_t& newMicLevel) {
@@ -89,14 +90,14 @@
   return 0;
 }
 
-int32_t VoEBaseImpl::NeedMorePlayData(uint32_t nSamples,
-                                      uint8_t nBytesPerSample,
+int32_t VoEBaseImpl::NeedMorePlayData(size_t nSamples,
+                                      size_t nBytesPerSample,
                                       uint8_t nChannels, uint32_t samplesPerSec,
-                                      void* audioSamples, uint32_t& nSamplesOut,
+                                      void* audioSamples, size_t& nSamplesOut,
                                       int64_t* elapsed_time_ms,
                                       int64_t* ntp_time_ms) {
   GetPlayoutData(static_cast<int>(samplesPerSec), static_cast<int>(nChannels),
-                 static_cast<int>(nSamples), true, audioSamples,
+                 nSamples, true, audioSamples,
                  elapsed_time_ms, ntp_time_ms);
   nSamplesOut = audioFrame_.samples_per_channel_;
   return 0;
@@ -105,7 +106,8 @@
 int VoEBaseImpl::OnDataAvailable(const int voe_channels[],
                                  int number_of_voe_channels,
                                  const int16_t* audio_data, int sample_rate,
-                                 int number_of_channels, int number_of_frames,
+                                 int number_of_channels,
+                                 size_t number_of_frames,
                                  int audio_delay_milliseconds, int volume,
                                  bool key_pressed, bool need_audio_processing) {
   if (number_of_voe_channels == 0) return 0;
@@ -133,7 +135,7 @@
 
 void VoEBaseImpl::OnData(int voe_channel, const void* audio_data,
                          int bits_per_sample, int sample_rate,
-                         int number_of_channels, int number_of_frames) {
+                         int number_of_channels, size_t number_of_frames) {
   PushCaptureData(voe_channel, audio_data, bits_per_sample, sample_rate,
                   number_of_channels, number_of_frames);
 }
@@ -141,7 +143,7 @@
 void VoEBaseImpl::PushCaptureData(int voe_channel, const void* audio_data,
                                   int bits_per_sample, int sample_rate,
                                   int number_of_channels,
-                                  int number_of_frames) {
+                                  size_t number_of_frames) {
   voe::ChannelOwner ch = shared_->channel_manager().GetChannel(voe_channel);
   voe::Channel* channel_ptr = ch.channel();
   if (!channel_ptr) return;
@@ -154,12 +156,14 @@
   }
 }
 
-void VoEBaseImpl::PullRenderData(int bits_per_sample, int sample_rate,
-                                 int number_of_channels, int number_of_frames,
+void VoEBaseImpl::PullRenderData(int bits_per_sample,
+                                 int sample_rate,
+                                 int number_of_channels,
+                                 size_t number_of_frames,
                                  void* audio_data, int64_t* elapsed_time_ms,
                                  int64_t* ntp_time_ms) {
   assert(bits_per_sample == 16);
-  assert(number_of_frames == static_cast<int>(sample_rate / 100));
+  assert(number_of_frames == static_cast<size_t>(sample_rate / 100));
 
   GetPlayoutData(sample_rate, number_of_channels, number_of_frames, false,
                  audio_data, elapsed_time_ms, ntp_time_ms);
@@ -748,7 +752,7 @@
 int VoEBaseImpl::ProcessRecordedDataWithAPM(
     const int voe_channels[], int number_of_voe_channels,
     const void* audio_data, uint32_t sample_rate, uint8_t number_of_channels,
-    uint32_t number_of_frames, uint32_t audio_delay_milliseconds,
+    size_t number_of_frames, uint32_t audio_delay_milliseconds,
     int32_t clock_drift, uint32_t volume, bool key_pressed) {
   assert(shared_->transmit_mixer() != nullptr);
   assert(shared_->audio_device() != nullptr);
@@ -813,7 +817,7 @@
 }
 
 void VoEBaseImpl::GetPlayoutData(int sample_rate, int number_of_channels,
-                                 int number_of_frames, bool feed_data_to_apm,
+                                 size_t number_of_frames, bool feed_data_to_apm,
                                  void* audio_data, int64_t* elapsed_time_ms,
                                  int64_t* ntp_time_ms) {
   assert(shared_->output_mixer() != nullptr);
diff --git a/webrtc/voice_engine/voe_base_impl.h b/webrtc/voice_engine/voe_base_impl.h
index dfb2e04..f0ac959 100644
--- a/webrtc/voice_engine/voe_base_impl.h
+++ b/webrtc/voice_engine/voe_base_impl.h
@@ -54,30 +54,31 @@
   int AssociateSendChannel(int channel, int accociate_send_channel) override;
 
   // AudioTransport
-  int32_t RecordedDataIsAvailable(const void* audioSamples, uint32_t nSamples,
-                                  uint8_t nBytesPerSample, uint8_t nChannels,
+  int32_t RecordedDataIsAvailable(const void* audioSamples, size_t nSamples,
+                                  size_t nBytesPerSample, uint8_t nChannels,
                                   uint32_t samplesPerSec, uint32_t totalDelayMS,
                                   int32_t clockDrift, uint32_t micLevel,
                                   bool keyPressed,
                                   uint32_t& newMicLevel) override;
-  int32_t NeedMorePlayData(uint32_t nSamples, uint8_t nBytesPerSample,
+  int32_t NeedMorePlayData(size_t nSamples, size_t nBytesPerSample,
                            uint8_t nChannels, uint32_t samplesPerSec,
-                           void* audioSamples, uint32_t& nSamplesOut,
+                           void* audioSamples, size_t& nSamplesOut,
                            int64_t* elapsed_time_ms,
                            int64_t* ntp_time_ms) override;
   int OnDataAvailable(const int voe_channels[], int number_of_voe_channels,
                       const int16_t* audio_data, int sample_rate,
-                      int number_of_channels, int number_of_frames,
+                      int number_of_channels, size_t number_of_frames,
                       int audio_delay_milliseconds, int volume,
                       bool key_pressed, bool need_audio_processing) override;
   void OnData(int voe_channel, const void* audio_data, int bits_per_sample,
               int sample_rate, int number_of_channels,
-              int number_of_frames) override;
+              size_t number_of_frames) override;
   void PushCaptureData(int voe_channel, const void* audio_data,
                        int bits_per_sample, int sample_rate,
-                       int number_of_channels, int number_of_frames) override;
+                       int number_of_channels,
+                       size_t number_of_frames) override;
   void PullRenderData(int bits_per_sample, int sample_rate,
-                      int number_of_channels, int number_of_frames,
+                      int number_of_channels, size_t number_of_frames,
                       void* audio_data, int64_t* elapsed_time_ms,
                       int64_t* ntp_time_ms) override;
 
@@ -105,11 +106,11 @@
   int ProcessRecordedDataWithAPM(
       const int voe_channels[], int number_of_voe_channels,
       const void* audio_data, uint32_t sample_rate, uint8_t number_of_channels,
-      uint32_t number_of_frames, uint32_t audio_delay_milliseconds,
+      size_t number_of_frames, uint32_t audio_delay_milliseconds,
       int32_t clock_drift, uint32_t volume, bool key_pressed);
 
   void GetPlayoutData(int sample_rate, int number_of_channels,
-                      int number_of_frames, bool feed_data_to_apm,
+                      size_t number_of_frames, bool feed_data_to_apm,
                       void* audio_data, int64_t* elapsed_time_ms,
                       int64_t* ntp_time_ms);
 
diff --git a/webrtc/voice_engine/voice_engine_defines.h b/webrtc/voice_engine/voice_engine_defines.h
index 6fea6c7..f78fb2c 100644
--- a/webrtc/voice_engine/voice_engine_defines.h
+++ b/webrtc/voice_engine/voice_engine_defines.h
@@ -28,7 +28,7 @@
 
 // Internal buffer size required for mono audio, based on the highest sample
 // rate voice engine supports (10 ms of audio at 192 kHz).
-static const int kMaxMonoDataSizeSamples = 1920;
+static const size_t kMaxMonoDataSizeSamples = 1920;
 
 // VolumeControl
 enum { kMinVolumeLevel = 0 };